[rust] Remove unneeded, grandparented-in, direct Rust crate deps.

This CL removes dependencies from
`third_party/rust/chromium_crates_io/Cargo.toml` that have been
grandparented-in in https://crrev.com/c/5019147, but that do not seem
to have any direct dependencies from Chromium targets.

The CL has been created by:

* Manually editing `third_party/rust/chromium_crates_io/Cargo.toml`
  and `third_party/rust/chromium_crates_io/gnrt_config.toml`
* Running:
    - `tools/crates/run_gnrt.py vendor`
    - `tools/crates/run_gnrt.py gen`

Bug: 405468274
Change-Id: I398efbc6570134df156302bba98dec4c8e6d7def
Fixed: 378273233
Cq-Include-Trybots: chromium/try:android-rust-arm32-rel
Cq-Include-Trybots: chromium/try:android-rust-arm64-dbg
Cq-Include-Trybots: chromium/try:android-rust-arm64-rel
Cq-Include-Trybots: chromium/try:linux-rust-x64-dbg
Cq-Include-Trybots: chromium/try:linux-rust-x64-rel
Cq-Include-Trybots: chromium/try:win-rust-x64-dbg
Cq-Include-Trybots: chromium/try:win-rust-x64-rel
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6384041
Commit-Queue: Ɓukasz Anforowicz <lukasza@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1437127}
diff --git a/third_party/rust/aho_corasick/v1/BUILD.gn b/third_party/rust/aho_corasick/v1/BUILD.gn
deleted file mode 100644
index 03a9e325..0000000
--- a/third_party/rust/aho_corasick/v1/BUILD.gn
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "aho_corasick"
-  epoch = "1"
-  crate_type = "rlib"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/ahocorasick.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/automaton.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/dfa.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/macros.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/contiguous.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/noncontiguous.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/api.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/ext.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/pattern.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/rabinkarp.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/builder.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/generic.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/tests.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/vector.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/tests.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/transducer.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/alphabet.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/buffer.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/byte_frequencies.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/debug.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/error.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/int.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/prefilter.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/primitives.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/remapper.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/search.rs",
-    "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/special.rs",
-  ]
-  inputs = [ "//third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/README.md" ]
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "1.1.3"
-  cargo_pkg_authors = "Andrew Gallant <jamslam@gmail.com>"
-  cargo_pkg_name = "aho-corasick"
-  cargo_pkg_description = "Fast multiple substring searching."
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  deps = [ "//third_party/rust/memchr/v2:lib" ]
-  features = [
-    "perf-literal",
-    "std",
-  ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  # Only for usage from third-party crates. Add the crate to
-  # //third_party/rust/chromium_crates_io/Cargo.toml to use
-  # it from first-party code.
-  visibility = [ "//third_party/rust/*" ]
-}
diff --git a/third_party/rust/aho_corasick/v1/README.chromium b/third_party/rust/aho_corasick/v1/README.chromium
deleted file mode 100644
index 6094be7a..0000000
--- a/third_party/rust/aho_corasick/v1/README.chromium
+++ /dev/null
@@ -1,10 +0,0 @@
-Name: aho-corasick
-URL: https://crates.io/crates/aho-corasick
-Version: 1.1.3
-Revision: 56256dca1bcd2365fd1dc987c1c06195429a2e2c
-License: MIT
-License File: //third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/LICENSE-MIT
-Shipped: yes
-Security Critical: yes
-
-Description: Fast multiple substring searching.
diff --git a/third_party/rust/anyhow/v1/BUILD.gn b/third_party/rust/anyhow/v1/BUILD.gn
index ae584ed..e567f277 100644
--- a/third_party/rust/anyhow/v1/BUILD.gn
+++ b/third_party/rust/anyhow/v1/BUILD.gn
@@ -52,5 +52,9 @@
     "--cap-lints=allow",  # Suppress all warnings in crates.io crates
   ]
 
+  # Only for usage from third-party crates. Add the crate to
+  # //third_party/rust/chromium_crates_io/Cargo.toml to use
+  # it from first-party code.
+  visibility = [ "//third_party/rust/*" ]
   testonly = true
 }
diff --git a/third_party/rust/chromium_crates_io/Cargo.lock b/third_party/rust/chromium_crates_io/Cargo.lock
index 04f71d3d..66f6e31 100644
--- a/third_party/rust/chromium_crates_io/Cargo.lock
+++ b/third_party/rust/chromium_crates_io/Cargo.lock
@@ -8,14 +8,6 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "aho-corasick"
-version = "1.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "memchr",
-]
-
-[[package]]
 name = "android_system_properties"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -116,7 +108,6 @@
 name = "chromium"
 version = "0.1.0"
 dependencies = [
- "anyhow",
  "base64",
  "bitflags",
  "bstr",
@@ -129,7 +120,6 @@
  "fend-core",
  "font-types",
  "hex",
- "hex-literal",
  "icu_capi",
  "itertools",
  "lazy_static",
@@ -144,12 +134,7 @@
  "rand",
  "rand_pcg",
  "read-fonts",
- "regex",
- "rstest",
  "rustc-demangle-capi",
- "rustversion",
- "serde",
- "serde_json",
  "serde_json_lenient",
  "sfv",
  "skrifa",
@@ -160,7 +145,6 @@
  "syn",
  "temporal_capi",
  "tinyvec",
- "unicode-linebreak",
 ]
 
 [[package]]
@@ -362,11 +346,6 @@
 ]
 
 [[package]]
-name = "glob"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "hashbrown"
 version = "0.15.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -382,11 +361,6 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "hex-literal"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "iana-time-zone"
 version = "0.1.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1103,60 +1077,9 @@
 ]
 
 [[package]]
-name = "regex"
-version = "1.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-automata",
- "regex-syntax",
-]
-
-[[package]]
 name = "regex-automata"
 version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-syntax",
-]
-
-[[package]]
-name = "regex-syntax"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "relative-path"
-version = "1.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "rstest"
-version = "0.22.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rstest_macros",
- "rustc_version",
-]
-
-[[package]]
-name = "rstest_macros"
-version = "0.22.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if",
- "glob",
- "proc-macro2",
- "quote",
- "regex",
- "relative-path",
- "rustc_version",
- "syn",
- "unicode-ident",
-]
 
 [[package]]
 name = "rust_decimal"
@@ -1181,14 +1104,6 @@
 ]
 
 [[package]]
-name = "rustc_version"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "semver",
-]
-
-[[package]]
 name = "rustversion"
 version = "1.0.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1199,11 +1114,6 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "semver"
-version = "1.0.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "serde"
 version = "1.0.219"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1222,17 +1132,6 @@
 ]
 
 [[package]]
-name = "serde_json"
-version = "1.0.140"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "itoa",
- "memchr",
- "ryu",
- "serde",
-]
-
-[[package]]
 name = "serde_json_lenient"
 version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1399,11 +1298,6 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "unicode-linebreak"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "unicode-width"
 version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/third_party/rust/chromium_crates_io/Cargo.toml b/third_party/rust/chromium_crates_io/Cargo.toml
index b614b36..c3d35625 100644
--- a/third_party/rust/chromium_crates_io/Cargo.toml
+++ b/third_party/rust/chromium_crates_io/Cargo.toml
@@ -16,7 +16,6 @@
 [workspace]
 
 [dependencies]
-anyhow = "1"
 base64 = "0.22"
 bitflags = "2"
 bytes = "1"
@@ -25,7 +24,6 @@
 fend-core = "1"
 font-types = "0.8"
 hex = "0.4"
-hex-literal = "0.4"
 itertools = "0.11"
 lazy_static = "1"
 libc = "0.2"
@@ -38,10 +36,7 @@
 rand = "0.8"
 rand_pcg = "0.3"
 read-fonts = "0.27.3"
-regex = "1"
 rustc-demangle-capi = "0.1"
-serde = "1"
-serde_json = "1"
 sfv = "0.10.4"
 skrifa = "0.29"
 small_ctor = "0.1"
@@ -83,13 +78,6 @@
 # when debug asserts are off, which is when DCHECK_IS_ON() is false.
 features = [ "std", "release_max_level_info" ]
 
-[dependencies.rstest]
-default-features = false
-version = "0.22"
-
-[dependencies.rustversion]
-version = "1"
-
 # Temporarily inform our cargo tooling that we care about
 # the new version of serde_json_lenient. We are in the midst
 # of CLs that move from the older to the newer.
@@ -102,9 +90,6 @@
 version = "2"
 features = ["full"]
 
-[dependencies.unicode-linebreak]
-version = "0.1"
-
 [dependencies.bytemuck]
 version = "1"
 features = ["nightly_portable_simd"]  # For std::simd coverage.
diff --git a/third_party/rust/chromium_crates_io/gnrt_config.toml b/third_party/rust/chromium_crates_io/gnrt_config.toml
index 74606d73..bf937b3 100644
--- a/third_party/rust/chromium_crates_io/gnrt_config.toml
+++ b/third_party/rust/chromium_crates_io/gnrt_config.toml
@@ -147,10 +147,6 @@
 [crate.hex]
 group = 'test'
 
-[crate.hex-literal]
-group = 'test'
-extra_input_roots = ['../README.md']
-
 [crate.itertools]
 group = 'test'
 
@@ -255,16 +251,6 @@
 [crate.read-fonts]
 extra_src_roots = ['../generated']
 
-[crate.regex]
-group = 'test'
-
-[crate.relative-path]
-# TODO(https://crbug.com/369075726): Remove this exception.
-no_license_file_tracked_in_crbug_369075726 = true
-
-[crate.rstest]
-group = 'test'
-
 [crate.rust_decimal]
 build_script_outputs = ['README-lib.md']
 
@@ -278,9 +264,6 @@
 # TODO(https://crbug.com/369075726): Remove this exception.
 no_license_file_tracked_in_crbug_369075726 = true
 
-[crate.serde_json]
-group = 'test'
-
 [crate.skrifa]
 extra_src_roots = ['../generated']
 
@@ -296,10 +279,6 @@
 [crate.temporal_rs]
 license_files = ["LICENSE-MIT", "LICENSE-APACHE"]
 
-[crate.unicode-linebreak]
-allow_first_party_usage = false
-build_script_outputs = [ "tables.rs" ]
-
 [crate.windows-sys]
 extra_kv = { visibility = [ '//build/rust/tests/windows_sys_test' ] }
 
diff --git a/third_party/rust/chromium_crates_io/supply-chain/config.toml b/third_party/rust/chromium_crates_io/supply-chain/config.toml
index eb77302..02c8d2d 100644
--- a/third_party/rust/chromium_crates_io/supply-chain/config.toml
+++ b/third_party/rust/chromium_crates_io/supply-chain/config.toml
@@ -44,9 +44,6 @@
 [policy."adler2:2.0.0"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
-[policy."aho-corasick:1.1.3"]
-criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
-
 [policy."android_system_properties:0.1.5"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
@@ -164,18 +161,12 @@
 [policy."getrandom:0.2.15"]
 criteria = ["crypto-safe", "safe-to-run"]
 
-[policy."glob:0.3.2"]
-criteria = ["crypto-safe", "safe-to-run"]
-
 [policy."hashbrown:0.15.2"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
 [policy."heck:0.4.1"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
-[policy."hex-literal:0.4.1"]
-criteria = ["crypto-safe", "safe-to-run"]
-
 [policy."hex:0.4.3"]
 criteria = ["crypto-safe", "safe-to-run"]
 
@@ -386,24 +377,6 @@
 [policy."regex-automata:0.4.9"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
-[policy."regex-syntax:0.8.5"]
-criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
-
-[policy."regex:1.11.1"]
-criteria = ["crypto-safe", "safe-to-run"]
-
-[policy."relative-path:1.9.3"]
-criteria = ["crypto-safe", "safe-to-run"]
-
-[policy."rstest:0.22.0"]
-criteria = ["crypto-safe", "safe-to-run"]
-
-[policy."rstest_macros:0.22.0"]
-criteria = ["crypto-safe", "safe-to-run"]
-
-[policy."rstest_reuse:0.7.0"]
-criteria = ["crypto-safe", "safe-to-run"]
-
 [policy."rust_decimal:1.37.0"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
@@ -413,27 +386,18 @@
 [policy."rustc-demangle:0.1.24"]
 criteria = ["crypto-safe", "safe-to-run"]
 
-[policy."rustc_version:0.4.1"]
-criteria = ["crypto-safe", "safe-to-run"]
-
 [policy."rustversion:1.0.20"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
 [policy."ryu:1.0.20"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
-[policy."semver:1.0.26"]
-criteria = ["crypto-safe", "safe-to-run"]
-
 [policy."serde:1.0.219"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
 [policy."serde_derive:1.0.219"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
-[policy."serde_json:1.0.140"]
-criteria = ["crypto-safe", "safe-to-run"]
-
 [policy."serde_json_lenient:0.2.4"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
@@ -500,9 +464,6 @@
 [policy."unicode-ident:1.0.18"]
 criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
 
-[policy."unicode-linebreak:0.1.5"]
-criteria = ["crypto-safe", "safe-to-deploy", "ub-risk-2"]
-
 [policy."unicode-width:0.1.14"]
 criteria = ["crypto-safe", "safe-to-run"]
 
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.cargo_vcs_info.json
deleted file mode 100644
index 62d66e6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "56256dca1bcd2365fd1dc987c1c06195429a2e2c"
-  },
-  "path_in_vcs": ""
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.github/workflows/ci.yml
deleted file mode 100644
index f1b34cf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.github/workflows/ci.yml
+++ /dev/null
@@ -1,148 +0,0 @@
-name: ci
-on:
-  pull_request:
-  push:
-    branches:
-    - master
-  schedule:
-  - cron: '00 01 * * *'
-
-# The section is needed to drop write-all permissions that are granted on
-# `schedule` event. By specifying any permission explicitly all others are set
-# to none. By using the principle of least privilege the damage a compromised
-# workflow can do (because of an injection or compromised third party tool or
-# action) is restricted. Currently the worklow doesn't need any additional
-# permission except for pulling the code. Adding labels to issues, commenting
-# on pull-requests, etc. may need additional permissions:
-#
-# Syntax for this section:
-# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
-#
-# Reference for how to assign permissions on a job-by-job basis:
-# https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
-#
-# Reference for available permissions that we can enable if needed:
-# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token
-permissions:
-  # to fetch code (actions/checkout)
-  contents: read
-
-jobs:
-  test:
-    name: test
-    env:
-      # For some builds, we use cross to test on 32-bit and big-endian
-      # systems.
-      CARGO: cargo
-      # When CARGO is set to CROSS, TARGET is set to `--target matrix.target`.
-      # Note that we only use cross on Linux, so setting a target on a
-      # different OS will just use normal cargo.
-      TARGET:
-      # Bump this as appropriate. We pin to a version to make sure CI
-      # continues to work as cross releases in the past have broken things
-      # in subtle ways.
-      CROSS_VERSION: v0.2.5
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-        - build: pinned
-          os: ubuntu-latest
-          rust: 1.60.0
-        - build: stable
-          os: ubuntu-latest
-          rust: stable
-        - build: stable-x86
-          os: ubuntu-latest
-          rust: stable
-          target: i686-unknown-linux-gnu
-        - build: stable-aarch64
-          os: ubuntu-latest
-          rust: stable
-          target: aarch64-unknown-linux-gnu
-        - build: stable-powerpc64
-          os: ubuntu-latest
-          rust: stable
-          target: powerpc64-unknown-linux-gnu
-        - build: stable-s390x
-          os: ubuntu-latest
-          rust: stable
-          target: s390x-unknown-linux-gnu
-        - build: beta
-          os: ubuntu-latest
-          rust: beta
-        - build: nightly
-          os: ubuntu-latest
-          rust: nightly
-        - build: macos
-          os: macos-latest
-          rust: stable
-        - build: win-msvc
-          os: windows-latest
-          rust: stable
-        - build: win-gnu
-          os: windows-latest
-          rust: stable-x86_64-gnu
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v3
-    - name: Install Rust
-      uses: dtolnay/rust-toolchain@master
-      with:
-        toolchain: ${{ matrix.rust }}
-    - name: Install and configure Cross
-      if: matrix.os == 'ubuntu-latest' && matrix.target != ''
-      run: |
-        # In the past, new releases of 'cross' have broken CI. So for now, we
-        # pin it. We also use their pre-compiled binary releases because cross
-        # has over 100 dependencies and takes a bit to compile.
-        dir="$RUNNER_TEMP/cross-download"
-        mkdir "$dir"
-        echo "$dir" >> $GITHUB_PATH
-        cd "$dir"
-        curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz"
-        tar xf cross-x86_64-unknown-linux-musl.tar.gz
-
-        # We used to install 'cross' from master, but it kept failing. So now
-        # we build from a known-good version until 'cross' becomes more stable
-        # or we find an alternative. Notably, between v0.2.1 and current
-        # master (2022-06-14), the number of Cross's dependencies has doubled.
-        echo "CARGO=cross" >> $GITHUB_ENV
-        echo "TARGET=--target ${{ matrix.target }}" >> $GITHUB_ENV
-    - name: Show command used for Cargo
-      run: |
-        echo "cargo command is: ${{ env.CARGO }}"
-        echo "target flag is: ${{ env.TARGET }}"
-    - name: Show CPU info for debugging
-      if: matrix.os == 'ubuntu-latest'
-      run: lscpu
-    # See: https://github.com/rust-lang/regex/blob/a2887636930156023172e4b376a6febad4e49120/.github/workflows/ci.yml#L145-L163
-    - name: Pin memchr to 2.6.2
-      if: matrix.build == 'pinned'
-      run: cargo update -p memchr --precise 2.6.2
-    - run: ${{ env.CARGO }} build --verbose $TARGET
-    - run: ${{ env.CARGO }} doc --verbose $TARGET
-    - run: ${{ env.CARGO }} test --verbose $TARGET
-    - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std,perf-literal $TARGET
-    - run: ${{ env.CARGO }} test --lib --verbose --no-default-features $TARGET
-    - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std $TARGET
-    - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features perf-literal $TARGET
-    - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std,perf-literal,logging $TARGET
-    - if: matrix.build == 'nightly'
-      run: ${{ env.CARGO }} build --manifest-path aho-corasick-debug/Cargo.toml $TARGET
-
-  rustfmt:
-    name: rustfmt
-    runs-on: ubuntu-latest
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v3
-    - name: Install Rust
-      uses: dtolnay/rust-toolchain@master
-      with:
-        toolchain: stable
-        components: rustfmt
-    - name: Check formatting
-      run: |
-        cargo fmt --all -- --check
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.gitignore b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.gitignore
deleted file mode 100644
index f1a4d65d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-.*.swp
-doc
-tags
-examples/ss10pusa.csv
-build
-target
-/Cargo.lock
-scratch*
-bench_large/huge
-BREADCRUMBS
-/tmp
-/aho-corasick-debug/Cargo.lock
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.vim/coc-settings.json b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.vim/coc-settings.json
deleted file mode 100644
index 887eb6f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/.vim/coc-settings.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "rust-analyzer.linkedProjects": [
-    "aho-corasick-debug/Cargo.toml",
-    "benchmarks/engines/rust-aho-corasick/Cargo.toml",
-    "benchmarks/engines/rust-daachorse/Cargo.toml",
-    "benchmarks/engines/rust-jetscii/Cargo.toml",
-    "benchmarks/engines/naive/Cargo.toml",
-    "benchmarks/shared/Cargo.toml",
-    "fuzz/Cargo.toml",
-    "Cargo.toml"
-  ]
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/COPYING b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/COPYING
deleted file mode 100644
index bb9c20a0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/COPYING
+++ /dev/null
@@ -1,3 +0,0 @@
-This project is dual-licensed under the Unlicense and MIT licenses.
-
-You may use this code under the terms of either license.
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/Cargo.toml
deleted file mode 100644
index 737f3ef..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/Cargo.toml
+++ /dev/null
@@ -1,74 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.60.0"
-name = "aho-corasick"
-version = "1.1.3"
-authors = ["Andrew Gallant <jamslam@gmail.com>"]
-exclude = [
-    "/aho-corasick-debug",
-    "/benchmarks",
-    "/tmp",
-]
-autotests = false
-description = "Fast multiple substring searching."
-homepage = "https://github.com/BurntSushi/aho-corasick"
-readme = "README.md"
-keywords = [
-    "string",
-    "search",
-    "text",
-    "pattern",
-    "multi",
-]
-categories = ["text-processing"]
-license = "Unlicense OR MIT"
-repository = "https://github.com/BurntSushi/aho-corasick"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = [
-    "--cfg",
-    "docsrs",
-    "--generate-link-to-definition",
-]
-
-[profile.bench]
-debug = 2
-
-[profile.release]
-debug = 2
-
-[lib]
-name = "aho_corasick"
-
-[dependencies.log]
-version = "0.4.17"
-optional = true
-
-[dependencies.memchr]
-version = "2.4.0"
-optional = true
-default-features = false
-
-[dev-dependencies.doc-comment]
-version = "0.3.3"
-
-[features]
-default = [
-    "std",
-    "perf-literal",
-]
-logging = ["dep:log"]
-perf-literal = ["dep:memchr"]
-std = ["memchr?/std"]
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/Cargo.toml.orig
deleted file mode 100644
index c9ad16d2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/Cargo.toml.orig
+++ /dev/null
@@ -1,74 +0,0 @@
-[package]
-name = "aho-corasick"
-version = "1.1.3"  #:version
-authors = ["Andrew Gallant <jamslam@gmail.com>"]
-description = "Fast multiple substring searching."
-homepage = "https://github.com/BurntSushi/aho-corasick"
-repository = "https://github.com/BurntSushi/aho-corasick"
-readme = "README.md"
-keywords = ["string", "search", "text", "pattern", "multi"]
-license = "Unlicense OR MIT"
-categories = ["text-processing"]
-autotests = false
-exclude = ["/aho-corasick-debug", "/benchmarks", "/tmp"]
-edition = "2021"
-rust-version = "1.60.0"
-
-[lib]
-name = "aho_corasick"
-
-[features]
-default = ["std", "perf-literal"]
-std = ["memchr?/std"]
-
-# Enables prefilter optimizations that depend on external crates.
-perf-literal = ["dep:memchr"]
-
-# Enable logging via the 'log' crate. This is useful for seeing messages about
-# internal decisions and metrics. For example, how the choice of the internal
-# Aho-Corasick implementation is used or the heap usage of an automaton.
-logging = ["dep:log"]
-
-# Provides a trait impl for fst::Automaton for nfa::noncontiguous::NFA,
-# nfa::contiguous::NFA and dfa::DFA. This is useful for searching an
-# FST with an Aho-Corasick automaton. Note that this does not apply
-# to the top-level 'AhoCorasick' type, as it does not implement the
-# aho_corasick::automaton::Automaton trait, and thus enabling this feature does
-# not cause it to implement fst::Automaton either.
-#
-# NOTE: Currently this feature is not available as `fst` is not at 1.0 yet,
-# and this would make `fst` a public dependency. If you absolutely need this,
-# you can copy the (very small) src/transducer.rs file to your tree. It
-# specifically does not use any private APIs and should work after replacing
-# 'crate::' with 'aho_corasick::'.
-#
-# NOTE: I think my current plan is to flip this around an add an optional
-# dependency on 'aho-corasick' to the 'fst' crate and move the trait impls
-# there. But I haven't gotten around to it yet.
-# transducer = ["fst"]
-
-[dependencies]
-log = { version = "0.4.17", optional = true }
-memchr = { version = "2.4.0", default-features = false, optional = true }
-
-[dev-dependencies]
-doc-comment = "0.3.3"
-# fst = "0.4.5"
-
-[package.metadata.docs.rs]
-# We want to document all features.
-all-features = true
-# This opts into a nightly unstable option to show the features that need to be
-# enabled for public API items. To do that, we set 'docsrs', and when that's
-# enabled, we enable the 'doc_auto_cfg' feature.
-#
-# To test this locally, run:
-#
-#     RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features
-rustdoc-args = ["--cfg", "docsrs", "--generate-link-to-definition"]
-
-[profile.release]
-debug = true
-
-[profile.bench]
-debug = true
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/DESIGN.md b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/DESIGN.md
deleted file mode 100644
index f911f0c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/DESIGN.md
+++ /dev/null
@@ -1,481 +0,0 @@
-This document describes the internal design of this crate, which is an object
-lesson in what happens when you take a fairly simple old algorithm like
-Aho-Corasick and make it fast and production ready.
-
-The target audience of this document is Rust programmers that have some
-familiarity with string searching, however, one does not need to know the
-Aho-Corasick algorithm in order to read this (it is explained below). One
-should, however, know what a trie is. (If you don't, go read its Wikipedia
-article.)
-
-The center-piece of this crate is an implementation of Aho-Corasick. On its
-own, Aho-Corasick isn't that complicated. The complex pieces come from the
-different variants of Aho-Corasick implemented in this crate. Specifically,
-they are:
-
-* Aho-Corasick as a noncontiguous NFA. States have their transitions
-  represented sparsely, and each state puts its transitions in its own separate
-  allocation. Hence the same "noncontiguous."
-* Aho-Corasick as a contiguous NFA. This NFA uses a single allocation to
-  represent the transitions of all states. That is, transitions are laid out
-  contiguously in memory. Moreover, states near the starting state are
-  represented densely, such that finding the next state ID takes a constant
-  number of instructions.
-* Aho-Corasick as a DFA. In this case, all states are represented densely in
-  a transition table that uses one allocation.
-* Supporting "standard" match semantics, along with its overlapping variant,
-  in addition to leftmost-first and leftmost-longest semantics. The "standard"
-  semantics are typically what you see in a textbook description of
-  Aho-Corasick. However, Aho-Corasick is also useful as an optimization in
-  regex engines, which often use leftmost-first or leftmost-longest semantics.
-  Thus, it is useful to implement those semantics here. The "standard" and
-  "leftmost" search algorithms are subtly different, and also require slightly
-  different construction algorithms.
-* Support for ASCII case insensitive matching.
-* Support for accelerating searches when the patterns all start with a small
-  number of fixed bytes. Or alternatively, when the  patterns all contain a
-  small number of rare bytes. (Searching for these bytes uses SIMD vectorized
-  code courtesy of `memchr`.)
-* Transparent support for alternative SIMD vectorized search routines for
-  smaller number of literals, such as the Teddy algorithm. We called these
-  "packed" search routines because they use SIMD. They can often be an order of
-  magnitude faster than just Aho-Corasick, but don't scale as well.
-* Support for searching streams. This can reuse most of the underlying code,
-  but does require careful buffering support.
-* Support for anchored searches, which permit efficient "is prefix" checks for
-  a large number of patterns.
-
-When you combine all of this together along with trying to make everything as
-fast as possible, what you end up with is enitrely too much code with too much
-`unsafe`. Alas, I was not smart enough to figure out how to reduce it. Instead,
-we will explain it.
-
-
-# Basics
-
-The fundamental problem this crate is trying to solve is to determine the
-occurrences of possibly many patterns in a haystack. The naive way to solve
-this is to look for a match for each pattern at each position in the haystack:
-
-    for i in 0..haystack.len():
-      for p in patterns.iter():
-        if haystack[i..].starts_with(p.bytes()):
-          return Match(p.id(), i, i + p.bytes().len())
-
-Those four lines are effectively all this crate does. The problem with those
-four lines is that they are very slow, especially when you're searching for a
-large number of patterns.
-
-While there are many different algorithms available to solve this, a popular
-one is Aho-Corasick. It's a common solution because it's not too hard to
-implement, scales quite well even when searching for thousands of patterns and
-is generally pretty fast. Aho-Corasick does well here because, regardless of
-the number of patterns you're searching for, it always visits each byte in the
-haystack exactly once. This means, generally speaking, adding more patterns to
-an Aho-Corasick automaton does not make it slower. (Strictly speaking, however,
-this is not true, since a larger automaton will make less effective use of the
-CPU's cache.)
-
-Aho-Corasick can be succinctly described as a trie with state transitions
-between some of the nodes that efficiently instruct the search algorithm to
-try matching alternative keys in the trie. The trick is that these state
-transitions are arranged such that each byte of input needs to be inspected
-only once. These state transitions are typically called "failure transitions,"
-because they instruct the searcher (the thing traversing the automaton while
-reading from the haystack) what to do when a byte in the haystack does not
-correspond to a valid transition in the current state of the trie.
-
-More formally, a failure transition points to a state in the automaton that may
-lead to a match whose prefix is a proper suffix of the path traversed through
-the trie so far. (If no such proper suffix exists, then the failure transition
-points back to the start state of the trie, effectively restarting the search.)
-This is perhaps simpler to explain pictorally. For example, let's say we built
-an Aho-Corasick automaton with the following patterns: 'abcd' and 'cef'. The
-trie looks like this:
-
-         a - S1 - b - S2 - c - S3 - d - S4*
-        /
-    S0 - c - S5 - e - S6 - f - S7*
-
-where states marked with a `*` are match states (meaning, the search algorithm
-should stop and report a match to the caller).
-
-So given this trie, it should be somewhat straight-forward to see how it can
-be used to determine whether any particular haystack *starts* with either
-`abcd` or `cef`. It's easy to express this in code:
-
-    fn has_prefix(trie: &Trie, haystack: &[u8]) -> bool {
-      let mut state_id = trie.start();
-      // If the empty pattern is in trie, then state_id is a match state.
-      if trie.is_match(state_id) {
-        return true;
-      }
-      for (i, &b) in haystack.iter().enumerate() {
-        state_id = match trie.next_state(state_id, b) {
-          Some(id) => id,
-          // If there was no transition for this state and byte, then we know
-          // the haystack does not start with one of the patterns in our trie.
-          None => return false,
-        };
-        if trie.is_match(state_id) {
-          return true;
-        }
-      }
-      false
-    }
-
-And that's pretty much it. All we do is move through the trie starting with the
-bytes at the beginning of the haystack. If we find ourselves in a position
-where we can't move, or if we've looked through the entire haystack without
-seeing a match state, then we know the haystack does not start with any of the
-patterns in the trie.
-
-The meat of the Aho-Corasick algorithm is in how we add failure transitions to
-our trie to keep searching efficient. Specifically, it permits us to not only
-check whether a haystack *starts* with any one of a number of patterns, but
-rather, whether the haystack contains any of a number of patterns *anywhere* in
-the haystack.
-
-As mentioned before, failure transitions connect a proper suffix of the path
-traversed through the trie before, with a path that leads to a match that has a
-prefix corresponding to that proper suffix. So in our case, for patterns `abcd`
-and `cef`, with a haystack `abcef`, we want to transition to state `S5` (from
-the diagram above) from `S3` upon seeing that the byte following `c` is not
-`d`. Namely, the proper suffix in this example is `c`, which is a prefix of
-`cef`. So the modified diagram looks like this:
-
-
-         a - S1 - b - S2 - c - S3 - d - S4*
-        /                      /
-       /       ----------------
-      /       /
-    S0 - c - S5 - e - S6 - f - S7*
-
-One thing that isn't shown in this diagram is that *all* states have a failure
-transition, but only `S3` has a *non-trivial* failure transition. That is, all
-other states have a failure transition back to the start state. So if our
-haystack was `abzabcd`, then the searcher would transition back to `S0` after
-seeing `z`, which effectively restarts the search. (Because there is no pattern
-in our trie that has a prefix of `bz` or `z`.)
-
-The code for traversing this *automaton* or *finite state machine* (it is no
-longer just a trie) is not that much different from the `has_prefix` code
-above:
-
-    fn contains(fsm: &FiniteStateMachine, haystack: &[u8]) -> bool {
-      let mut state_id = fsm.start();
-      // If the empty pattern is in fsm, then state_id is a match state.
-      if fsm.is_match(state_id) {
-        return true;
-      }
-      for (i, &b) in haystack.iter().enumerate() {
-        // While the diagram above doesn't show this, we may wind up needing
-        // to follow multiple failure transitions before we land on a state
-        // in which we can advance. Therefore, when searching for the next
-        // state, we need to loop until we don't see a failure transition.
-        //
-        // This loop terminates because the start state has no empty
-        // transitions. Every transition from the start state either points to
-        // another state, or loops back to the start state.
-        loop {
-          match fsm.next_state(state_id, b) {
-            Some(id) => {
-              state_id = id;
-              break;
-            }
-            // Unlike our code above, if there was no transition for this
-            // state, then we don't quit. Instead, we look for this state's
-            // failure transition and follow that instead.
-            None => {
-              state_id = fsm.next_fail_state(state_id);
-            }
-          };
-        }
-        if fsm.is_match(state_id) {
-          return true;
-        }
-      }
-      false
-    }
-
-Other than the complication around traversing failure transitions, this code
-is still roughly "traverse the automaton with bytes from the haystack, and quit
-when a match is seen."
-
-And that concludes our section on the basics. While we didn't go deep into how
-the automaton is built (see `src/nfa/noncontiguous.rs`, which has detailed
-comments about that), the basic structure of Aho-Corasick should be reasonably
-clear.
-
-
-# NFAs and DFAs
-
-There are generally two types of finite automata: non-deterministic finite
-automata (NFA) and deterministic finite automata (DFA). The difference between
-them is, principally, that an NFA can be in multiple states at once. This is
-typically accomplished by things called _epsilon_ transitions, where one could
-move to a new state without consuming any bytes from the input. (The other
-mechanism by which NFAs can be in more than one state is where the same byte in
-a particular state transitions to multiple distinct states.) In contrast, a DFA
-can only ever be in one state at a time. A DFA has no epsilon transitions, and
-for any given state, a byte transitions to at most one other state.
-
-By this formulation, the Aho-Corasick automaton described in the previous
-section is an NFA. This is because failure transitions are, effectively,
-epsilon transitions. That is, whenever the automaton is in state `S`, it is
-actually in the set of states that are reachable by recursively following
-failure transitions from `S` until you reach the start state. (This means
-that, for example, the start state is always active since the start state is
-reachable via failure transitions from any state in the automaton.)
-
-NFAs have a lot of nice properties. They tend to be easier to construct, and
-also tend to use less memory. However, their primary downside is that they are
-typically slower to execute a search with. For example, the code above showing
-how to search with an Aho-Corasick automaton needs to potentially iterate
-through many failure transitions for every byte of input. While this is a
-fairly small amount of overhead, this can add up, especially if the automaton
-has a lot of overlapping patterns with a lot of failure transitions.
-
-A DFA's search code, by contrast, looks like this:
-
-    fn contains(dfa: &DFA, haystack: &[u8]) -> bool {
-      let mut state_id = dfa.start();
-      // If the empty pattern is in dfa, then state_id is a match state.
-      if dfa.is_match(state_id) {
-        return true;
-      }
-      for (i, &b) in haystack.iter().enumerate() {
-        // An Aho-Corasick DFA *never* has a missing state that requires
-        // failure transitions to be followed. One byte of input advances the
-        // automaton by one state. Always.
-        state_id = dfa.next_state(state_id, b);
-        if dfa.is_match(state_id) {
-          return true;
-        }
-      }
-      false
-    }
-
-The search logic here is much simpler than for the NFA, and this tends to
-translate into significant performance benefits as well, since there's a lot
-less work being done for each byte in the haystack. How is this accomplished?
-It's done by pre-following all failure transitions for all states for all bytes
-in the alphabet, and then building a single state transition table. Building
-this DFA can be much more costly than building the NFA, and use much more
-memory, but the better performance can be worth it.
-
-Users of this crate can actually choose between using one of two possible NFAs
-(noncontiguous or contiguous) or a DFA. By default, a contiguous NFA is used,
-in most circumstances, but if the number of patterns is small enough a DFA will
-be used. A contiguous NFA is chosen because it uses orders of magnitude less
-memory than a DFA, takes only a little longer to build than a noncontiguous
-NFA and usually gets pretty close to the search speed of a DFA. (Callers can
-override this automatic selection via the `AhoCorasickBuilder::start_kind`
-configuration.)
-
-
-# More DFA tricks
-
-As described in the previous section, one of the downsides of using a DFA
-is that it uses more memory and can take longer to build. One small way of
-mitigating these concerns is to map the alphabet used by the automaton into
-a smaller space. Typically, the alphabet of a DFA has 256 elements in it:
-one element for each possible value that fits into a byte. However, in many
-cases, one does not need the full alphabet. For example, if all patterns in an
-Aho-Corasick automaton are ASCII letters, then this only uses up 52 distinct
-bytes. As far as the automaton is concerned, the rest of the 204 bytes are
-indistinguishable from one another: they will never disrciminate between a
-match or a non-match. Therefore, in cases like that, the alphabet can be shrunk
-to just 53 elements. One for each ASCII letter, and then another to serve as a
-placeholder for every other unused byte.
-
-In practice, this library doesn't quite compute the optimal set of equivalence
-classes, but it's close enough in most cases. The key idea is that this then
-allows the transition table for the DFA to be potentially much smaller. The
-downside of doing this, however, is that since the transition table is defined
-in terms of this smaller alphabet space, every byte in the haystack must be
-re-mapped to this smaller space. This requires an additional 256-byte table.
-In practice, this can lead to a small search time hit, but it can be difficult
-to measure. Moreover, it can sometimes lead to faster search times for bigger
-automata, since it could be difference between more parts of the automaton
-staying in the CPU cache or not.
-
-One other trick for DFAs employed by this crate is the notion of premultiplying
-state identifiers. Specifically, the normal way to compute the next transition
-in a DFA is via the following (assuming that the transition table is laid out
-sequentially in memory, in row-major order, where the rows are states):
-
-    next_state_id = dfa.transitions[current_state_id * 256 + current_byte]
-
-However, since the value `256` is a fixed constant, we can actually premultiply
-the state identifiers in the table when we build the table initially. Then, the
-next transition computation simply becomes:
-
-    next_state_id = dfa.transitions[current_state_id + current_byte]
-
-This doesn't seem like much, but when this is being executed for every byte of
-input that you're searching, saving that extra multiplication instruction can
-add up.
-
-The same optimization works even when equivalence classes are enabled, as
-described above. The only difference is that the premultiplication is by the
-total number of equivalence classes instead of 256.
-
-There isn't much downside to premultiplying state identifiers, other than it
-imposes a smaller limit on the total number of states in the DFA. Namely, with
-premultiplied state identifiers, you run out of room in your state identifier
-representation more rapidly than if the identifiers are just state indices.
-
-Both equivalence classes and premultiplication are always enabled. There is a
-`AhoCorasickBuilder::byte_classes` configuration, but disabling this just makes
-it so there are always 256 equivalence classes, i.e., every class corresponds
-to precisely one byte. When it's disabled, the equivalence class map itself is
-still used. The purpose of disabling it is when one is debugging the underlying
-automaton. It can be easier to comprehend when it uses actual byte values for
-its transitions instead of equivalence classes.
-
-
-# Match semantics
-
-One of the more interesting things about this implementation of Aho-Corasick
-that (as far as this author knows) separates it from other implementations, is
-that it natively supports leftmost-first and leftmost-longest match semantics.
-Briefly, match semantics refer to the decision procedure by which searching
-will disambiguate matches when there are multiple to choose from:
-
-* **standard** match semantics emits matches as soon as they are detected by
-  the automaton. This is typically equivalent to the textbook non-overlapping
-  formulation of Aho-Corasick.
-* **leftmost-first** match semantics means that 1) the next match is the match
-  starting at the leftmost position and 2) among multiple matches starting at
-  the same leftmost position, the match corresponding to the pattern provided
-  first by the caller is reported.
-* **leftmost-longest** is like leftmost-first, except when there are multiple
-  matches starting at the same leftmost position, the pattern corresponding to
-  the longest match is returned.
-
-(The crate API documentation discusses these differences, with examples, in
-more depth on the `MatchKind` type.)
-
-The reason why supporting these match semantics is important is because it
-gives the user more control over the match procedure. For example,
-leftmost-first permits users to implement match priority by simply putting the
-higher priority patterns first. Leftmost-longest, on the other hand, permits
-finding the longest possible match, which might be useful when trying to find
-words matching a dictionary. Additionally, regex engines often want to use
-Aho-Corasick as an optimization when searching for an alternation of literals.
-In order to preserve correct match semantics, regex engines typically can't use
-the standard textbook definition directly, since regex engines will implement
-either leftmost-first (Perl-like) or leftmost-longest (POSIX) match semantics.
-
-Supporting leftmost semantics requires a couple key changes:
-
-* Constructing the Aho-Corasick automaton changes a bit in both how the trie is
-  constructed and how failure transitions are found. Namely, only a subset
-  of the failure transitions are added. Specifically, only the failure
-  transitions that either do not occur after a match or do occur after a match
-  but preserve that match are kept. (More details on this can be found in
-  `src/nfa/noncontiguous.rs`.)
-* The search algorithm changes slightly. Since we are looking for the leftmost
-  match, we cannot quit as soon as a match is detected. Instead, after a match
-  is detected, we must keep searching until either the end of the input or
-  until a dead state is seen. (Dead states are not used for standard match
-  semantics. Dead states mean that searching should stop after a match has been
-  found.)
-
-Most other implementations of Aho-Corasick do support leftmost match semantics,
-but they do it with more overhead at search time, or even worse, with a queue
-of matches and sophisticated hijinks to disambiguate the matches. While our
-construction algorithm becomes a bit more complicated, the correct match
-semantics fall out from the structure of the automaton itself.
-
-
-# Overlapping matches
-
-One of the nice properties of an Aho-Corasick automaton is that it can report
-all possible matches, even when they overlap with one another. In this mode,
-the match semantics don't matter, since all possible matches are reported.
-Overlapping searches work just like regular searches, except the state
-identifier at which the previous search left off is carried over to the next
-search, so that it can pick up where it left off. If there are additional
-matches at that state, then they are reported before resuming the search.
-
-Enabling leftmost-first or leftmost-longest match semantics causes the
-automaton to use a subset of all failure transitions, which means that
-overlapping searches cannot be used. Therefore, if leftmost match semantics are
-used, attempting to do an overlapping search will return an error (or panic
-when using the infallible APIs). Thus, to get overlapping searches, the caller
-must use the default standard match semantics. This behavior was chosen because
-there are only two alternatives, which were deemed worse:
-
-* Compile two automatons internally, one for standard semantics and one for
-  the semantics requested by the caller (if not standard).
-* Create a new type, distinct from the `AhoCorasick` type, which has different
-  capabilities based on the configuration options.
-
-The first is untenable because of the amount of memory used by the automaton.
-The second increases the complexity of the API too much by adding too many
-types that do similar things. It is conceptually much simpler to keep all
-searching isolated to a single type.
-
-
-# Stream searching
-
-Since Aho-Corasick is an automaton, it is possible to do partial searches on
-partial parts of the haystack, and then resume that search on subsequent pieces
-of the haystack. This is useful when the haystack you're trying to search is
-not stored contiguously in memory, or if one does not want to read the entire
-haystack into memory at once.
-
-Currently, only standard semantics are supported for stream searching. This is
-some of the more complicated code in this crate, and is something I would very
-much like to improve. In particular, it currently has the restriction that it
-must buffer at least enough of the haystack in memory in order to fit the
-longest possible match. The difficulty in getting stream searching right is
-that the implementation choices (such as the buffer size) often impact what the
-API looks like and what it's allowed to do.
-
-
-# Prefilters
-
-In some cases, Aho-Corasick is not the fastest way to find matches containing
-multiple patterns. Sometimes, the search can be accelerated using highly
-optimized SIMD routines. For example, consider searching the following
-patterns:
-
-    Sherlock
-    Moriarty
-    Watson
-
-It is plausible that it would be much faster to quickly look for occurrences of
-the leading bytes, `S`, `M` or `W`, before trying to start searching via the
-automaton. Indeed, this is exactly what this crate will do.
-
-When there are more than three distinct starting bytes, then this crate will
-look for three distinct bytes occurring at any position in the patterns, while
-preferring bytes that are heuristically determined to be rare over others. For
-example:
-
-    Abuzz
-    Sanchez
-    Vasquez
-    Topaz
-    Waltz
-
-Here, we have more than 3 distinct starting bytes, but all of the patterns
-contain `z`, which is typically a rare byte. In this case, the prefilter will
-scan for `z`, back up a bit, and then execute the Aho-Corasick automaton.
-
-If all of that fails, then a packed multiple substring algorithm will be
-attempted. Currently, the only algorithm available for this is Teddy, but more
-may be added in the future. Teddy is unlike the above prefilters in that it
-confirms its own matches, so when Teddy is active, it might not be necessary
-for Aho-Corasick to run at all. However, the current Teddy implementation
-only works in `x86_64` when SSSE3 or AVX2 are available or in `aarch64`
-(using NEON), and moreover, only works _well_ when there are a small number
-of patterns (say, less than 100). Teddy also requires the haystack to be of a
-certain length (more than 16-34 bytes). When the haystack is shorter than that,
-Rabin-Karp is used instead. (See `src/packed/rabinkarp.rs`.)
-
-There is a more thorough description of Teddy at
-[`src/packed/teddy/README.md`](src/packed/teddy/README.md).
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/LICENSE-MIT
deleted file mode 100644
index 3b0a5dc..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/LICENSE-MIT
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Andrew Gallant
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/README.md b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/README.md
deleted file mode 100644
index c0f525fdc..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/README.md
+++ /dev/null
@@ -1,174 +0,0 @@
-aho-corasick
-============
-A library for finding occurrences of many patterns at once with SIMD
-acceleration in some cases. This library provides multiple pattern
-search principally through an implementation of the
-[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm),
-which builds a finite state machine for executing searches in linear time.
-Features include case insensitive matching, overlapping matches, fast searching
-via SIMD and optional full DFA construction and search & replace in streams.
-
-[![Build status](https://github.com/BurntSushi/aho-corasick/workflows/ci/badge.svg)](https://github.com/BurntSushi/aho-corasick/actions)
-[![crates.io](https://img.shields.io/crates/v/aho-corasick.svg)](https://crates.io/crates/aho-corasick)
-
-Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
-
-
-### Documentation
-
-https://docs.rs/aho-corasick
-
-
-### Usage
-
-Run `cargo add aho-corasick` to automatically add this crate as a dependency
-in your `Cargo.toml` file.
-
-
-### Example: basic searching
-
-This example shows how to search for occurrences of multiple patterns
-simultaneously. Each match includes the pattern that matched along with the
-byte offsets of the match.
-
-```rust
-use aho_corasick::{AhoCorasick, PatternID};
-
-let patterns = &["apple", "maple", "Snapple"];
-let haystack = "Nobody likes maple in their apple flavored Snapple.";
-
-let ac = AhoCorasick::new(patterns).unwrap();
-let mut matches = vec![];
-for mat in ac.find_iter(haystack) {
-    matches.push((mat.pattern(), mat.start(), mat.end()));
-}
-assert_eq!(matches, vec![
-    (PatternID::must(1), 13, 18),
-    (PatternID::must(0), 28, 33),
-    (PatternID::must(2), 43, 50),
-]);
-```
-
-
-### Example: ASCII case insensitivity
-
-This is like the previous example, but matches `Snapple` case insensitively
-using `AhoCorasickBuilder`:
-
-```rust
-use aho_corasick::{AhoCorasick, PatternID};
-
-let patterns = &["apple", "maple", "snapple"];
-let haystack = "Nobody likes maple in their apple flavored Snapple.";
-
-let ac = AhoCorasick::builder()
-    .ascii_case_insensitive(true)
-    .build(patterns)
-    .unwrap();
-let mut matches = vec![];
-for mat in ac.find_iter(haystack) {
-    matches.push((mat.pattern(), mat.start(), mat.end()));
-}
-assert_eq!(matches, vec![
-    (PatternID::must(1), 13, 18),
-    (PatternID::must(0), 28, 33),
-    (PatternID::must(2), 43, 50),
-]);
-```
-
-
-### Example: replacing matches in a stream
-
-This example shows how to execute a search and replace on a stream without
-loading the entire stream into memory first.
-
-```rust,ignore
-use aho_corasick::AhoCorasick;
-
-let patterns = &["fox", "brown", "quick"];
-let replace_with = &["sloth", "grey", "slow"];
-
-// In a real example, these might be `std::fs::File`s instead. All you need to
-// do is supply a pair of `std::io::Read` and `std::io::Write` implementations.
-let rdr = "The quick brown fox.";
-let mut wtr = vec![];
-
-let ac = AhoCorasick::new(patterns).unwrap();
-ac.stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with)
-    .expect("stream_replace_all failed");
-assert_eq!(b"The slow grey sloth.".to_vec(), wtr);
-```
-
-
-### Example: finding the leftmost first match
-
-In the textbook description of Aho-Corasick, its formulation is typically
-structured such that it reports all possible matches, even when they overlap
-with another. In many cases, overlapping matches may not be desired, such as
-the case of finding all successive non-overlapping matches like you might with
-a standard regular expression.
-
-Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do
-this doesn't always work in the expected way, since it will report matches as
-soon as they are seen. For example, consider matching the regex `Samwise|Sam`
-against the text `Samwise`. Most regex engines (that are Perl-like, or
-non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick
-algorithm modified for reporting non-overlapping matches will report `Sam`.
-
-A novel contribution of this library is the ability to change the match
-semantics of Aho-Corasick (without additional search time overhead) such that
-`Samwise` is reported instead. For example, here's the standard approach:
-
-```rust
-use aho_corasick::AhoCorasick;
-
-let patterns = &["Samwise", "Sam"];
-let haystack = "Samwise";
-
-let ac = AhoCorasick::new(patterns).unwrap();
-let mat = ac.find(haystack).expect("should have a match");
-assert_eq!("Sam", &haystack[mat.start()..mat.end()]);
-```
-
-And now here's the leftmost-first version, which matches how a Perl-like
-regex will work:
-
-```rust
-use aho_corasick::{AhoCorasick, MatchKind};
-
-let patterns = &["Samwise", "Sam"];
-let haystack = "Samwise";
-
-let ac = AhoCorasick::builder()
-    .match_kind(MatchKind::LeftmostFirst)
-    .build(patterns)
-    .unwrap();
-let mat = ac.find(haystack).expect("should have a match");
-assert_eq!("Samwise", &haystack[mat.start()..mat.end()]);
-```
-
-In addition to leftmost-first semantics, this library also supports
-leftmost-longest semantics, which match the POSIX behavior of a regular
-expression alternation. See `MatchKind` in the docs for more details.
-
-
-### Minimum Rust version policy
-
-This crate's minimum supported `rustc` version is `1.60.0`.
-
-The current policy is that the minimum Rust version required to use this crate
-can be increased in minor version updates. For example, if `crate 1.0` requires
-Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust
-1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum
-version of Rust.
-
-In general, this crate will be conservative with respect to the minimum
-supported version of Rust.
-
-
-### FFI bindings
-
-* [G-Research/ahocorasick_rs](https://github.com/G-Research/ahocorasick_rs/)
-is a Python wrapper for this library.
-* [tmikus/ahocorasick_rs](https://github.com/tmikus/ahocorasick_rs) is a Go
-    wrapper for this library.
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/UNLICENSE b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/UNLICENSE
deleted file mode 100644
index 68a49da..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/UNLICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-This is free and unencumbered software released into the public domain.
-
-Anyone is free to copy, modify, publish, use, compile, sell, or
-distribute this software, either in source code form or as a compiled
-binary, for any purpose, commercial or non-commercial, and by any
-means.
-
-In jurisdictions that recognize copyright laws, the author or authors
-of this software dedicate any and all copyright interest in the
-software to the public domain. We make this dedication for the benefit
-of the public at large and to the detriment of our heirs and
-successors. We intend this dedication to be an overt act of
-relinquishment in perpetuity of all present and future rights to this
-software under copyright law.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-For more information, please refer to <http://unlicense.org/>
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/rustfmt.toml b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/rustfmt.toml
deleted file mode 100644
index aa37a218..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/rustfmt.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-max_width = 79
-use_small_heuristics = "max"
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/ahocorasick.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/ahocorasick.rs
deleted file mode 100644
index 2947627..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/ahocorasick.rs
+++ /dev/null
@@ -1,2789 +0,0 @@
-use core::{
-    fmt::Debug,
-    panic::{RefUnwindSafe, UnwindSafe},
-};
-
-use alloc::{string::String, sync::Arc, vec::Vec};
-
-use crate::{
-    automaton::{self, Automaton, OverlappingState},
-    dfa,
-    nfa::{contiguous, noncontiguous},
-    util::{
-        error::{BuildError, MatchError},
-        prefilter::Prefilter,
-        primitives::{PatternID, StateID},
-        search::{Anchored, Input, Match, MatchKind, StartKind},
-    },
-};
-
-/// An automaton for searching multiple strings in linear time.
-///
-/// The `AhoCorasick` type supports a few basic ways of constructing an
-/// automaton, with the default being [`AhoCorasick::new`]. However, there
-/// are a fair number of configurable options that can be set by using
-/// [`AhoCorasickBuilder`] instead. Such options include, but are not limited
-/// to, how matches are determined, simple case insensitivity, whether to use a
-/// DFA or not and various knobs for controlling the space-vs-time trade offs
-/// taken when building the automaton.
-///
-/// # Resource usage
-///
-/// Aho-Corasick automatons are always constructed in `O(p)` time, where
-/// `p` is the combined length of all patterns being searched. With that
-/// said, building an automaton can be fairly costly because of high constant
-/// factors, particularly when enabling the [DFA](AhoCorasickKind::DFA) option
-/// with [`AhoCorasickBuilder::kind`]. For this reason, it's generally a good
-/// idea to build an automaton once and reuse it as much as possible.
-///
-/// Aho-Corasick automatons can also use a fair bit of memory. To get
-/// a concrete idea of how much memory is being used, try using the
-/// [`AhoCorasick::memory_usage`] method.
-///
-/// To give a quick idea of the differences between Aho-Corasick
-/// implementations and their resource usage, here's a sample of construction
-/// times and heap memory used after building an automaton from 100,000
-/// randomly selected titles from Wikipedia:
-///
-/// * 99MB for a [`noncontiguous::NFA`] in 240ms.
-/// * 21MB for a [`contiguous::NFA`] in 275ms.
-/// * 1.6GB for a [`dfa::DFA`] in 1.88s.
-///
-/// (Note that the memory usage above reflects the size of each automaton and
-/// not peak memory usage. For example, building a contiguous NFA requires
-/// first building a noncontiguous NFA. Once the contiguous NFA is built, the
-/// noncontiguous NFA is freed.)
-///
-/// This experiment very strongly argues that a contiguous NFA is often the
-/// best balance in terms of resource usage. It takes a little longer to build,
-/// but its memory usage is quite small. Its search speed (not listed) is
-/// also often faster than a noncontiguous NFA, but a little slower than a
-/// DFA. Indeed, when no specific [`AhoCorasickKind`] is used (which is the
-/// default), a contiguous NFA is used in most cases.
-///
-/// The only "catch" to using a contiguous NFA is that, because of its variety
-/// of compression tricks, it may not be able to support automatons as large as
-/// what the noncontiguous NFA supports. In which case, building a contiguous
-/// NFA will fail and (by default) `AhoCorasick` will automatically fall
-/// back to a noncontiguous NFA. (This typically only happens when building
-/// automatons from millions of patterns.) Otherwise, the small additional time
-/// for building a contiguous NFA is almost certainly worth it.
-///
-/// # Cloning
-///
-/// The `AhoCorasick` type uses thread safe reference counting internally. It
-/// is guaranteed that it is cheap to clone.
-///
-/// # Search configuration
-///
-/// Most of the search routines accept anything that can be cheaply converted
-/// to an [`Input`]. This includes `&[u8]`, `&str` and `Input` itself.
-///
-/// # Construction failure
-///
-/// It is generally possible for building an Aho-Corasick automaton to fail.
-/// Construction can fail in generally one way: when the inputs provided are
-/// too big. Whether that's a pattern that is too long, too many patterns
-/// or some combination of both. A first approximation for the scale at which
-/// construction can fail is somewhere around "millions of patterns."
-///
-/// For that reason, if you're building an Aho-Corasick automaton from
-/// untrusted input (or input that doesn't have any reasonable bounds on its
-/// size), then it is strongly recommended to handle the possibility of an
-/// error.
-///
-/// If you're constructing an Aho-Corasick automaton from static or trusted
-/// data, then it is likely acceptable to panic (by calling `unwrap()` or
-/// `expect()`) if construction fails.
-///
-/// # Fallibility
-///
-/// The `AhoCorasick` type provides a number of methods for searching, as one
-/// might expect. Depending on how the Aho-Corasick automaton was built and
-/// depending on the search configuration, it is possible for a search to
-/// return an error. Since an error is _never_ dependent on the actual contents
-/// of the haystack, this type provides both infallible and fallible methods
-/// for searching. The infallible methods panic if an error occurs, and can be
-/// used for convenience and when you know the search will never return an
-/// error.
-///
-/// For example, the [`AhoCorasick::find_iter`] method is the infallible
-/// version of the [`AhoCorasick::try_find_iter`] method.
-///
-/// Examples of errors that can occur:
-///
-/// * Running a search that requires [`MatchKind::Standard`] semantics (such
-/// as a stream or overlapping search) with an automaton that was built with
-/// [`MatchKind::LeftmostFirst`] or [`MatchKind::LeftmostLongest`] semantics.
-/// * Running an anchored search with an automaton that only supports
-/// unanchored searches. (By default, `AhoCorasick` only supports unanchored
-/// searches. But this can be toggled with [`AhoCorasickBuilder::start_kind`].)
-/// * Running an unanchored search with an automaton that only supports
-/// anchored searches.
-///
-/// The common thread between the different types of errors is that they are
-/// all rooted in the automaton construction and search configurations. If
-/// those configurations are a static property of your program, then it is
-/// reasonable to call infallible routines since you know an error will never
-/// occur. And if one _does_ occur, then it's a bug in your program.
-///
-/// To re-iterate, if the patterns, build or search configuration come from
-/// user or untrusted data, then you should handle errors at build or search
-/// time. If only the haystack comes from user or untrusted data, then there
-/// should be no need to handle errors anywhere and it is generally encouraged
-/// to `unwrap()` (or `expect()`) both build and search time calls.
-///
-/// # Examples
-///
-/// This example shows how to search for occurrences of multiple patterns
-/// simultaneously in a case insensitive fashion. Each match includes the
-/// pattern that matched along with the byte offsets of the match.
-///
-/// ```
-/// use aho_corasick::{AhoCorasick, PatternID};
-///
-/// let patterns = &["apple", "maple", "snapple"];
-/// let haystack = "Nobody likes maple in their apple flavored Snapple.";
-///
-/// let ac = AhoCorasick::builder()
-///     .ascii_case_insensitive(true)
-///     .build(patterns)
-///     .unwrap();
-/// let mut matches = vec![];
-/// for mat in ac.find_iter(haystack) {
-///     matches.push((mat.pattern(), mat.start(), mat.end()));
-/// }
-/// assert_eq!(matches, vec![
-///     (PatternID::must(1), 13, 18),
-///     (PatternID::must(0), 28, 33),
-///     (PatternID::must(2), 43, 50),
-/// ]);
-/// ```
-///
-/// This example shows how to replace matches with some other string:
-///
-/// ```
-/// use aho_corasick::AhoCorasick;
-///
-/// let patterns = &["fox", "brown", "quick"];
-/// let haystack = "The quick brown fox.";
-/// let replace_with = &["sloth", "grey", "slow"];
-///
-/// let ac = AhoCorasick::new(patterns).unwrap();
-/// let result = ac.replace_all(haystack, replace_with);
-/// assert_eq!(result, "The slow grey sloth.");
-/// ```
-#[derive(Clone)]
-pub struct AhoCorasick {
-    /// The underlying Aho-Corasick automaton. It's one of
-    /// nfa::noncontiguous::NFA, nfa::contiguous::NFA or dfa::DFA.
-    aut: Arc<dyn AcAutomaton>,
-    /// The specific Aho-Corasick kind chosen. This makes it possible to
-    /// inspect any `AhoCorasick` and know what kind of search strategy it
-    /// uses.
-    kind: AhoCorasickKind,
-    /// The start kind of this automaton as configured by the caller.
-    ///
-    /// We don't really *need* to put this here, since the underlying automaton
-    /// will correctly return errors if the caller requests an unsupported
-    /// search type. But we do keep this here for API behavior consistency.
-    /// Namely, the NFAs in this crate support both unanchored and anchored
-    /// searches unconditionally. There's no way to disable one or the other.
-    /// They always both work. But the DFA in this crate specifically only
-    /// supports both unanchored and anchored searches if it's configured to
-    /// do so. Why? Because for the DFA, supporting both essentially requires
-    /// two copies of the transition table: one generated by following failure
-    /// transitions from the original NFA and one generated by not following
-    /// those failure transitions.
-    ///
-    /// So why record the start kind here? Well, consider what happens
-    /// when no specific 'AhoCorasickKind' is selected by the caller and
-    /// 'StartKind::Unanchored' is used (both are the default). It *might*
-    /// result in using a DFA or it might pick an NFA. If it picks an NFA, the
-    /// caller would then be able to run anchored searches, even though the
-    /// caller only asked for support for unanchored searches. Maybe that's
-    /// fine, but what if the DFA was chosen instead? Oops, the caller would
-    /// get an error.
-    ///
-    /// Basically, it seems bad to return an error or not based on some
-    /// internal implementation choice. So we smooth things out and ensure
-    /// anchored searches *always* report an error when only unanchored support
-    /// was asked for (and vice versa), even if the underlying automaton
-    /// supports it.
-    start_kind: StartKind,
-}
-
-/// Convenience constructors for an Aho-Corasick searcher. To configure the
-/// searcher, use an [`AhoCorasickBuilder`] instead.
-impl AhoCorasick {
-    /// Create a new Aho-Corasick automaton using the default configuration.
-    ///
-    /// The default configuration optimizes for less space usage, but at the
-    /// expense of longer search times. To change the configuration, use
-    /// [`AhoCorasickBuilder`].
-    ///
-    /// This uses the default [`MatchKind::Standard`] match semantics, which
-    /// reports a match as soon as it is found. This corresponds to the
-    /// standard match semantics supported by textbook descriptions of the
-    /// Aho-Corasick algorithm.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, PatternID};
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "bar", "baz"]).unwrap();
-    /// assert_eq!(
-    ///     Some(PatternID::must(1)),
-    ///     ac.find("xxx bar xxx").map(|m| m.pattern()),
-    /// );
-    /// ```
-    pub fn new<I, P>(patterns: I) -> Result<AhoCorasick, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        AhoCorasickBuilder::new().build(patterns)
-    }
-
-    /// A convenience method for returning a new Aho-Corasick builder.
-    ///
-    /// This usually permits one to just import the `AhoCorasick` type.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Match, MatchKind};
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(&["samwise", "sam"])
-    ///     .unwrap();
-    /// assert_eq!(Some(Match::must(0, 0..7)), ac.find("samwise"));
-    /// ```
-    pub fn builder() -> AhoCorasickBuilder {
-        AhoCorasickBuilder::new()
-    }
-}
-
-/// Infallible search routines. These APIs panic when the underlying search
-/// would otherwise fail. Infallible routines are useful because the errors are
-/// a result of both search-time configuration and what configuration is used
-/// to build the Aho-Corasick searcher. Both of these things are not usually
-/// the result of user input, and thus, an error is typically indicative of a
-/// programmer error. In cases where callers want errors instead of panics, use
-/// the corresponding `try` method in the section below.
-impl AhoCorasick {
-    /// Returns true if and only if this automaton matches the haystack at any
-    /// position.
-    ///
-    /// `input` may be any type that is cheaply convertible to an `Input`. This
-    /// includes, but is not limited to, `&str` and `&[u8]`.
-    ///
-    /// Aside from convenience, when `AhoCorasick` was built with
-    /// leftmost-first or leftmost-longest semantics, this might result in a
-    /// search that visits less of the haystack than [`AhoCorasick::find`]
-    /// would otherwise. (For standard semantics, matches are always
-    /// immediately returned once they are seen, so there is no way for this to
-    /// do less work in that case.)
-    ///
-    /// Note that there is no corresponding fallible routine for this method.
-    /// If you need a fallible version of this, then [`AhoCorasick::try_find`]
-    /// can be used with [`Input::earliest`] enabled.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let ac = AhoCorasick::new(&[
-    ///     "foo", "bar", "quux", "baz",
-    /// ]).unwrap();
-    /// assert!(ac.is_match("xxx bar xxx"));
-    /// assert!(!ac.is_match("xxx qux xxx"));
-    /// ```
-    pub fn is_match<'h, I: Into<Input<'h>>>(&self, input: I) -> bool {
-        self.aut
-            .try_find(&input.into().earliest(true))
-            .expect("AhoCorasick::try_find is not expected to fail")
-            .is_some()
-    }
-
-    /// Returns the location of the first match according to the match
-    /// semantics that this automaton was constructed with.
-    ///
-    /// `input` may be any type that is cheaply convertible to an `Input`. This
-    /// includes, but is not limited to, `&str` and `&[u8]`.
-    ///
-    /// This is the infallible version of [`AhoCorasick::try_find`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_find`] would return an error.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage, with standard semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::Standard) // default, not necessary
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.find(haystack).expect("should have a match");
-    /// assert_eq!("b", &haystack[mat.start()..mat.end()]);
-    /// ```
-    ///
-    /// Now with leftmost-first semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.find(haystack).expect("should have a match");
-    /// assert_eq!("abc", &haystack[mat.start()..mat.end()]);
-    /// ```
-    ///
-    /// And finally, leftmost-longest semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostLongest)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.find(haystack).expect("should have a match");
-    /// ```
-    ///
-    /// # Example: configuring a search
-    ///
-    /// Because this method accepts anything that can be turned into an
-    /// [`Input`], it's possible to provide an `Input` directly in order to
-    /// configure the search. In this example, we show how to use the
-    /// `earliest` option to force the search to return as soon as it knows
-    /// a match has occurred.
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Input, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostLongest)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.find(Input::new(haystack).earliest(true))
-    ///     .expect("should have a match");
-    /// // The correct leftmost-longest match here is 'abcd', but since we
-    /// // told the search to quit as soon as it knows a match has occurred,
-    /// // we get a different match back.
-    /// assert_eq!("b", &haystack[mat.start()..mat.end()]);
-    /// ```
-    pub fn find<'h, I: Into<Input<'h>>>(&self, input: I) -> Option<Match> {
-        self.try_find(input)
-            .expect("AhoCorasick::try_find is not expected to fail")
-    }
-
-    /// Returns the location of the first overlapping match in the given
-    /// input with respect to the current state of the underlying searcher.
-    ///
-    /// `input` may be any type that is cheaply convertible to an `Input`. This
-    /// includes, but is not limited to, `&str` and `&[u8]`.
-    ///
-    /// Overlapping searches do not report matches in their return value.
-    /// Instead, matches can be accessed via [`OverlappingState::get_match`]
-    /// after a search call.
-    ///
-    /// This is the infallible version of
-    /// [`AhoCorasick::try_find_overlapping`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_find_overlapping`] would
-    /// return an error. For example, when the Aho-Corasick searcher
-    /// doesn't support overlapping searches. (Only searchers built with
-    /// [`MatchKind::Standard`] semantics support overlapping searches.)
-    ///
-    /// # Example
-    ///
-    /// This shows how we can repeatedly call an overlapping search without
-    /// ever needing to explicitly re-slice the haystack. Overlapping search
-    /// works this way because searches depend on state saved during the
-    /// previous search.
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     automaton::OverlappingState,
-    ///     AhoCorasick, Input, Match,
-    /// };
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let mut state = OverlappingState::start();
-    ///
-    /// ac.find_overlapping(haystack, &mut state);
-    /// assert_eq!(Some(Match::must(2, 0..3)), state.get_match());
-    ///
-    /// ac.find_overlapping(haystack, &mut state);
-    /// assert_eq!(Some(Match::must(0, 0..6)), state.get_match());
-    ///
-    /// ac.find_overlapping(haystack, &mut state);
-    /// assert_eq!(Some(Match::must(2, 11..14)), state.get_match());
-    ///
-    /// ac.find_overlapping(haystack, &mut state);
-    /// assert_eq!(Some(Match::must(2, 22..25)), state.get_match());
-    ///
-    /// ac.find_overlapping(haystack, &mut state);
-    /// assert_eq!(Some(Match::must(0, 22..28)), state.get_match());
-    ///
-    /// ac.find_overlapping(haystack, &mut state);
-    /// assert_eq!(Some(Match::must(1, 22..31)), state.get_match());
-    ///
-    /// // No more match matches to be found.
-    /// ac.find_overlapping(haystack, &mut state);
-    /// assert_eq!(None, state.get_match());
-    /// ```
-    pub fn find_overlapping<'h, I: Into<Input<'h>>>(
-        &self,
-        input: I,
-        state: &mut OverlappingState,
-    ) {
-        self.try_find_overlapping(input, state).expect(
-            "AhoCorasick::try_find_overlapping is not expected to fail",
-        )
-    }
-
-    /// Returns an iterator of non-overlapping matches, using the match
-    /// semantics that this automaton was constructed with.
-    ///
-    /// `input` may be any type that is cheaply convertible to an `Input`. This
-    /// includes, but is not limited to, `&str` and `&[u8]`.
-    ///
-    /// This is the infallible version of [`AhoCorasick::try_find_iter`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_find_iter`] would return an error.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage, with standard semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::Standard) // default, not necessary
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let matches: Vec<PatternID> = ac
-    ///     .find_iter(haystack)
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    /// ], matches);
-    /// ```
-    ///
-    /// Now with leftmost-first semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let matches: Vec<PatternID> = ac
-    ///     .find_iter(haystack)
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(0),
-    ///     PatternID::must(2),
-    ///     PatternID::must(0),
-    /// ], matches);
-    /// ```
-    ///
-    /// And finally, leftmost-longest semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostLongest)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let matches: Vec<PatternID> = ac
-    ///     .find_iter(haystack)
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(0),
-    ///     PatternID::must(2),
-    ///     PatternID::must(1),
-    /// ], matches);
-    /// ```
-    pub fn find_iter<'a, 'h, I: Into<Input<'h>>>(
-        &'a self,
-        input: I,
-    ) -> FindIter<'a, 'h> {
-        self.try_find_iter(input)
-            .expect("AhoCorasick::try_find_iter is not expected to fail")
-    }
-
-    /// Returns an iterator of overlapping matches. Stated differently, this
-    /// returns an iterator of all possible matches at every position.
-    ///
-    /// `input` may be any type that is cheaply convertible to an `Input`. This
-    /// includes, but is not limited to, `&str` and `&[u8]`.
-    ///
-    /// This is the infallible version of
-    /// [`AhoCorasick::try_find_overlapping_iter`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when `AhoCorasick::try_find_overlapping_iter` would return
-    /// an error. For example, when the Aho-Corasick searcher is built with
-    /// either leftmost-first or leftmost-longest match semantics. Stated
-    /// differently, overlapping searches require one to build the searcher
-    /// with [`MatchKind::Standard`] (it is the default).
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let matches: Vec<PatternID> = ac
-    ///     .find_overlapping_iter(haystack)
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(2),
-    ///     PatternID::must(0),
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    ///     PatternID::must(0),
-    ///     PatternID::must(1),
-    /// ], matches);
-    /// ```
-    pub fn find_overlapping_iter<'a, 'h, I: Into<Input<'h>>>(
-        &'a self,
-        input: I,
-    ) -> FindOverlappingIter<'a, 'h> {
-        self.try_find_overlapping_iter(input).expect(
-            "AhoCorasick::try_find_overlapping_iter is not expected to fail",
-        )
-    }
-
-    /// Replace all matches with a corresponding value in the `replace_with`
-    /// slice given. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::find_iter`].
-    ///
-    /// Replacements are determined by the index of the matching pattern.
-    /// For example, if the pattern with index `2` is found, then it is
-    /// replaced by `replace_with[2]`.
-    ///
-    /// This is the infallible version of [`AhoCorasick::try_replace_all`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_replace_all`] would return an
-    /// error.
-    ///
-    /// This also panics when `replace_with.len()` does not equal
-    /// [`AhoCorasick::patterns_len`].
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let result = ac.replace_all(haystack, &["x", "y", "z"]);
-    /// assert_eq!("x the z to the xage", result);
-    /// ```
-    pub fn replace_all<B>(&self, haystack: &str, replace_with: &[B]) -> String
-    where
-        B: AsRef<str>,
-    {
-        self.try_replace_all(haystack, replace_with)
-            .expect("AhoCorasick::try_replace_all is not expected to fail")
-    }
-
-    /// Replace all matches using raw bytes with a corresponding value in the
-    /// `replace_with` slice given. Matches correspond to the same matches as
-    /// reported by [`AhoCorasick::find_iter`].
-    ///
-    /// Replacements are determined by the index of the matching pattern.
-    /// For example, if the pattern with index `2` is found, then it is
-    /// replaced by `replace_with[2]`.
-    ///
-    /// This is the infallible version of
-    /// [`AhoCorasick::try_replace_all_bytes`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_replace_all_bytes`] would return an
-    /// error.
-    ///
-    /// This also panics when `replace_with.len()` does not equal
-    /// [`AhoCorasick::patterns_len`].
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = b"append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let result = ac.replace_all_bytes(haystack, &["x", "y", "z"]);
-    /// assert_eq!(b"x the z to the xage".to_vec(), result);
-    /// ```
-    pub fn replace_all_bytes<B>(
-        &self,
-        haystack: &[u8],
-        replace_with: &[B],
-    ) -> Vec<u8>
-    where
-        B: AsRef<[u8]>,
-    {
-        self.try_replace_all_bytes(haystack, replace_with)
-            .expect("AhoCorasick::try_replace_all_bytes should not fail")
-    }
-
-    /// Replace all matches using a closure called on each match.
-    /// Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::find_iter`].
-    ///
-    /// The closure accepts three parameters: the match found, the text of
-    /// the match and a string buffer with which to write the replaced text
-    /// (if any). If the closure returns `true`, then it continues to the next
-    /// match. If the closure returns `false`, then searching is stopped.
-    ///
-    /// Note that any matches with boundaries that don't fall on a valid UTF-8
-    /// boundary are silently skipped.
-    ///
-    /// This is the infallible version of
-    /// [`AhoCorasick::try_replace_all_with`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_replace_all_with`] would return an
-    /// error.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mut result = String::new();
-    /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| {
-    ///     dst.push_str(&mat.pattern().as_usize().to_string());
-    ///     true
-    /// });
-    /// assert_eq!("0 the 2 to the 0age", result);
-    /// ```
-    ///
-    /// Stopping the replacement by returning `false` (continued from the
-    /// example above):
-    ///
-    /// ```
-    /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    /// # let patterns = &["append", "appendage", "app"];
-    /// # let haystack = "append the app to the appendage";
-    /// # let ac = AhoCorasick::builder()
-    /// #    .match_kind(MatchKind::LeftmostFirst)
-    /// #    .build(patterns)
-    /// #    .unwrap();
-    /// let mut result = String::new();
-    /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| {
-    ///     dst.push_str(&mat.pattern().as_usize().to_string());
-    ///     mat.pattern() != PatternID::must(2)
-    /// });
-    /// assert_eq!("0 the 2 to the appendage", result);
-    /// ```
-    pub fn replace_all_with<F>(
-        &self,
-        haystack: &str,
-        dst: &mut String,
-        replace_with: F,
-    ) where
-        F: FnMut(&Match, &str, &mut String) -> bool,
-    {
-        self.try_replace_all_with(haystack, dst, replace_with)
-            .expect("AhoCorasick::try_replace_all_with should not fail")
-    }
-
-    /// Replace all matches using raw bytes with a closure called on each
-    /// match. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::find_iter`].
-    ///
-    /// The closure accepts three parameters: the match found, the text of
-    /// the match and a byte buffer with which to write the replaced text
-    /// (if any). If the closure returns `true`, then it continues to the next
-    /// match. If the closure returns `false`, then searching is stopped.
-    ///
-    /// This is the infallible version of
-    /// [`AhoCorasick::try_replace_all_with_bytes`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_replace_all_with_bytes`] would
-    /// return an error.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = b"append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mut result = vec![];
-    /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| {
-    ///     dst.extend(mat.pattern().as_usize().to_string().bytes());
-    ///     true
-    /// });
-    /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result);
-    /// ```
-    ///
-    /// Stopping the replacement by returning `false` (continued from the
-    /// example above):
-    ///
-    /// ```
-    /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    /// # let patterns = &["append", "appendage", "app"];
-    /// # let haystack = b"append the app to the appendage";
-    /// # let ac = AhoCorasick::builder()
-    /// #    .match_kind(MatchKind::LeftmostFirst)
-    /// #    .build(patterns)
-    /// #    .unwrap();
-    /// let mut result = vec![];
-    /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| {
-    ///     dst.extend(mat.pattern().as_usize().to_string().bytes());
-    ///     mat.pattern() != PatternID::must(2)
-    /// });
-    /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result);
-    /// ```
-    pub fn replace_all_with_bytes<F>(
-        &self,
-        haystack: &[u8],
-        dst: &mut Vec<u8>,
-        replace_with: F,
-    ) where
-        F: FnMut(&Match, &[u8], &mut Vec<u8>) -> bool,
-    {
-        self.try_replace_all_with_bytes(haystack, dst, replace_with)
-            .expect("AhoCorasick::try_replace_all_with_bytes should not fail")
-    }
-
-    /// Returns an iterator of non-overlapping matches in the given
-    /// stream. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::find_iter`].
-    ///
-    /// The matches yielded by this iterator use absolute position offsets in
-    /// the stream given, where the first byte has index `0`. Matches are
-    /// yieled until the stream is exhausted.
-    ///
-    /// Each item yielded by the iterator is an `Result<Match,
-    /// std::io::Error>`, where an error is yielded if there was a problem
-    /// reading from the reader given.
-    ///
-    /// When searching a stream, an internal buffer is used. Therefore, callers
-    /// should avoiding providing a buffered reader, if possible.
-    ///
-    /// This is the infallible version of
-    /// [`AhoCorasick::try_stream_find_iter`]. Note that both methods return
-    /// iterators that produce `Result` values. The difference is that this
-    /// routine panics if _construction_ of the iterator failed. The `Result`
-    /// values yield by the iterator come from whether the given reader returns
-    /// an error or not during the search.
-    ///
-    /// # Memory usage
-    ///
-    /// In general, searching streams will use a constant amount of memory for
-    /// its internal buffer. The one requirement is that the internal buffer
-    /// must be at least the size of the longest possible match. In most use
-    /// cases, the default buffer size will be much larger than any individual
-    /// match.
-    ///
-    /// # Panics
-    ///
-    /// This panics when [`AhoCorasick::try_stream_find_iter`] would return
-    /// an error. For example, when the Aho-Corasick searcher doesn't support
-    /// stream searches. (Only searchers built with [`MatchKind::Standard`]
-    /// semantics support stream searches.)
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let mut matches = vec![];
-    /// for result in ac.stream_find_iter(haystack.as_bytes()) {
-    ///     let mat = result?;
-    ///     matches.push(mat.pattern());
-    /// }
-    /// assert_eq!(vec![
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    /// ], matches);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[cfg(feature = "std")]
-    pub fn stream_find_iter<'a, R: std::io::Read>(
-        &'a self,
-        rdr: R,
-    ) -> StreamFindIter<'a, R> {
-        self.try_stream_find_iter(rdr)
-            .expect("AhoCorasick::try_stream_find_iter should not fail")
-    }
-}
-
-/// Fallible search routines. These APIs return an error in cases where the
-/// infallible routines would panic.
-impl AhoCorasick {
-    /// Returns the location of the first match according to the match
-    /// semantics that this automaton was constructed with, and according
-    /// to the given `Input` configuration.
-    ///
-    /// This is the fallible version of [`AhoCorasick::find`].
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the given `Input` configuration.
-    ///
-    /// For example, if the Aho-Corasick searcher only supports anchored
-    /// searches or only supports unanchored searches, then providing an
-    /// `Input` that requests an anchored (or unanchored) search when it isn't
-    /// supported would result in an error.
-    ///
-    /// # Example: leftmost-first searching
-    ///
-    /// Basic usage with leftmost-first semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind, Input};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "foo abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.try_find(haystack)?.expect("should have a match");
-    /// assert_eq!("abc", &haystack[mat.span()]);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// # Example: anchored leftmost-first searching
-    ///
-    /// This shows how to anchor the search, so that even if the haystack
-    /// contains a match somewhere, a match won't be reported unless one can
-    /// be found that starts at the beginning of the search:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "foo abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .start_kind(StartKind::Anchored)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let input = Input::new(haystack).anchored(Anchored::Yes);
-    /// assert_eq!(None, ac.try_find(input)?);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// If the beginning of the search is changed to where a match begins, then
-    /// it will be found:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "foo abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .start_kind(StartKind::Anchored)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let input = Input::new(haystack).range(4..).anchored(Anchored::Yes);
-    /// let mat = ac.try_find(input)?.expect("should have a match");
-    /// assert_eq!("abc", &haystack[mat.span()]);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// # Example: earliest leftmost-first searching
-    ///
-    /// This shows how to run an "earliest" search even when the Aho-Corasick
-    /// searcher was compiled with leftmost-first match semantics. In this
-    /// case, the search is stopped as soon as it is known that a match has
-    /// occurred, even if it doesn't correspond to the leftmost-first match.
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Input, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "foo abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let input = Input::new(haystack).earliest(true);
-    /// let mat = ac.try_find(input)?.expect("should have a match");
-    /// assert_eq!("b", &haystack[mat.span()]);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_find<'h, I: Into<Input<'h>>>(
-        &self,
-        input: I,
-    ) -> Result<Option<Match>, MatchError> {
-        let input = input.into();
-        enforce_anchored_consistency(self.start_kind, input.get_anchored())?;
-        self.aut.try_find(&input)
-    }
-
-    /// Returns the location of the first overlapping match in the given
-    /// input with respect to the current state of the underlying searcher.
-    ///
-    /// Overlapping searches do not report matches in their return value.
-    /// Instead, matches can be accessed via [`OverlappingState::get_match`]
-    /// after a search call.
-    ///
-    /// This is the fallible version of [`AhoCorasick::find_overlapping`].
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the given `Input` configuration or if overlapping search is not
-    /// supported.
-    ///
-    /// One example is that only Aho-Corasicker searchers built with
-    /// [`MatchKind::Standard`] semantics support overlapping searches. Using
-    /// any other match semantics will result in this returning an error.
-    ///
-    /// # Example: basic usage
-    ///
-    /// This shows how we can repeatedly call an overlapping search without
-    /// ever needing to explicitly re-slice the haystack. Overlapping search
-    /// works this way because searches depend on state saved during the
-    /// previous search.
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     automaton::OverlappingState,
-    ///     AhoCorasick, Input, Match,
-    /// };
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let mut state = OverlappingState::start();
-    ///
-    /// ac.try_find_overlapping(haystack, &mut state)?;
-    /// assert_eq!(Some(Match::must(2, 0..3)), state.get_match());
-    ///
-    /// ac.try_find_overlapping(haystack, &mut state)?;
-    /// assert_eq!(Some(Match::must(0, 0..6)), state.get_match());
-    ///
-    /// ac.try_find_overlapping(haystack, &mut state)?;
-    /// assert_eq!(Some(Match::must(2, 11..14)), state.get_match());
-    ///
-    /// ac.try_find_overlapping(haystack, &mut state)?;
-    /// assert_eq!(Some(Match::must(2, 22..25)), state.get_match());
-    ///
-    /// ac.try_find_overlapping(haystack, &mut state)?;
-    /// assert_eq!(Some(Match::must(0, 22..28)), state.get_match());
-    ///
-    /// ac.try_find_overlapping(haystack, &mut state)?;
-    /// assert_eq!(Some(Match::must(1, 22..31)), state.get_match());
-    ///
-    /// // No more match matches to be found.
-    /// ac.try_find_overlapping(haystack, &mut state)?;
-    /// assert_eq!(None, state.get_match());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// # Example: implementing your own overlapping iteration
-    ///
-    /// The previous example can be easily adapted to implement your own
-    /// iteration by repeatedly calling `try_find_overlapping` until either
-    /// an error occurs or no more matches are reported.
-    ///
-    /// This is effectively equivalent to the iterator returned by
-    /// [`AhoCorasick::try_find_overlapping_iter`], with the only difference
-    /// being that the iterator checks for errors before construction and
-    /// absolves the caller of needing to check for errors on every search
-    /// call. (Indeed, if the first `try_find_overlapping` call succeeds and
-    /// the same `Input` is given to subsequent calls, then all subsequent
-    /// calls are guaranteed to succeed.)
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     automaton::OverlappingState,
-    ///     AhoCorasick, Input, Match,
-    /// };
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let mut state = OverlappingState::start();
-    /// let mut matches = vec![];
-    ///
-    /// loop {
-    ///     ac.try_find_overlapping(haystack, &mut state)?;
-    ///     let mat = match state.get_match() {
-    ///         None => break,
-    ///         Some(mat) => mat,
-    ///     };
-    ///     matches.push(mat);
-    /// }
-    /// let expected = vec![
-    ///     Match::must(2, 0..3),
-    ///     Match::must(0, 0..6),
-    ///     Match::must(2, 11..14),
-    ///     Match::must(2, 22..25),
-    ///     Match::must(0, 22..28),
-    ///     Match::must(1, 22..31),
-    /// ];
-    /// assert_eq!(expected, matches);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// # Example: anchored iteration
-    ///
-    /// The previous example can also be adapted to implement
-    /// iteration over all anchored matches. In particular,
-    /// [`AhoCorasick::try_find_overlapping_iter`] does not support this
-    /// because it isn't totally clear what the match semantics ought to be.
-    ///
-    /// In this example, we will find all overlapping matches that start at
-    /// the beginning of our search.
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     automaton::OverlappingState,
-    ///     AhoCorasick, Anchored, Input, Match, StartKind,
-    /// };
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .start_kind(StartKind::Anchored)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let input = Input::new(haystack).anchored(Anchored::Yes);
-    /// let mut state = OverlappingState::start();
-    /// let mut matches = vec![];
-    ///
-    /// loop {
-    ///     ac.try_find_overlapping(input.clone(), &mut state)?;
-    ///     let mat = match state.get_match() {
-    ///         None => break,
-    ///         Some(mat) => mat,
-    ///     };
-    ///     matches.push(mat);
-    /// }
-    /// let expected = vec![
-    ///     Match::must(2, 0..3),
-    ///     Match::must(0, 0..6),
-    /// ];
-    /// assert_eq!(expected, matches);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_find_overlapping<'h, I: Into<Input<'h>>>(
-        &self,
-        input: I,
-        state: &mut OverlappingState,
-    ) -> Result<(), MatchError> {
-        let input = input.into();
-        enforce_anchored_consistency(self.start_kind, input.get_anchored())?;
-        self.aut.try_find_overlapping(&input, state)
-    }
-
-    /// Returns an iterator of non-overlapping matches, using the match
-    /// semantics that this automaton was constructed with.
-    ///
-    /// This is the fallible version of [`AhoCorasick::find_iter`].
-    ///
-    /// Note that the error returned by this method occurs during construction
-    /// of the iterator. The iterator itself yields `Match` values. That is,
-    /// once the iterator is constructed, the iteration itself will never
-    /// report an error.
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the given `Input` configuration.
-    ///
-    /// For example, if the Aho-Corasick searcher only supports anchored
-    /// searches or only supports unanchored searches, then providing an
-    /// `Input` that requests an anchored (or unanchored) search when it isn't
-    /// supported would result in an error.
-    ///
-    /// # Example: leftmost-first searching
-    ///
-    /// Basic usage with leftmost-first semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Input, MatchKind, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let matches: Vec<PatternID> = ac
-    ///     .try_find_iter(Input::new(haystack))?
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(0),
-    ///     PatternID::must(2),
-    ///     PatternID::must(0),
-    /// ], matches);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// # Example: anchored leftmost-first searching
-    ///
-    /// This shows how to anchor the search, such that all matches must begin
-    /// at the starting location of the search. For an iterator, an anchored
-    /// search implies that all matches are adjacent.
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     AhoCorasick, Anchored, Input, MatchKind, PatternID, StartKind,
-    /// };
-    ///
-    /// let patterns = &["foo", "bar", "quux"];
-    /// let haystack = "fooquuxbar foo";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .start_kind(StartKind::Anchored)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let matches: Vec<PatternID> = ac
-    ///     .try_find_iter(Input::new(haystack).anchored(Anchored::Yes))?
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(0),
-    ///     PatternID::must(2),
-    ///     PatternID::must(1),
-    ///     // The final 'foo' is not found because it is not adjacent to the
-    ///     // 'bar' match. It needs to be adjacent because our search is
-    ///     // anchored.
-    /// ], matches);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_find_iter<'a, 'h, I: Into<Input<'h>>>(
-        &'a self,
-        input: I,
-    ) -> Result<FindIter<'a, 'h>, MatchError> {
-        let input = input.into();
-        enforce_anchored_consistency(self.start_kind, input.get_anchored())?;
-        Ok(FindIter(self.aut.try_find_iter(input)?))
-    }
-
-    /// Returns an iterator of overlapping matches.
-    ///
-    /// This is the fallible version of [`AhoCorasick::find_overlapping_iter`].
-    ///
-    /// Note that the error returned by this method occurs during construction
-    /// of the iterator. The iterator itself yields `Match` values. That is,
-    /// once the iterator is constructed, the iteration itself will never
-    /// report an error.
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the given `Input` configuration or does not support overlapping
-    /// searches.
-    ///
-    /// One example is that only Aho-Corasicker searchers built with
-    /// [`MatchKind::Standard`] semantics support overlapping searches. Using
-    /// any other match semantics will result in this returning an error.
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Input, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let matches: Vec<PatternID> = ac
-    ///     .try_find_overlapping_iter(Input::new(haystack))?
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(2),
-    ///     PatternID::must(0),
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    ///     PatternID::must(0),
-    ///     PatternID::must(1),
-    /// ], matches);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// # Example: anchored overlapping search returns an error
-    ///
-    /// It isn't clear what the match semantics for anchored overlapping
-    /// iterators *ought* to be, so currently an error is returned. Callers
-    /// may use [`AhoCorasick::try_find_overlapping`] to implement their own
-    /// semantics if desired.
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Anchored, Input, StartKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "appendappendage app";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .start_kind(StartKind::Anchored)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let input = Input::new(haystack).anchored(Anchored::Yes);
-    /// assert!(ac.try_find_overlapping_iter(input).is_err());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_find_overlapping_iter<'a, 'h, I: Into<Input<'h>>>(
-        &'a self,
-        input: I,
-    ) -> Result<FindOverlappingIter<'a, 'h>, MatchError> {
-        let input = input.into();
-        enforce_anchored_consistency(self.start_kind, input.get_anchored())?;
-        Ok(FindOverlappingIter(self.aut.try_find_overlapping_iter(input)?))
-    }
-
-    /// Replace all matches with a corresponding value in the `replace_with`
-    /// slice given. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::try_find_iter`].
-    ///
-    /// Replacements are determined by the index of the matching pattern.
-    /// For example, if the pattern with index `2` is found, then it is
-    /// replaced by `replace_with[2]`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `replace_with.len()` does not equal
-    /// [`AhoCorasick::patterns_len`].
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration. More specifically, this occurs only
-    /// when the Aho-Corasick searcher does not support unanchored searches
-    /// since this replacement routine always does an unanchored search.
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let result = ac.try_replace_all(haystack, &["x", "y", "z"])?;
-    /// assert_eq!("x the z to the xage", result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_replace_all<B>(
-        &self,
-        haystack: &str,
-        replace_with: &[B],
-    ) -> Result<String, MatchError>
-    where
-        B: AsRef<str>,
-    {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)?;
-        self.aut.try_replace_all(haystack, replace_with)
-    }
-
-    /// Replace all matches using raw bytes with a corresponding value in the
-    /// `replace_with` slice given. Matches correspond to the same matches as
-    /// reported by [`AhoCorasick::try_find_iter`].
-    ///
-    /// Replacements are determined by the index of the matching pattern.
-    /// For example, if the pattern with index `2` is found, then it is
-    /// replaced by `replace_with[2]`.
-    ///
-    /// This is the fallible version of [`AhoCorasick::replace_all_bytes`].
-    ///
-    /// # Panics
-    ///
-    /// This panics when `replace_with.len()` does not equal
-    /// [`AhoCorasick::patterns_len`].
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration. More specifically, this occurs only
-    /// when the Aho-Corasick searcher does not support unanchored searches
-    /// since this replacement routine always does an unanchored search.
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = b"append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let result = ac.try_replace_all_bytes(haystack, &["x", "y", "z"])?;
-    /// assert_eq!(b"x the z to the xage".to_vec(), result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_replace_all_bytes<B>(
-        &self,
-        haystack: &[u8],
-        replace_with: &[B],
-    ) -> Result<Vec<u8>, MatchError>
-    where
-        B: AsRef<[u8]>,
-    {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)?;
-        self.aut.try_replace_all_bytes(haystack, replace_with)
-    }
-
-    /// Replace all matches using a closure called on each match.
-    /// Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::try_find_iter`].
-    ///
-    /// The closure accepts three parameters: the match found, the text of
-    /// the match and a string buffer with which to write the replaced text
-    /// (if any). If the closure returns `true`, then it continues to the next
-    /// match. If the closure returns `false`, then searching is stopped.
-    ///
-    /// Note that any matches with boundaries that don't fall on a valid UTF-8
-    /// boundary are silently skipped.
-    ///
-    /// This is the fallible version of [`AhoCorasick::replace_all_with`].
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration. More specifically, this occurs only
-    /// when the Aho-Corasick searcher does not support unanchored searches
-    /// since this replacement routine always does an unanchored search.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mut result = String::new();
-    /// ac.try_replace_all_with(haystack, &mut result, |mat, _, dst| {
-    ///     dst.push_str(&mat.pattern().as_usize().to_string());
-    ///     true
-    /// })?;
-    /// assert_eq!("0 the 2 to the 0age", result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// Stopping the replacement by returning `false` (continued from the
-    /// example above):
-    ///
-    /// ```
-    /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    /// # let patterns = &["append", "appendage", "app"];
-    /// # let haystack = "append the app to the appendage";
-    /// # let ac = AhoCorasick::builder()
-    /// #    .match_kind(MatchKind::LeftmostFirst)
-    /// #    .build(patterns)
-    /// #    .unwrap();
-    /// let mut result = String::new();
-    /// ac.try_replace_all_with(haystack, &mut result, |mat, _, dst| {
-    ///     dst.push_str(&mat.pattern().as_usize().to_string());
-    ///     mat.pattern() != PatternID::must(2)
-    /// })?;
-    /// assert_eq!("0 the 2 to the appendage", result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_replace_all_with<F>(
-        &self,
-        haystack: &str,
-        dst: &mut String,
-        replace_with: F,
-    ) -> Result<(), MatchError>
-    where
-        F: FnMut(&Match, &str, &mut String) -> bool,
-    {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)?;
-        self.aut.try_replace_all_with(haystack, dst, replace_with)
-    }
-
-    /// Replace all matches using raw bytes with a closure called on each
-    /// match. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::try_find_iter`].
-    ///
-    /// The closure accepts three parameters: the match found, the text of
-    /// the match and a byte buffer with which to write the replaced text
-    /// (if any). If the closure returns `true`, then it continues to the next
-    /// match. If the closure returns `false`, then searching is stopped.
-    ///
-    /// This is the fallible version of
-    /// [`AhoCorasick::replace_all_with_bytes`].
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration. More specifically, this occurs only
-    /// when the Aho-Corasick searcher does not support unanchored searches
-    /// since this replacement routine always does an unanchored search.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = b"append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mut result = vec![];
-    /// ac.try_replace_all_with_bytes(haystack, &mut result, |mat, _, dst| {
-    ///     dst.extend(mat.pattern().as_usize().to_string().bytes());
-    ///     true
-    /// })?;
-    /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// Stopping the replacement by returning `false` (continued from the
-    /// example above):
-    ///
-    /// ```
-    /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    /// # let patterns = &["append", "appendage", "app"];
-    /// # let haystack = b"append the app to the appendage";
-    /// # let ac = AhoCorasick::builder()
-    /// #    .match_kind(MatchKind::LeftmostFirst)
-    /// #    .build(patterns)
-    /// #    .unwrap();
-    /// let mut result = vec![];
-    /// ac.try_replace_all_with_bytes(haystack, &mut result, |mat, _, dst| {
-    ///     dst.extend(mat.pattern().as_usize().to_string().bytes());
-    ///     mat.pattern() != PatternID::must(2)
-    /// })?;
-    /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_replace_all_with_bytes<F>(
-        &self,
-        haystack: &[u8],
-        dst: &mut Vec<u8>,
-        replace_with: F,
-    ) -> Result<(), MatchError>
-    where
-        F: FnMut(&Match, &[u8], &mut Vec<u8>) -> bool,
-    {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)?;
-        self.aut.try_replace_all_with_bytes(haystack, dst, replace_with)
-    }
-
-    /// Returns an iterator of non-overlapping matches in the given
-    /// stream. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::try_find_iter`].
-    ///
-    /// The matches yielded by this iterator use absolute position offsets in
-    /// the stream given, where the first byte has index `0`. Matches are
-    /// yieled until the stream is exhausted.
-    ///
-    /// Each item yielded by the iterator is an `Result<Match,
-    /// std::io::Error>`, where an error is yielded if there was a problem
-    /// reading from the reader given.
-    ///
-    /// When searching a stream, an internal buffer is used. Therefore, callers
-    /// should avoiding providing a buffered reader, if possible.
-    ///
-    /// This is the fallible version of [`AhoCorasick::stream_find_iter`].
-    /// Note that both methods return iterators that produce `Result` values.
-    /// The difference is that this routine returns an error if _construction_
-    /// of the iterator failed. The `Result` values yield by the iterator
-    /// come from whether the given reader returns an error or not during the
-    /// search.
-    ///
-    /// # Memory usage
-    ///
-    /// In general, searching streams will use a constant amount of memory for
-    /// its internal buffer. The one requirement is that the internal buffer
-    /// must be at least the size of the longest possible match. In most use
-    /// cases, the default buffer size will be much larger than any individual
-    /// match.
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration. More specifically, this occurs only
-    /// when the Aho-Corasick searcher does not support unanchored searches
-    /// since this stream searching routine always does an unanchored search.
-    ///
-    /// This also returns an error if the searcher does not support stream
-    /// searches. Only searchers built with [`MatchKind::Standard`] semantics
-    /// support stream searches.
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, PatternID};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = "append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let mut matches = vec![];
-    /// for result in ac.try_stream_find_iter(haystack.as_bytes())? {
-    ///     let mat = result?;
-    ///     matches.push(mat.pattern());
-    /// }
-    /// assert_eq!(vec![
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    ///     PatternID::must(2),
-    /// ], matches);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[cfg(feature = "std")]
-    pub fn try_stream_find_iter<'a, R: std::io::Read>(
-        &'a self,
-        rdr: R,
-    ) -> Result<StreamFindIter<'a, R>, MatchError> {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)?;
-        self.aut.try_stream_find_iter(rdr).map(StreamFindIter)
-    }
-
-    /// Search for and replace all matches of this automaton in
-    /// the given reader, and write the replacements to the given
-    /// writer. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::try_find_iter`].
-    ///
-    /// Replacements are determined by the index of the matching pattern. For
-    /// example, if the pattern with index `2` is found, then it is replaced by
-    /// `replace_with[2]`.
-    ///
-    /// After all matches are replaced, the writer is _not_ flushed.
-    ///
-    /// If there was a problem reading from the given reader or writing to the
-    /// given writer, then the corresponding `io::Error` is returned and all
-    /// replacement is stopped.
-    ///
-    /// When searching a stream, an internal buffer is used. Therefore, callers
-    /// should avoiding providing a buffered reader, if possible. However,
-    /// callers may want to provide a buffered writer.
-    ///
-    /// Note that there is currently no infallible version of this routine.
-    ///
-    /// # Memory usage
-    ///
-    /// In general, searching streams will use a constant amount of memory for
-    /// its internal buffer. The one requirement is that the internal buffer
-    /// must be at least the size of the longest possible match. In most use
-    /// cases, the default buffer size will be much larger than any individual
-    /// match.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `replace_with.len()` does not equal
-    /// [`AhoCorasick::patterns_len`].
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration. More specifically, this occurs only
-    /// when the Aho-Corasick searcher does not support unanchored searches
-    /// since this stream searching routine always does an unanchored search.
-    ///
-    /// This also returns an error if the searcher does not support stream
-    /// searches. Only searchers built with [`MatchKind::Standard`] semantics
-    /// support stream searches.
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let patterns = &["fox", "brown", "quick"];
-    /// let haystack = "The quick brown fox.";
-    /// let replace_with = &["sloth", "grey", "slow"];
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let mut result = vec![];
-    /// ac.try_stream_replace_all(
-    ///     haystack.as_bytes(),
-    ///     &mut result,
-    ///     replace_with,
-    /// )?;
-    /// assert_eq!(b"The slow grey sloth.".to_vec(), result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[cfg(feature = "std")]
-    pub fn try_stream_replace_all<R, W, B>(
-        &self,
-        rdr: R,
-        wtr: W,
-        replace_with: &[B],
-    ) -> Result<(), std::io::Error>
-    where
-        R: std::io::Read,
-        W: std::io::Write,
-        B: AsRef<[u8]>,
-    {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)
-            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
-        self.aut.try_stream_replace_all(rdr, wtr, replace_with)
-    }
-
-    /// Search the given reader and replace all matches of this automaton
-    /// using the given closure. The result is written to the given
-    /// writer. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::try_find_iter`].
-    ///
-    /// The closure accepts three parameters: the match found, the text of
-    /// the match and the writer with which to write the replaced text (if any).
-    ///
-    /// After all matches are replaced, the writer is _not_ flushed.
-    ///
-    /// If there was a problem reading from the given reader or writing to the
-    /// given writer, then the corresponding `io::Error` is returned and all
-    /// replacement is stopped.
-    ///
-    /// When searching a stream, an internal buffer is used. Therefore, callers
-    /// should avoiding providing a buffered reader, if possible. However,
-    /// callers may want to provide a buffered writer.
-    ///
-    /// Note that there is currently no infallible version of this routine.
-    ///
-    /// # Memory usage
-    ///
-    /// In general, searching streams will use a constant amount of memory for
-    /// its internal buffer. The one requirement is that the internal buffer
-    /// must be at least the size of the longest possible match. In most use
-    /// cases, the default buffer size will be much larger than any individual
-    /// match.
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration. More specifically, this occurs only
-    /// when the Aho-Corasick searcher does not support unanchored searches
-    /// since this stream searching routine always does an unanchored search.
-    ///
-    /// This also returns an error if the searcher does not support stream
-    /// searches. Only searchers built with [`MatchKind::Standard`] semantics
-    /// support stream searches.
-    ///
-    /// # Example: basic usage
-    ///
-    /// ```
-    /// use std::io::Write;
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let patterns = &["fox", "brown", "quick"];
-    /// let haystack = "The quick brown fox.";
-    ///
-    /// let ac = AhoCorasick::new(patterns).unwrap();
-    /// let mut result = vec![];
-    /// ac.try_stream_replace_all_with(
-    ///     haystack.as_bytes(),
-    ///     &mut result,
-    ///     |mat, _, wtr| {
-    ///         wtr.write_all(mat.pattern().as_usize().to_string().as_bytes())
-    ///     },
-    /// )?;
-    /// assert_eq!(b"The 2 1 0.".to_vec(), result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[cfg(feature = "std")]
-    pub fn try_stream_replace_all_with<R, W, F>(
-        &self,
-        rdr: R,
-        wtr: W,
-        replace_with: F,
-    ) -> Result<(), std::io::Error>
-    where
-        R: std::io::Read,
-        W: std::io::Write,
-        F: FnMut(&Match, &[u8], &mut W) -> Result<(), std::io::Error>,
-    {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)
-            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
-        self.aut.try_stream_replace_all_with(rdr, wtr, replace_with)
-    }
-}
-
-/// Routines for querying information about the Aho-Corasick automaton.
-impl AhoCorasick {
-    /// Returns the kind of the Aho-Corasick automaton used by this searcher.
-    ///
-    /// Knowing the Aho-Corasick kind is principally useful for diagnostic
-    /// purposes. In particular, if no specific kind was given to
-    /// [`AhoCorasickBuilder::kind`], then one is automatically chosen and
-    /// this routine will report which one.
-    ///
-    /// Note that the heuristics used for choosing which `AhoCorasickKind`
-    /// may be changed in a semver compatible release.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, AhoCorasickKind};
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap();
-    /// // The specific Aho-Corasick kind chosen is not guaranteed!
-    /// assert_eq!(AhoCorasickKind::DFA, ac.kind());
-    /// ```
-    pub fn kind(&self) -> AhoCorasickKind {
-        self.kind
-    }
-
-    /// Returns the type of starting search configuration supported by this
-    /// Aho-Corasick automaton.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, StartKind};
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap();
-    /// assert_eq!(StartKind::Unanchored, ac.start_kind());
-    /// ```
-    pub fn start_kind(&self) -> StartKind {
-        self.start_kind
-    }
-
-    /// Returns the match kind used by this automaton.
-    ///
-    /// The match kind is important because it determines what kinds of
-    /// matches are returned. Also, some operations (such as overlapping
-    /// search and stream searching) are only supported when using the
-    /// [`MatchKind::Standard`] match kind.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap();
-    /// assert_eq!(MatchKind::Standard, ac.match_kind());
-    /// ```
-    pub fn match_kind(&self) -> MatchKind {
-        self.aut.match_kind()
-    }
-
-    /// Returns the length of the shortest pattern matched by this automaton.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap();
-    /// assert_eq!(3, ac.min_pattern_len());
-    /// ```
-    ///
-    /// Note that an `AhoCorasick` automaton has a minimum length of `0` if
-    /// and only if it can match the empty string:
-    ///
-    /// ```
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "", "quux", "baz"]).unwrap();
-    /// assert_eq!(0, ac.min_pattern_len());
-    /// ```
-    pub fn min_pattern_len(&self) -> usize {
-        self.aut.min_pattern_len()
-    }
-
-    /// Returns the length of the longest pattern matched by this automaton.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap();
-    /// assert_eq!(4, ac.max_pattern_len());
-    /// ```
-    pub fn max_pattern_len(&self) -> usize {
-        self.aut.max_pattern_len()
-    }
-
-    /// Return the total number of patterns matched by this automaton.
-    ///
-    /// This includes patterns that may never participate in a match. For
-    /// example, if [`MatchKind::LeftmostFirst`] match semantics are used, and
-    /// the patterns `Sam` and `Samwise` were used to build the automaton (in
-    /// that order), then `Samwise` can never participate in a match because
-    /// `Sam` will always take priority.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let ac = AhoCorasick::new(&["foo", "bar", "baz"]).unwrap();
-    /// assert_eq!(3, ac.patterns_len());
-    /// ```
-    pub fn patterns_len(&self) -> usize {
-        self.aut.patterns_len()
-    }
-
-    /// Returns the approximate total amount of heap used by this automaton, in
-    /// units of bytes.
-    ///
-    /// # Examples
-    ///
-    /// This example shows the difference in heap usage between a few
-    /// configurations:
-    ///
-    /// ```
-    /// # if !cfg!(target_pointer_width = "64") { return; }
-    /// use aho_corasick::{AhoCorasick, AhoCorasickKind, MatchKind};
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .kind(None) // default
-    ///     .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"])
-    ///     .unwrap();
-    /// assert_eq!(5_632, ac.memory_usage());
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .kind(None) // default
-    ///     .ascii_case_insensitive(true)
-    ///     .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"])
-    ///     .unwrap();
-    /// assert_eq!(11_136, ac.memory_usage());
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .kind(Some(AhoCorasickKind::NoncontiguousNFA))
-    ///     .ascii_case_insensitive(true)
-    ///     .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"])
-    ///     .unwrap();
-    /// assert_eq!(10_879, ac.memory_usage());
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .kind(Some(AhoCorasickKind::ContiguousNFA))
-    ///     .ascii_case_insensitive(true)
-    ///     .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"])
-    ///     .unwrap();
-    /// assert_eq!(2_584, ac.memory_usage());
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .kind(Some(AhoCorasickKind::DFA))
-    ///     .ascii_case_insensitive(true)
-    ///     .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"])
-    ///     .unwrap();
-    /// // While this shows the DFA being the biggest here by a small margin,
-    /// // don't let the difference fool you. With such a small number of
-    /// // patterns, the difference is small, but a bigger number of patterns
-    /// // will reveal that the rate of growth of the DFA is far bigger than
-    /// // the NFAs above. For a large number of patterns, it is easy for the
-    /// // DFA to take an order of magnitude more heap space (or more!).
-    /// assert_eq!(11_136, ac.memory_usage());
-    /// ```
-    pub fn memory_usage(&self) -> usize {
-        self.aut.memory_usage()
-    }
-}
-
-// We provide a manual debug impl so that we don't include the 'start_kind',
-// principally because it's kind of weird to do so and because it screws with
-// the carefully curated debug output for the underlying automaton.
-impl core::fmt::Debug for AhoCorasick {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        f.debug_tuple("AhoCorasick").field(&self.aut).finish()
-    }
-}
-
-/// An iterator of non-overlapping matches in a particular haystack.
-///
-/// This iterator yields matches according to the [`MatchKind`] used by this
-/// automaton.
-///
-/// This iterator is constructed via the [`AhoCorasick::find_iter`] and
-/// [`AhoCorasick::try_find_iter`] methods.
-///
-/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton.
-///
-/// The lifetime `'h` refers to the lifetime of the haystack being searched.
-#[derive(Debug)]
-pub struct FindIter<'a, 'h>(automaton::FindIter<'a, 'h, Arc<dyn AcAutomaton>>);
-
-impl<'a, 'h> Iterator for FindIter<'a, 'h> {
-    type Item = Match;
-
-    #[inline]
-    fn next(&mut self) -> Option<Match> {
-        self.0.next()
-    }
-}
-
-/// An iterator of overlapping matches in a particular haystack.
-///
-/// This iterator will report all possible matches in a particular haystack,
-/// even when the matches overlap.
-///
-/// This iterator is constructed via the [`AhoCorasick::find_overlapping_iter`]
-/// and [`AhoCorasick::try_find_overlapping_iter`] methods.
-///
-/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton.
-///
-/// The lifetime `'h` refers to the lifetime of the haystack being searched.
-#[derive(Debug)]
-pub struct FindOverlappingIter<'a, 'h>(
-    automaton::FindOverlappingIter<'a, 'h, Arc<dyn AcAutomaton>>,
-);
-
-impl<'a, 'h> Iterator for FindOverlappingIter<'a, 'h> {
-    type Item = Match;
-
-    #[inline]
-    fn next(&mut self) -> Option<Match> {
-        self.0.next()
-    }
-}
-
-/// An iterator that reports Aho-Corasick matches in a stream.
-///
-/// This iterator yields elements of type `Result<Match, std::io::Error>`,
-/// where an error is reported if there was a problem reading from the
-/// underlying stream. The iterator terminates only when the underlying stream
-/// reaches `EOF`.
-///
-/// This iterator is constructed via the [`AhoCorasick::stream_find_iter`] and
-/// [`AhoCorasick::try_stream_find_iter`] methods.
-///
-/// The type variable `R` refers to the `io::Read` stream that is being read
-/// from.
-///
-/// The lifetime `'a` refers to the lifetime of the corresponding
-/// [`AhoCorasick`] searcher.
-#[cfg(feature = "std")]
-#[derive(Debug)]
-pub struct StreamFindIter<'a, R>(
-    automaton::StreamFindIter<'a, Arc<dyn AcAutomaton>, R>,
-);
-
-#[cfg(feature = "std")]
-impl<'a, R: std::io::Read> Iterator for StreamFindIter<'a, R> {
-    type Item = Result<Match, std::io::Error>;
-
-    fn next(&mut self) -> Option<Result<Match, std::io::Error>> {
-        self.0.next()
-    }
-}
-
-/// A builder for configuring an Aho-Corasick automaton.
-///
-/// # Quick advice
-///
-/// * Use [`AhoCorasickBuilder::match_kind`] to configure your searcher
-/// with [`MatchKind::LeftmostFirst`] if you want to match how backtracking
-/// regex engines execute searches for `pat1|pat2|..|patN`. Use
-/// [`MatchKind::LeftmostLongest`] if you want to match how POSIX regex engines
-/// do it.
-/// * If you need an anchored search, use [`AhoCorasickBuilder::start_kind`] to
-/// set the [`StartKind::Anchored`] mode since [`StartKind::Unanchored`] is the
-/// default. Or just use [`StartKind::Both`] to support both types of searches.
-/// * You might want to use [`AhoCorasickBuilder::kind`] to set your searcher
-/// to always use a [`AhoCorasickKind::DFA`] if search speed is critical and
-/// memory usage isn't a concern. Otherwise, not setting a kind will probably
-/// make the right choice for you. Beware that if you use [`StartKind::Both`]
-/// to build a searcher that supports both unanchored and anchored searches
-/// _and_ you set [`AhoCorasickKind::DFA`], then the DFA will essentially be
-/// duplicated to support both simultaneously. This results in very high memory
-/// usage.
-/// * For all other options, their defaults are almost certainly what you want.
-#[derive(Clone, Debug, Default)]
-pub struct AhoCorasickBuilder {
-    nfa_noncontiguous: noncontiguous::Builder,
-    nfa_contiguous: contiguous::Builder,
-    dfa: dfa::Builder,
-    kind: Option<AhoCorasickKind>,
-    start_kind: StartKind,
-}
-
-impl AhoCorasickBuilder {
-    /// Create a new builder for configuring an Aho-Corasick automaton.
-    ///
-    /// The builder provides a way to configure a number of things, including
-    /// ASCII case insensitivity and what kind of match semantics are used.
-    pub fn new() -> AhoCorasickBuilder {
-        AhoCorasickBuilder::default()
-    }
-
-    /// Build an Aho-Corasick automaton using the configuration set on this
-    /// builder.
-    ///
-    /// A builder may be reused to create more automatons.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasickBuilder, PatternID};
-    ///
-    /// let patterns = &["foo", "bar", "baz"];
-    /// let ac = AhoCorasickBuilder::new().build(patterns).unwrap();
-    /// assert_eq!(
-    ///     Some(PatternID::must(1)),
-    ///     ac.find("xxx bar xxx").map(|m| m.pattern()),
-    /// );
-    /// ```
-    pub fn build<I, P>(&self, patterns: I) -> Result<AhoCorasick, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        let nfa = self.nfa_noncontiguous.build(patterns)?;
-        let (aut, kind): (Arc<dyn AcAutomaton>, AhoCorasickKind) =
-            match self.kind {
-                None => {
-                    debug!(
-                        "asked for automatic Aho-Corasick implementation, \
-                     criteria: <patterns: {:?}, max pattern len: {:?}, \
-                     start kind: {:?}>",
-                        nfa.patterns_len(),
-                        nfa.max_pattern_len(),
-                        self.start_kind,
-                    );
-                    self.build_auto(nfa)
-                }
-                Some(AhoCorasickKind::NoncontiguousNFA) => {
-                    debug!("forcefully chose noncontiguous NFA");
-                    (Arc::new(nfa), AhoCorasickKind::NoncontiguousNFA)
-                }
-                Some(AhoCorasickKind::ContiguousNFA) => {
-                    debug!("forcefully chose contiguous NFA");
-                    let cnfa =
-                        self.nfa_contiguous.build_from_noncontiguous(&nfa)?;
-                    (Arc::new(cnfa), AhoCorasickKind::ContiguousNFA)
-                }
-                Some(AhoCorasickKind::DFA) => {
-                    debug!("forcefully chose DFA");
-                    let dfa = self.dfa.build_from_noncontiguous(&nfa)?;
-                    (Arc::new(dfa), AhoCorasickKind::DFA)
-                }
-            };
-        Ok(AhoCorasick { aut, kind, start_kind: self.start_kind })
-    }
-
-    /// Implements the automatic selection logic for the Aho-Corasick
-    /// implementation to use. Since all Aho-Corasick automatons are built
-    /// from a non-contiguous NFA, the caller is responsible for building
-    /// that first.
-    fn build_auto(
-        &self,
-        nfa: noncontiguous::NFA,
-    ) -> (Arc<dyn AcAutomaton>, AhoCorasickKind) {
-        // We try to build a DFA if we have a very small number of patterns,
-        // otherwise the memory usage just gets too crazy. We also only do it
-        // when the start kind is unanchored or anchored, but not both, because
-        // both implies two full copies of the transition table.
-        let try_dfa = !matches!(self.start_kind, StartKind::Both)
-            && nfa.patterns_len() <= 100;
-        if try_dfa {
-            match self.dfa.build_from_noncontiguous(&nfa) {
-                Ok(dfa) => {
-                    debug!("chose a DFA");
-                    return (Arc::new(dfa), AhoCorasickKind::DFA);
-                }
-                Err(_err) => {
-                    debug!(
-                        "failed to build DFA, trying something else: {}",
-                        _err
-                    );
-                }
-            }
-        }
-        // We basically always want a contiguous NFA if the limited
-        // circumstances in which we use a DFA are not true. It is quite fast
-        // and has excellent memory usage. The only way we don't use it is if
-        // there are so many states that it can't fit in a contiguous NFA.
-        // And the only way to know that is to try to build it. Building a
-        // contiguous NFA is mostly just reshuffling data from a noncontiguous
-        // NFA, so it isn't too expensive, especially relative to building a
-        // noncontiguous NFA in the first place.
-        match self.nfa_contiguous.build_from_noncontiguous(&nfa) {
-            Ok(nfa) => {
-                debug!("chose contiguous NFA");
-                return (Arc::new(nfa), AhoCorasickKind::ContiguousNFA);
-            }
-            #[allow(unused_variables)] // unused when 'logging' is disabled
-            Err(_err) => {
-                debug!(
-                    "failed to build contiguous NFA, \
-                     trying something else: {}",
-                    _err
-                );
-            }
-        }
-        debug!("chose non-contiguous NFA");
-        (Arc::new(nfa), AhoCorasickKind::NoncontiguousNFA)
-    }
-
-    /// Set the desired match semantics.
-    ///
-    /// The default is [`MatchKind::Standard`], which corresponds to the match
-    /// semantics supported by the standard textbook description of the
-    /// Aho-Corasick algorithm. Namely, matches are reported as soon as they
-    /// are found. Moreover, this is the only way to get overlapping matches
-    /// or do stream searching.
-    ///
-    /// The other kinds of match semantics that are supported are
-    /// [`MatchKind::LeftmostFirst`] and [`MatchKind::LeftmostLongest`]. The
-    /// former corresponds to the match you would get if you were to try to
-    /// match each pattern at each position in the haystack in the same order
-    /// that you give to the automaton. That is, it returns the leftmost match
-    /// corresponding to the earliest pattern given to the automaton. The
-    /// latter corresponds to finding the longest possible match among all
-    /// leftmost matches.
-    ///
-    /// For more details on match semantics, see the [documentation for
-    /// `MatchKind`](MatchKind).
-    ///
-    /// Note that setting this to [`MatchKind::LeftmostFirst`] or
-    /// [`MatchKind::LeftmostLongest`] will cause some search routines on
-    /// [`AhoCorasick`] to return an error (or panic if you're using the
-    /// infallible API). Notably, this includes stream and overlapping
-    /// searches.
-    ///
-    /// # Examples
-    ///
-    /// In these examples, we demonstrate the differences between match
-    /// semantics for a particular set of patterns in a specific order:
-    /// `b`, `abc`, `abcd`.
-    ///
-    /// Standard semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::Standard) // default, not necessary
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.find(haystack).expect("should have a match");
-    /// assert_eq!("b", &haystack[mat.start()..mat.end()]);
-    /// ```
-    ///
-    /// Leftmost-first semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.find(haystack).expect("should have a match");
-    /// assert_eq!("abc", &haystack[mat.start()..mat.end()]);
-    /// ```
-    ///
-    /// Leftmost-longest semantics:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["b", "abc", "abcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostLongest)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mat = ac.find(haystack).expect("should have a match");
-    /// assert_eq!("abcd", &haystack[mat.start()..mat.end()]);
-    /// ```
-    pub fn match_kind(&mut self, kind: MatchKind) -> &mut AhoCorasickBuilder {
-        self.nfa_noncontiguous.match_kind(kind);
-        self.nfa_contiguous.match_kind(kind);
-        self.dfa.match_kind(kind);
-        self
-    }
-
-    /// Sets the starting state configuration for the automaton.
-    ///
-    /// Every Aho-Corasick automaton is capable of having two start states: one
-    /// that is used for unanchored searches and one that is used for anchored
-    /// searches. Some automatons, like the NFAs, support this with almost zero
-    /// additional cost. Other automatons, like the DFA, require two copies of
-    /// the underlying transition table to support both simultaneously.
-    ///
-    /// Because there may be an added non-trivial cost to supporting both, it
-    /// is possible to configure which starting state configuration is needed.
-    ///
-    /// Indeed, since anchored searches tend to be somewhat more rare,
-    /// _only_ unanchored searches are supported by default. Thus,
-    /// [`StartKind::Unanchored`] is the default.
-    ///
-    /// Note that when this is set to [`StartKind::Unanchored`], then
-    /// running an anchored search will result in an error (or a panic
-    /// if using the infallible APIs). Similarly, when this is set to
-    /// [`StartKind::Anchored`], then running an unanchored search will
-    /// result in an error (or a panic if using the infallible APIs). When
-    /// [`StartKind::Both`] is used, then both unanchored and anchored searches
-    /// are always supported.
-    ///
-    /// Also note that even if an `AhoCorasick` searcher is using an NFA
-    /// internally (which always supports both unanchored and anchored
-    /// searches), an error will still be reported for a search that isn't
-    /// supported by the configuration set via this method. This means,
-    /// for example, that an error is never dependent on which internal
-    /// implementation of Aho-Corasick is used.
-    ///
-    /// # Example: anchored search
-    ///
-    /// This shows how to build a searcher that only supports anchored
-    /// searches:
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     AhoCorasick, Anchored, Input, Match, MatchKind, StartKind,
-    /// };
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .start_kind(StartKind::Anchored)
-    ///     .build(&["b", "abc", "abcd"])
-    ///     .unwrap();
-    ///
-    /// // An unanchored search is not supported! An error here is guaranteed
-    /// // given the configuration above regardless of which kind of
-    /// // Aho-Corasick implementation ends up being used internally.
-    /// let input = Input::new("foo abcd").anchored(Anchored::No);
-    /// assert!(ac.try_find(input).is_err());
-    ///
-    /// let input = Input::new("foo abcd").anchored(Anchored::Yes);
-    /// assert_eq!(None, ac.try_find(input)?);
-    ///
-    /// let input = Input::new("abcd").anchored(Anchored::Yes);
-    /// assert_eq!(Some(Match::must(1, 0..3)), ac.try_find(input)?);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// # Example: unanchored and anchored searches
-    ///
-    /// This shows how to build a searcher that supports both unanchored and
-    /// anchored searches:
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     AhoCorasick, Anchored, Input, Match, MatchKind, StartKind,
-    /// };
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .start_kind(StartKind::Both)
-    ///     .build(&["b", "abc", "abcd"])
-    ///     .unwrap();
-    ///
-    /// let input = Input::new("foo abcd").anchored(Anchored::No);
-    /// assert_eq!(Some(Match::must(1, 4..7)), ac.try_find(input)?);
-    ///
-    /// let input = Input::new("foo abcd").anchored(Anchored::Yes);
-    /// assert_eq!(None, ac.try_find(input)?);
-    ///
-    /// let input = Input::new("abcd").anchored(Anchored::Yes);
-    /// assert_eq!(Some(Match::must(1, 0..3)), ac.try_find(input)?);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn start_kind(&mut self, kind: StartKind) -> &mut AhoCorasickBuilder {
-        self.dfa.start_kind(kind);
-        self.start_kind = kind;
-        self
-    }
-
-    /// Enable ASCII-aware case insensitive matching.
-    ///
-    /// When this option is enabled, searching will be performed without
-    /// respect to case for ASCII letters (`a-z` and `A-Z`) only.
-    ///
-    /// Enabling this option does not change the search algorithm, but it may
-    /// increase the size of the automaton.
-    ///
-    /// **NOTE:** It is unlikely that support for Unicode case folding will
-    /// be added in the future. The ASCII case works via a simple hack to the
-    /// underlying automaton, but full Unicode handling requires a fair bit of
-    /// sophistication. If you do need Unicode handling, you might consider
-    /// using the [`regex` crate](https://docs.rs/regex) or the lower level
-    /// [`regex-automata` crate](https://docs.rs/regex-automata).
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::AhoCorasick;
-    ///
-    /// let patterns = &["FOO", "bAr", "BaZ"];
-    /// let haystack = "foo bar baz";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .ascii_case_insensitive(true)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// assert_eq!(3, ac.find_iter(haystack).count());
-    /// ```
-    pub fn ascii_case_insensitive(
-        &mut self,
-        yes: bool,
-    ) -> &mut AhoCorasickBuilder {
-        self.nfa_noncontiguous.ascii_case_insensitive(yes);
-        self.nfa_contiguous.ascii_case_insensitive(yes);
-        self.dfa.ascii_case_insensitive(yes);
-        self
-    }
-
-    /// Choose the type of underlying automaton to use.
-    ///
-    /// Currently, there are four choices:
-    ///
-    /// * [`AhoCorasickKind::NoncontiguousNFA`] instructs the searcher to
-    /// use a [`noncontiguous::NFA`]. A noncontiguous NFA is the fastest to
-    /// be built, has moderate memory usage and is typically the slowest to
-    /// execute a search.
-    /// * [`AhoCorasickKind::ContiguousNFA`] instructs the searcher to use a
-    /// [`contiguous::NFA`]. A contiguous NFA is a little slower to build than
-    /// a noncontiguous NFA, has excellent memory usage and is typically a
-    /// little slower than a DFA for a search.
-    /// * [`AhoCorasickKind::DFA`] instructs the searcher to use a
-    /// [`dfa::DFA`]. A DFA is very slow to build, uses exorbitant amounts of
-    /// memory, but will typically execute searches the fastest.
-    /// * `None` (the default) instructs the searcher to choose the "best"
-    /// Aho-Corasick implementation. This choice is typically based primarily
-    /// on the number of patterns.
-    ///
-    /// Setting this configuration does not change the time complexity for
-    /// constructing the Aho-Corasick automaton (which is `O(p)` where `p`
-    /// is the total number of patterns being compiled). Setting this to
-    /// [`AhoCorasickKind::DFA`] does however reduce the time complexity of
-    /// non-overlapping searches from `O(n + p)` to `O(n)`, where `n` is the
-    /// length of the haystack.
-    ///
-    /// In general, you should probably stick to the default unless you have
-    /// some kind of reason to use a specific Aho-Corasick implementation. For
-    /// example, you might choose `AhoCorasickKind::DFA` if you don't care
-    /// about memory usage and want the fastest possible search times.
-    ///
-    /// Setting this guarantees that the searcher returned uses the chosen
-    /// implementation. If that implementation could not be constructed, then
-    /// an error will be returned. In contrast, when `None` is used, it is
-    /// possible for it to attempt to construct, for example, a contiguous
-    /// NFA and have it fail. In which case, it will fall back to using a
-    /// noncontiguous NFA.
-    ///
-    /// If `None` is given, then one may use [`AhoCorasick::kind`] to determine
-    /// which Aho-Corasick implementation was chosen.
-    ///
-    /// Note that the heuristics used for choosing which `AhoCorasickKind`
-    /// may be changed in a semver compatible release.
-    pub fn kind(
-        &mut self,
-        kind: Option<AhoCorasickKind>,
-    ) -> &mut AhoCorasickBuilder {
-        self.kind = kind;
-        self
-    }
-
-    /// Enable heuristic prefilter optimizations.
-    ///
-    /// When enabled, searching will attempt to quickly skip to match
-    /// candidates using specialized literal search routines. A prefilter
-    /// cannot always be used, and is generally treated as a heuristic. It
-    /// can be useful to disable this if the prefilter is observed to be
-    /// sub-optimal for a particular workload.
-    ///
-    /// Currently, prefilters are typically only active when building searchers
-    /// with a small (less than 100) number of patterns.
-    ///
-    /// This is enabled by default.
-    pub fn prefilter(&mut self, yes: bool) -> &mut AhoCorasickBuilder {
-        self.nfa_noncontiguous.prefilter(yes);
-        self.nfa_contiguous.prefilter(yes);
-        self.dfa.prefilter(yes);
-        self
-    }
-
-    /// Set the limit on how many states use a dense representation for their
-    /// transitions. Other states will generally use a sparse representation.
-    ///
-    /// A dense representation uses more memory but is generally faster, since
-    /// the next transition in a dense representation can be computed in a
-    /// constant number of instructions. A sparse representation uses less
-    /// memory but is generally slower, since the next transition in a sparse
-    /// representation requires executing a variable number of instructions.
-    ///
-    /// This setting is only used when an Aho-Corasick implementation is used
-    /// that supports the dense versus sparse representation trade off. Not all
-    /// do.
-    ///
-    /// This limit is expressed in terms of the depth of a state, i.e., the
-    /// number of transitions from the starting state of the automaton. The
-    /// idea is that most of the time searching will be spent near the starting
-    /// state of the automaton, so states near the start state should use a
-    /// dense representation. States further away from the start state would
-    /// then use a sparse representation.
-    ///
-    /// By default, this is set to a low but non-zero number. Setting this to
-    /// `0` is almost never what you want, since it is likely to make searches
-    /// very slow due to the start state itself being forced to use a sparse
-    /// representation. However, it is unlikely that increasing this number
-    /// will help things much, since the most active states have a small depth.
-    /// More to the point, the memory usage increases superlinearly as this
-    /// number increases.
-    pub fn dense_depth(&mut self, depth: usize) -> &mut AhoCorasickBuilder {
-        self.nfa_noncontiguous.dense_depth(depth);
-        self.nfa_contiguous.dense_depth(depth);
-        self
-    }
-
-    /// A debug settting for whether to attempt to shrink the size of the
-    /// automaton's alphabet or not.
-    ///
-    /// This option is enabled by default and should never be disabled unless
-    /// one is debugging the underlying automaton.
-    ///
-    /// When enabled, some (but not all) Aho-Corasick automatons will use a map
-    /// from all possible bytes to their corresponding equivalence class. Each
-    /// equivalence class represents a set of bytes that does not discriminate
-    /// between a match and a non-match in the automaton.
-    ///
-    /// The advantage of this map is that the size of the transition table can
-    /// be reduced drastically from `#states * 256 * sizeof(u32)` to
-    /// `#states * k * sizeof(u32)` where `k` is the number of equivalence
-    /// classes (rounded up to the nearest power of 2). As a result, total
-    /// space usage can decrease substantially. Moreover, since a smaller
-    /// alphabet is used, automaton compilation becomes faster as well.
-    ///
-    /// **WARNING:** This is only useful for debugging automatons. Disabling
-    /// this does not yield any speed advantages. Namely, even when this is
-    /// disabled, a byte class map is still used while searching. The only
-    /// difference is that every byte will be forced into its own distinct
-    /// equivalence class. This is useful for debugging the actual generated
-    /// transitions because it lets one see the transitions defined on actual
-    /// bytes instead of the equivalence classes.
-    pub fn byte_classes(&mut self, yes: bool) -> &mut AhoCorasickBuilder {
-        self.nfa_contiguous.byte_classes(yes);
-        self.dfa.byte_classes(yes);
-        self
-    }
-}
-
-/// The type of Aho-Corasick implementation to use in an [`AhoCorasick`]
-/// searcher.
-///
-/// This is principally used as an input to the
-/// [`AhoCorasickBuilder::start_kind`] method. Its documentation goes into more
-/// detail about each choice.
-#[non_exhaustive]
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum AhoCorasickKind {
-    /// Use a noncontiguous NFA.
-    NoncontiguousNFA,
-    /// Use a contiguous NFA.
-    ContiguousNFA,
-    /// Use a DFA. Warning: DFAs typically use a large amount of memory.
-    DFA,
-}
-
-/// A trait that effectively gives us practical dynamic dispatch over anything
-/// that impls `Automaton`, but without needing to add a bunch of bounds to
-/// the core `Automaton` trait. Basically, we provide all of the marker traits
-/// that our automatons have, in addition to `Debug` impls and requiring that
-/// there is no borrowed data. Without these, the main `AhoCorasick` type would
-/// not be able to meaningfully impl `Debug` or the marker traits without also
-/// requiring that all impls of `Automaton` do so, which would be not great.
-trait AcAutomaton:
-    Automaton + Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static
-{
-}
-
-impl<A> AcAutomaton for A where
-    A: Automaton + Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static
-{
-}
-
-impl crate::automaton::private::Sealed for Arc<dyn AcAutomaton> {}
-
-// I'm not sure why this trait impl shows up in the docs, as the AcAutomaton
-// trait is not exported. So we forcefully hide it.
-//
-// SAFETY: This just defers to the underlying 'AcAutomaton' and thus inherits
-// its safety properties.
-#[doc(hidden)]
-unsafe impl Automaton for Arc<dyn AcAutomaton> {
-    #[inline(always)]
-    fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> {
-        (**self).start_state(anchored)
-    }
-
-    #[inline(always)]
-    fn next_state(
-        &self,
-        anchored: Anchored,
-        sid: StateID,
-        byte: u8,
-    ) -> StateID {
-        (**self).next_state(anchored, sid, byte)
-    }
-
-    #[inline(always)]
-    fn is_special(&self, sid: StateID) -> bool {
-        (**self).is_special(sid)
-    }
-
-    #[inline(always)]
-    fn is_dead(&self, sid: StateID) -> bool {
-        (**self).is_dead(sid)
-    }
-
-    #[inline(always)]
-    fn is_match(&self, sid: StateID) -> bool {
-        (**self).is_match(sid)
-    }
-
-    #[inline(always)]
-    fn is_start(&self, sid: StateID) -> bool {
-        (**self).is_start(sid)
-    }
-
-    #[inline(always)]
-    fn match_kind(&self) -> MatchKind {
-        (**self).match_kind()
-    }
-
-    #[inline(always)]
-    fn match_len(&self, sid: StateID) -> usize {
-        (**self).match_len(sid)
-    }
-
-    #[inline(always)]
-    fn match_pattern(&self, sid: StateID, index: usize) -> PatternID {
-        (**self).match_pattern(sid, index)
-    }
-
-    #[inline(always)]
-    fn patterns_len(&self) -> usize {
-        (**self).patterns_len()
-    }
-
-    #[inline(always)]
-    fn pattern_len(&self, pid: PatternID) -> usize {
-        (**self).pattern_len(pid)
-    }
-
-    #[inline(always)]
-    fn min_pattern_len(&self) -> usize {
-        (**self).min_pattern_len()
-    }
-
-    #[inline(always)]
-    fn max_pattern_len(&self) -> usize {
-        (**self).max_pattern_len()
-    }
-
-    #[inline(always)]
-    fn memory_usage(&self) -> usize {
-        (**self).memory_usage()
-    }
-
-    #[inline(always)]
-    fn prefilter(&self) -> Option<&Prefilter> {
-        (**self).prefilter()
-    }
-
-    // Even though 'try_find' and 'try_find_overlapping' each have their
-    // own default impls, we explicitly define them here to fix a perf bug.
-    // Without these explicit definitions, the default impl will wind up using
-    // dynamic dispatch for all 'Automaton' method calls, including things like
-    // 'next_state' that absolutely must get inlined or else perf is trashed.
-    // Defining them explicitly here like this still requires dynamic dispatch
-    // to call 'try_find' itself, but all uses of 'Automaton' within 'try_find'
-    // are monomorphized.
-    //
-    // We don't need to explicitly impl any other methods, I think, because
-    // they are all implemented themselves in terms of 'try_find' and
-    // 'try_find_overlapping'. We still might wind up with an extra virtual
-    // call here or there, but that's okay since it's outside of any perf
-    // critical areas.
-
-    #[inline(always)]
-    fn try_find(
-        &self,
-        input: &Input<'_>,
-    ) -> Result<Option<Match>, MatchError> {
-        (**self).try_find(input)
-    }
-
-    #[inline(always)]
-    fn try_find_overlapping(
-        &self,
-        input: &Input<'_>,
-        state: &mut OverlappingState,
-    ) -> Result<(), MatchError> {
-        (**self).try_find_overlapping(input, state)
-    }
-}
-
-/// Returns an error if the start state configuration does not support the
-/// desired search configuration. See the internal 'AhoCorasick::start_kind'
-/// field docs for more details.
-fn enforce_anchored_consistency(
-    have: StartKind,
-    want: Anchored,
-) -> Result<(), MatchError> {
-    match have {
-        StartKind::Both => Ok(()),
-        StartKind::Unanchored if !want.is_anchored() => Ok(()),
-        StartKind::Unanchored => Err(MatchError::invalid_input_anchored()),
-        StartKind::Anchored if want.is_anchored() => Ok(()),
-        StartKind::Anchored => Err(MatchError::invalid_input_unanchored()),
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/automaton.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/automaton.rs
deleted file mode 100644
index c41dc6e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/automaton.rs
+++ /dev/null
@@ -1,1608 +0,0 @@
-/*!
-Provides [`Automaton`] trait for abstracting over Aho-Corasick automata.
-
-The `Automaton` trait provides a way to write generic code over any
-Aho-Corasick automaton. It also provides access to lower level APIs that
-permit walking the state transitions of an Aho-Corasick automaton manually.
-*/
-
-use alloc::{string::String, vec::Vec};
-
-use crate::util::{
-    error::MatchError,
-    primitives::PatternID,
-    search::{Anchored, Input, Match, MatchKind, Span},
-};
-
-pub use crate::util::{
-    prefilter::{Candidate, Prefilter},
-    primitives::{StateID, StateIDError},
-};
-
-/// We seal the `Automaton` trait for now. It's a big trait, and it's
-/// conceivable that I might want to add new required methods, and sealing the
-/// trait permits doing that in a backwards compatible fashion. On other the
-/// hand, if you have a solid use case for implementing the trait yourself,
-/// please file an issue and we can discuss it. This was *mostly* done as a
-/// conservative step.
-pub(crate) mod private {
-    pub trait Sealed {}
-}
-impl private::Sealed for crate::nfa::noncontiguous::NFA {}
-impl private::Sealed for crate::nfa::contiguous::NFA {}
-impl private::Sealed for crate::dfa::DFA {}
-
-impl<'a, T: private::Sealed + ?Sized> private::Sealed for &'a T {}
-
-/// A trait that abstracts over Aho-Corasick automata.
-///
-/// This trait primarily exists for niche use cases such as:
-///
-/// * Using an NFA or DFA directly, bypassing the top-level
-/// [`AhoCorasick`](crate::AhoCorasick) searcher. Currently, these include
-/// [`noncontiguous::NFA`](crate::nfa::noncontiguous::NFA),
-/// [`contiguous::NFA`](crate::nfa::contiguous::NFA) and
-/// [`dfa::DFA`](crate::dfa::DFA).
-/// * Implementing your own custom search routine by walking the automaton
-/// yourself. This might be useful for implementing search on non-contiguous
-/// strings or streams.
-///
-/// For most use cases, it is not expected that users will need
-/// to use or even know about this trait. Indeed, the top level
-/// [`AhoCorasick`](crate::AhoCorasick) searcher does not expose any details
-/// about this trait, nor does it implement it itself.
-///
-/// Note that this trait defines a number of default methods, such as
-/// [`Automaton::try_find`] and [`Automaton::try_find_iter`], which implement
-/// higher level search routines in terms of the lower level automata API.
-///
-/// # Sealed
-///
-/// Currently, this trait is sealed. That means users of this crate can write
-/// generic routines over this trait but cannot implement it themselves. This
-/// restriction may be lifted in the future, but sealing the trait permits
-/// adding new required methods in a backwards compatible fashion.
-///
-/// # Special states
-///
-/// This trait encodes a notion of "special" states in an automaton. Namely,
-/// a state is treated as special if it is a dead, match or start state:
-///
-/// * A dead state is a state that cannot be left once entered. All transitions
-/// on a dead state lead back to itself. The dead state is meant to be treated
-/// as a sentinel indicating that the search should stop and return a match if
-/// one has been found, and nothing otherwise.
-/// * A match state is a state that indicates one or more patterns have
-/// matched. Depending on the [`MatchKind`] of the automaton, a search may
-/// stop once a match is seen, or it may continue looking for matches until
-/// it enters a dead state or sees the end of the haystack.
-/// * A start state is a state that a search begins in. It is useful to know
-/// when a search enters a start state because it may mean that a prefilter can
-/// be used to skip ahead and quickly look for candidate matches. Unlike dead
-/// and match states, it is never necessary to explicitly handle start states
-/// for correctness. Indeed, in this crate, implementations of `Automaton`
-/// will only treat start states as "special" when a prefilter is enabled and
-/// active. Otherwise, treating it as special has no purpose and winds up
-/// slowing down the overall search because it results in ping-ponging between
-/// the main state transition and the "special" state logic.
-///
-/// Since checking whether a state is special by doing three different
-/// checks would be too expensive inside a fast search loop, the
-/// [`Automaton::is_special`] method is provided for quickly checking whether
-/// the state is special. The `Automaton::is_dead`, `Automaton::is_match` and
-/// `Automaton::is_start` predicates can then be used to determine which kind
-/// of special state it is.
-///
-/// # Panics
-///
-/// Most of the APIs on this trait should panic or give incorrect results
-/// if invalid inputs are given to it. For example, `Automaton::next_state`
-/// has unspecified behavior if the state ID given to it is not a valid
-/// state ID for the underlying automaton. Valid state IDs can only be
-/// retrieved in one of two ways: calling `Automaton::start_state` or calling
-/// `Automaton::next_state` with a valid state ID.
-///
-/// # Safety
-///
-/// This trait is not safe to implement so that code may rely on the
-/// correctness of implementations of this trait to avoid undefined behavior.
-/// The primary correctness guarantees are:
-///
-/// * `Automaton::start_state` always returns a valid state ID or an error or
-/// panics.
-/// * `Automaton::next_state`, when given a valid state ID, always returns
-/// a valid state ID for all values of `anchored` and `byte`, or otherwise
-/// panics.
-///
-/// In general, the rest of the methods on `Automaton` need to uphold their
-/// contracts as well. For example, `Automaton::is_dead` should only returns
-/// true if the given state ID is actually a dead state.
-///
-/// Note that currently this crate does not rely on the safety property defined
-/// here to avoid undefined behavior. Instead, this was done to make it
-/// _possible_ to do in the future.
-///
-/// # Example
-///
-/// This example shows how one might implement a basic but correct search
-/// routine. We keep things simple by not using prefilters or worrying about
-/// anchored searches, but do make sure our search is correct for all possible
-/// [`MatchKind`] semantics. (The comments in the code below note the parts
-/// that are needed to support certain `MatchKind` semantics.)
-///
-/// ```
-/// use aho_corasick::{
-///     automaton::Automaton,
-///     nfa::noncontiguous::NFA,
-///     Anchored, Match, MatchError, MatchKind,
-/// };
-///
-/// // Run an unanchored search for 'aut' in 'haystack'. Return the first match
-/// // seen according to the automaton's match semantics. This returns an error
-/// // if the given automaton does not support unanchored searches.
-/// fn find<A: Automaton>(
-///     aut: A,
-///     haystack: &[u8],
-/// ) -> Result<Option<Match>, MatchError> {
-///     let mut sid = aut.start_state(Anchored::No)?;
-///     let mut at = 0;
-///     let mut mat = None;
-///     let get_match = |sid, at| {
-///         let pid = aut.match_pattern(sid, 0);
-///         let len = aut.pattern_len(pid);
-///         Match::new(pid, (at - len)..at)
-///     };
-///     // Start states can be match states!
-///     if aut.is_match(sid) {
-///         mat = Some(get_match(sid, at));
-///         // Standard semantics require matches to be reported as soon as
-///         // they're seen. Otherwise, we continue until we see a dead state
-///         // or the end of the haystack.
-///         if matches!(aut.match_kind(), MatchKind::Standard) {
-///             return Ok(mat);
-///         }
-///     }
-///     while at < haystack.len() {
-///         sid = aut.next_state(Anchored::No, sid, haystack[at]);
-///         if aut.is_special(sid) {
-///             if aut.is_dead(sid) {
-///                 return Ok(mat);
-///             } else if aut.is_match(sid) {
-///                 mat = Some(get_match(sid, at + 1));
-///                 // As above, standard semantics require that we return
-///                 // immediately once a match is found.
-///                 if matches!(aut.match_kind(), MatchKind::Standard) {
-///                     return Ok(mat);
-///                 }
-///             }
-///         }
-///         at += 1;
-///     }
-///     Ok(mat)
-/// }
-///
-/// // Show that it works for standard searches.
-/// let nfa = NFA::new(&["samwise", "sam"]).unwrap();
-/// assert_eq!(Some(Match::must(1, 0..3)), find(&nfa, b"samwise")?);
-///
-/// // But also works when using leftmost-first. Notice how the match result
-/// // has changed!
-/// let nfa = NFA::builder()
-///     .match_kind(MatchKind::LeftmostFirst)
-///     .build(&["samwise", "sam"])
-///     .unwrap();
-/// assert_eq!(Some(Match::must(0, 0..7)), find(&nfa, b"samwise")?);
-///
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-pub unsafe trait Automaton: private::Sealed {
-    /// Returns the starting state for the given anchor mode.
-    ///
-    /// Upon success, the state ID returned is guaranteed to be valid for
-    /// this automaton.
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when the given search configuration is not
-    /// supported by the underlying automaton. For example, if the underlying
-    /// automaton only supports unanchored searches but the given configuration
-    /// was set to an anchored search, then this must return an error.
-    fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError>;
-
-    /// Performs a state transition from `sid` for `byte` and returns the next
-    /// state.
-    ///
-    /// `anchored` should be [`Anchored::Yes`] when executing an anchored
-    /// search and [`Anchored::No`] otherwise. For some implementations of
-    /// `Automaton`, it is required to know whether the search is anchored
-    /// or not in order to avoid following failure transitions. Other
-    /// implementations may ignore `anchored` altogether and depend on
-    /// `Automaton::start_state` returning a state that walks a different path
-    /// through the automaton depending on whether the search is anchored or
-    /// not.
-    ///
-    /// # Panics
-    ///
-    /// This routine may panic or return incorrect results when the given state
-    /// ID is invalid. A state ID is valid if and only if:
-    ///
-    /// 1. It came from a call to `Automaton::start_state`, or
-    /// 2. It came from a previous call to `Automaton::next_state` with a
-    /// valid state ID.
-    ///
-    /// Implementations must treat all possible values of `byte` as valid.
-    ///
-    /// Implementations may panic on unsupported values of `anchored`, but are
-    /// not required to do so.
-    fn next_state(
-        &self,
-        anchored: Anchored,
-        sid: StateID,
-        byte: u8,
-    ) -> StateID;
-
-    /// Returns true if the given ID represents a "special" state. A special
-    /// state is a dead, match or start state.
-    ///
-    /// Note that implementations may choose to return false when the given ID
-    /// corresponds to a start state. Namely, it always correct to treat start
-    /// states as non-special. Implementations must return true for states that
-    /// are dead or contain matches.
-    ///
-    /// This has unspecified behavior when given an invalid state ID.
-    fn is_special(&self, sid: StateID) -> bool;
-
-    /// Returns true if the given ID represents a dead state.
-    ///
-    /// A dead state is a type of "sink" in a finite state machine. It
-    /// corresponds to a state whose transitions all loop back to itself. That
-    /// is, once entered, it can never be left. In practice, it serves as a
-    /// sentinel indicating that the search should terminate.
-    ///
-    /// This has unspecified behavior when given an invalid state ID.
-    fn is_dead(&self, sid: StateID) -> bool;
-
-    /// Returns true if the given ID represents a match state.
-    ///
-    /// A match state is always associated with one or more pattern IDs that
-    /// matched at the position in the haystack when the match state was
-    /// entered. When a match state is entered, the match semantics dictate
-    /// whether it should be returned immediately (for `MatchKind::Standard`)
-    /// or if the search should continue (for `MatchKind::LeftmostFirst` and
-    /// `MatchKind::LeftmostLongest`) until a dead state is seen or the end of
-    /// the haystack has been reached.
-    ///
-    /// This has unspecified behavior when given an invalid state ID.
-    fn is_match(&self, sid: StateID) -> bool;
-
-    /// Returns true if the given ID represents a start state.
-    ///
-    /// While it is never incorrect to ignore start states during a search
-    /// (except for the start of the search of course), knowing whether one has
-    /// entered a start state can be useful for certain classes of performance
-    /// optimizations. For example, if one is in a start state, it may be legal
-    /// to try to skip ahead and look for match candidates more quickly than
-    /// would otherwise be accomplished by walking the automaton.
-    ///
-    /// Implementations of `Automaton` in this crate "unspecialize" start
-    /// states when a prefilter is not active or enabled. In this case, it
-    /// is possible for `Automaton::is_special(sid)` to return false while
-    /// `Automaton::is_start(sid)` returns true.
-    ///
-    /// This has unspecified behavior when given an invalid state ID.
-    fn is_start(&self, sid: StateID) -> bool;
-
-    /// Returns the match semantics that this automaton was built with.
-    fn match_kind(&self) -> MatchKind;
-
-    /// Returns the total number of matches for the given state ID.
-    ///
-    /// This has unspecified behavior if the given ID does not refer to a match
-    /// state.
-    fn match_len(&self, sid: StateID) -> usize;
-
-    /// Returns the pattern ID for the match state given by `sid` at the
-    /// `index` given.
-    ///
-    /// Typically, `index` is only ever greater than `0` when implementing an
-    /// overlapping search. Otherwise, it's likely that your search only cares
-    /// about reporting the first pattern ID in a match state.
-    ///
-    /// This has unspecified behavior if the given ID does not refer to a match
-    /// state, or if the index is greater than or equal to the total number of
-    /// matches in this match state.
-    fn match_pattern(&self, sid: StateID, index: usize) -> PatternID;
-
-    /// Returns the total number of patterns compiled into this automaton.
-    fn patterns_len(&self) -> usize;
-
-    /// Returns the length of the pattern for the given ID.
-    ///
-    /// This has unspecified behavior when given an invalid pattern
-    /// ID. A pattern ID is valid if and only if it is less than
-    /// `Automaton::patterns_len`.
-    fn pattern_len(&self, pid: PatternID) -> usize;
-
-    /// Returns the length, in bytes, of the shortest pattern in this
-    /// automaton.
-    fn min_pattern_len(&self) -> usize;
-
-    /// Returns the length, in bytes, of the longest pattern in this automaton.
-    fn max_pattern_len(&self) -> usize;
-
-    /// Returns the heap memory usage, in bytes, used by this automaton.
-    fn memory_usage(&self) -> usize;
-
-    /// Returns a prefilter, if available, that can be used to accelerate
-    /// searches for this automaton.
-    ///
-    /// The typical way this is used is when the start state is entered during
-    /// a search. When that happens, one can use a prefilter to skip ahead and
-    /// look for candidate matches without having to walk the automaton on the
-    /// bytes between candidates.
-    ///
-    /// Typically a prefilter is only available when there are a small (<100)
-    /// number of patterns built into the automaton.
-    fn prefilter(&self) -> Option<&Prefilter>;
-
-    /// Executes a non-overlapping search with this automaton using the given
-    /// configuration.
-    ///
-    /// See
-    /// [`AhoCorasick::try_find`](crate::AhoCorasick::try_find)
-    /// for more documentation and examples.
-    fn try_find(
-        &self,
-        input: &Input<'_>,
-    ) -> Result<Option<Match>, MatchError> {
-        try_find_fwd(&self, input)
-    }
-
-    /// Executes a overlapping search with this automaton using the given
-    /// configuration.
-    ///
-    /// See
-    /// [`AhoCorasick::try_find_overlapping`](crate::AhoCorasick::try_find_overlapping)
-    /// for more documentation and examples.
-    fn try_find_overlapping(
-        &self,
-        input: &Input<'_>,
-        state: &mut OverlappingState,
-    ) -> Result<(), MatchError> {
-        try_find_overlapping_fwd(&self, input, state)
-    }
-
-    /// Returns an iterator of non-overlapping matches with this automaton
-    /// using the given configuration.
-    ///
-    /// See
-    /// [`AhoCorasick::try_find_iter`](crate::AhoCorasick::try_find_iter)
-    /// for more documentation and examples.
-    fn try_find_iter<'a, 'h>(
-        &'a self,
-        input: Input<'h>,
-    ) -> Result<FindIter<'a, 'h, Self>, MatchError>
-    where
-        Self: Sized,
-    {
-        FindIter::new(self, input)
-    }
-
-    /// Returns an iterator of overlapping matches with this automaton
-    /// using the given configuration.
-    ///
-    /// See
-    /// [`AhoCorasick::try_find_overlapping_iter`](crate::AhoCorasick::try_find_overlapping_iter)
-    /// for more documentation and examples.
-    fn try_find_overlapping_iter<'a, 'h>(
-        &'a self,
-        input: Input<'h>,
-    ) -> Result<FindOverlappingIter<'a, 'h, Self>, MatchError>
-    where
-        Self: Sized,
-    {
-        if !self.match_kind().is_standard() {
-            return Err(MatchError::unsupported_overlapping(
-                self.match_kind(),
-            ));
-        }
-        //  We might consider lifting this restriction. The reason why I added
-        // it was to ban the combination of "anchored search" and "overlapping
-        // iteration." The match semantics aren't totally clear in that case.
-        // Should we allow *any* matches that are adjacent to *any* previous
-        // match? Or only following the most recent one? Or only matches
-        // that start at the beginning of the search? We might also elect to
-        // just keep this restriction in place, as callers should be able to
-        // implement it themselves if they want to.
-        if input.get_anchored().is_anchored() {
-            return Err(MatchError::invalid_input_anchored());
-        }
-        let _ = self.start_state(input.get_anchored())?;
-        let state = OverlappingState::start();
-        Ok(FindOverlappingIter { aut: self, input, state })
-    }
-
-    /// Replaces all non-overlapping matches in `haystack` with
-    /// strings from `replace_with` depending on the pattern that
-    /// matched. The `replace_with` slice must have length equal to
-    /// `Automaton::patterns_len`.
-    ///
-    /// See
-    /// [`AhoCorasick::try_replace_all`](crate::AhoCorasick::try_replace_all)
-    /// for more documentation and examples.
-    fn try_replace_all<B>(
-        &self,
-        haystack: &str,
-        replace_with: &[B],
-    ) -> Result<String, MatchError>
-    where
-        Self: Sized,
-        B: AsRef<str>,
-    {
-        assert_eq!(
-            replace_with.len(),
-            self.patterns_len(),
-            "replace_all requires a replacement for every pattern \
-             in the automaton"
-        );
-        let mut dst = String::with_capacity(haystack.len());
-        self.try_replace_all_with(haystack, &mut dst, |mat, _, dst| {
-            dst.push_str(replace_with[mat.pattern()].as_ref());
-            true
-        })?;
-        Ok(dst)
-    }
-
-    /// Replaces all non-overlapping matches in `haystack` with
-    /// strings from `replace_with` depending on the pattern that
-    /// matched. The `replace_with` slice must have length equal to
-    /// `Automaton::patterns_len`.
-    ///
-    /// See
-    /// [`AhoCorasick::try_replace_all_bytes`](crate::AhoCorasick::try_replace_all_bytes)
-    /// for more documentation and examples.
-    fn try_replace_all_bytes<B>(
-        &self,
-        haystack: &[u8],
-        replace_with: &[B],
-    ) -> Result<Vec<u8>, MatchError>
-    where
-        Self: Sized,
-        B: AsRef<[u8]>,
-    {
-        assert_eq!(
-            replace_with.len(),
-            self.patterns_len(),
-            "replace_all requires a replacement for every pattern \
-             in the automaton"
-        );
-        let mut dst = Vec::with_capacity(haystack.len());
-        self.try_replace_all_with_bytes(haystack, &mut dst, |mat, _, dst| {
-            dst.extend(replace_with[mat.pattern()].as_ref());
-            true
-        })?;
-        Ok(dst)
-    }
-
-    /// Replaces all non-overlapping matches in `haystack` by calling the
-    /// `replace_with` closure given.
-    ///
-    /// See
-    /// [`AhoCorasick::try_replace_all_with`](crate::AhoCorasick::try_replace_all_with)
-    /// for more documentation and examples.
-    fn try_replace_all_with<F>(
-        &self,
-        haystack: &str,
-        dst: &mut String,
-        mut replace_with: F,
-    ) -> Result<(), MatchError>
-    where
-        Self: Sized,
-        F: FnMut(&Match, &str, &mut String) -> bool,
-    {
-        let mut last_match = 0;
-        for m in self.try_find_iter(Input::new(haystack))? {
-            // Since there are no restrictions on what kinds of patterns are
-            // in an Aho-Corasick automaton, we might get matches that split
-            // a codepoint, or even matches of a partial codepoint. When that
-            // happens, we just skip the match.
-            if !haystack.is_char_boundary(m.start())
-                || !haystack.is_char_boundary(m.end())
-            {
-                continue;
-            }
-            dst.push_str(&haystack[last_match..m.start()]);
-            last_match = m.end();
-            if !replace_with(&m, &haystack[m.start()..m.end()], dst) {
-                break;
-            };
-        }
-        dst.push_str(&haystack[last_match..]);
-        Ok(())
-    }
-
-    /// Replaces all non-overlapping matches in `haystack` by calling the
-    /// `replace_with` closure given.
-    ///
-    /// See
-    /// [`AhoCorasick::try_replace_all_with_bytes`](crate::AhoCorasick::try_replace_all_with_bytes)
-    /// for more documentation and examples.
-    fn try_replace_all_with_bytes<F>(
-        &self,
-        haystack: &[u8],
-        dst: &mut Vec<u8>,
-        mut replace_with: F,
-    ) -> Result<(), MatchError>
-    where
-        Self: Sized,
-        F: FnMut(&Match, &[u8], &mut Vec<u8>) -> bool,
-    {
-        let mut last_match = 0;
-        for m in self.try_find_iter(Input::new(haystack))? {
-            dst.extend(&haystack[last_match..m.start()]);
-            last_match = m.end();
-            if !replace_with(&m, &haystack[m.start()..m.end()], dst) {
-                break;
-            };
-        }
-        dst.extend(&haystack[last_match..]);
-        Ok(())
-    }
-
-    /// Returns an iterator of non-overlapping matches with this automaton
-    /// from the stream given.
-    ///
-    /// See
-    /// [`AhoCorasick::try_stream_find_iter`](crate::AhoCorasick::try_stream_find_iter)
-    /// for more documentation and examples.
-    #[cfg(feature = "std")]
-    fn try_stream_find_iter<'a, R: std::io::Read>(
-        &'a self,
-        rdr: R,
-    ) -> Result<StreamFindIter<'a, Self, R>, MatchError>
-    where
-        Self: Sized,
-    {
-        Ok(StreamFindIter { it: StreamChunkIter::new(self, rdr)? })
-    }
-
-    /// Replaces all non-overlapping matches in `rdr` with strings from
-    /// `replace_with` depending on the pattern that matched, and writes the
-    /// result to `wtr`. The `replace_with` slice must have length equal to
-    /// `Automaton::patterns_len`.
-    ///
-    /// See
-    /// [`AhoCorasick::try_stream_replace_all`](crate::AhoCorasick::try_stream_replace_all)
-    /// for more documentation and examples.
-    #[cfg(feature = "std")]
-    fn try_stream_replace_all<R, W, B>(
-        &self,
-        rdr: R,
-        wtr: W,
-        replace_with: &[B],
-    ) -> std::io::Result<()>
-    where
-        Self: Sized,
-        R: std::io::Read,
-        W: std::io::Write,
-        B: AsRef<[u8]>,
-    {
-        assert_eq!(
-            replace_with.len(),
-            self.patterns_len(),
-            "streaming replace_all requires a replacement for every pattern \
-             in the automaton",
-        );
-        self.try_stream_replace_all_with(rdr, wtr, |mat, _, wtr| {
-            wtr.write_all(replace_with[mat.pattern()].as_ref())
-        })
-    }
-
-    /// Replaces all non-overlapping matches in `rdr` by calling the
-    /// `replace_with` closure given and writing the result to `wtr`.
-    ///
-    /// See
-    /// [`AhoCorasick::try_stream_replace_all_with`](crate::AhoCorasick::try_stream_replace_all_with)
-    /// for more documentation and examples.
-    #[cfg(feature = "std")]
-    fn try_stream_replace_all_with<R, W, F>(
-        &self,
-        rdr: R,
-        mut wtr: W,
-        mut replace_with: F,
-    ) -> std::io::Result<()>
-    where
-        Self: Sized,
-        R: std::io::Read,
-        W: std::io::Write,
-        F: FnMut(&Match, &[u8], &mut W) -> std::io::Result<()>,
-    {
-        let mut it = StreamChunkIter::new(self, rdr).map_err(|e| {
-            let kind = std::io::ErrorKind::Other;
-            std::io::Error::new(kind, e)
-        })?;
-        while let Some(result) = it.next() {
-            let chunk = result?;
-            match chunk {
-                StreamChunk::NonMatch { bytes, .. } => {
-                    wtr.write_all(bytes)?;
-                }
-                StreamChunk::Match { bytes, mat } => {
-                    replace_with(&mat, bytes, &mut wtr)?;
-                }
-            }
-        }
-        Ok(())
-    }
-}
-
-// SAFETY: This just defers to the underlying 'AcAutomaton' and thus inherits
-// its safety properties.
-unsafe impl<'a, A: Automaton + ?Sized> Automaton for &'a A {
-    #[inline(always)]
-    fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> {
-        (**self).start_state(anchored)
-    }
-
-    #[inline(always)]
-    fn next_state(
-        &self,
-        anchored: Anchored,
-        sid: StateID,
-        byte: u8,
-    ) -> StateID {
-        (**self).next_state(anchored, sid, byte)
-    }
-
-    #[inline(always)]
-    fn is_special(&self, sid: StateID) -> bool {
-        (**self).is_special(sid)
-    }
-
-    #[inline(always)]
-    fn is_dead(&self, sid: StateID) -> bool {
-        (**self).is_dead(sid)
-    }
-
-    #[inline(always)]
-    fn is_match(&self, sid: StateID) -> bool {
-        (**self).is_match(sid)
-    }
-
-    #[inline(always)]
-    fn is_start(&self, sid: StateID) -> bool {
-        (**self).is_start(sid)
-    }
-
-    #[inline(always)]
-    fn match_kind(&self) -> MatchKind {
-        (**self).match_kind()
-    }
-
-    #[inline(always)]
-    fn match_len(&self, sid: StateID) -> usize {
-        (**self).match_len(sid)
-    }
-
-    #[inline(always)]
-    fn match_pattern(&self, sid: StateID, index: usize) -> PatternID {
-        (**self).match_pattern(sid, index)
-    }
-
-    #[inline(always)]
-    fn patterns_len(&self) -> usize {
-        (**self).patterns_len()
-    }
-
-    #[inline(always)]
-    fn pattern_len(&self, pid: PatternID) -> usize {
-        (**self).pattern_len(pid)
-    }
-
-    #[inline(always)]
-    fn min_pattern_len(&self) -> usize {
-        (**self).min_pattern_len()
-    }
-
-    #[inline(always)]
-    fn max_pattern_len(&self) -> usize {
-        (**self).max_pattern_len()
-    }
-
-    #[inline(always)]
-    fn memory_usage(&self) -> usize {
-        (**self).memory_usage()
-    }
-
-    #[inline(always)]
-    fn prefilter(&self) -> Option<&Prefilter> {
-        (**self).prefilter()
-    }
-}
-
-/// Represents the current state of an overlapping search.
-///
-/// This is used for overlapping searches since they need to know something
-/// about the previous search. For example, when multiple patterns match at the
-/// same position, this state tracks the last reported pattern so that the next
-/// search knows whether to report another matching pattern or continue with
-/// the search at the next position. Additionally, it also tracks which state
-/// the last search call terminated in and the current offset of the search
-/// in the haystack.
-///
-/// This type provides limited introspection capabilities. The only thing a
-/// caller can do is construct it and pass it around to permit search routines
-/// to use it to track state, and to ask whether a match has been found.
-///
-/// Callers should always provide a fresh state constructed via
-/// [`OverlappingState::start`] when starting a new search. That same state
-/// should be reused for subsequent searches on the same `Input`. The state
-/// given will advance through the haystack itself. Callers can detect the end
-/// of a search when neither an error nor a match is returned.
-///
-/// # Example
-///
-/// This example shows how to manually iterate over all overlapping matches. If
-/// you need this, you might consider using
-/// [`AhoCorasick::find_overlapping_iter`](crate::AhoCorasick::find_overlapping_iter)
-/// instead, but this shows how to correctly use an `OverlappingState`.
-///
-/// ```
-/// use aho_corasick::{
-///     automaton::OverlappingState,
-///     AhoCorasick, Input, Match,
-/// };
-///
-/// let patterns = &["append", "appendage", "app"];
-/// let haystack = "append the app to the appendage";
-///
-/// let ac = AhoCorasick::new(patterns).unwrap();
-/// let mut state = OverlappingState::start();
-/// let mut matches = vec![];
-///
-/// loop {
-///     ac.find_overlapping(haystack, &mut state);
-///     let mat = match state.get_match() {
-///         None => break,
-///         Some(mat) => mat,
-///     };
-///     matches.push(mat);
-/// }
-/// let expected = vec![
-///     Match::must(2, 0..3),
-///     Match::must(0, 0..6),
-///     Match::must(2, 11..14),
-///     Match::must(2, 22..25),
-///     Match::must(0, 22..28),
-///     Match::must(1, 22..31),
-/// ];
-/// assert_eq!(expected, matches);
-/// ```
-#[derive(Clone, Debug)]
-pub struct OverlappingState {
-    /// The match reported by the most recent overlapping search to use this
-    /// state.
-    ///
-    /// If a search does not find any matches, then it is expected to clear
-    /// this value.
-    mat: Option<Match>,
-    /// The state ID of the state at which the search was in when the call
-    /// terminated. When this is a match state, `last_match` must be set to a
-    /// non-None value.
-    ///
-    /// A `None` value indicates the start state of the corresponding
-    /// automaton. We cannot use the actual ID, since any one automaton may
-    /// have many start states, and which one is in use depends on search-time
-    /// factors (such as whether the search is anchored or not).
-    id: Option<StateID>,
-    /// The position of the search.
-    ///
-    /// When `id` is None (i.e., we are starting a search), this is set to
-    /// the beginning of the search as given by the caller regardless of its
-    /// current value. Subsequent calls to an overlapping search pick up at
-    /// this offset.
-    at: usize,
-    /// The index into the matching patterns of the next match to report if the
-    /// current state is a match state. Note that this may be 1 greater than
-    /// the total number of matches to report for the current match state. (In
-    /// which case, no more matches should be reported at the current position
-    /// and the search should advance to the next position.)
-    next_match_index: Option<usize>,
-}
-
-impl OverlappingState {
-    /// Create a new overlapping state that begins at the start state.
-    pub fn start() -> OverlappingState {
-        OverlappingState { mat: None, id: None, at: 0, next_match_index: None }
-    }
-
-    /// Return the match result of the most recent search to execute with this
-    /// state.
-    ///
-    /// Every search will clear this result automatically, such that if no
-    /// match is found, this will always correctly report `None`.
-    pub fn get_match(&self) -> Option<Match> {
-        self.mat
-    }
-}
-
-/// An iterator of non-overlapping matches in a particular haystack.
-///
-/// This iterator yields matches according to the [`MatchKind`] used by this
-/// automaton.
-///
-/// This iterator is constructed via the [`Automaton::try_find_iter`] method.
-///
-/// The type variable `A` refers to the implementation of the [`Automaton`]
-/// trait used to execute the search.
-///
-/// The lifetime `'a` refers to the lifetime of the [`Automaton`]
-/// implementation.
-///
-/// The lifetime `'h` refers to the lifetime of the haystack being searched.
-#[derive(Debug)]
-pub struct FindIter<'a, 'h, A> {
-    /// The automaton used to drive the search.
-    aut: &'a A,
-    /// The input parameters to give to each search call.
-    ///
-    /// The start position of the search is mutated during iteration.
-    input: Input<'h>,
-    /// Records the end offset of the most recent match. This is necessary to
-    /// handle a corner case for preventing empty matches from overlapping with
-    /// the ending bounds of a prior match.
-    last_match_end: Option<usize>,
-}
-
-impl<'a, 'h, A: Automaton> FindIter<'a, 'h, A> {
-    /// Creates a new non-overlapping iterator. If the given automaton would
-    /// return an error on a search with the given input configuration, then
-    /// that error is returned here.
-    fn new(
-        aut: &'a A,
-        input: Input<'h>,
-    ) -> Result<FindIter<'a, 'h, A>, MatchError> {
-        // The only way this search can fail is if we cannot retrieve the start
-        // state. e.g., Asking for an anchored search when only unanchored
-        // searches are supported.
-        let _ = aut.start_state(input.get_anchored())?;
-        Ok(FindIter { aut, input, last_match_end: None })
-    }
-
-    /// Executes a search and returns a match if one is found.
-    ///
-    /// This does not advance the input forward. It just executes a search
-    /// based on the current configuration/offsets.
-    fn search(&self) -> Option<Match> {
-        // The unwrap is OK here because we check at iterator construction time
-        // that no subsequent search call (using the same configuration) will
-        // ever return an error.
-        self.aut
-            .try_find(&self.input)
-            .expect("already checked that no match error can occur")
-    }
-
-    /// Handles the special case of an empty match by ensuring that 1) the
-    /// iterator always advances and 2) empty matches never overlap with other
-    /// matches.
-    ///
-    /// (1) is necessary because we principally make progress by setting the
-    /// starting location of the next search to the ending location of the last
-    /// match. But if a match is empty, then this results in a search that does
-    /// not advance and thus does not terminate.
-    ///
-    /// (2) is not strictly necessary, but makes intuitive sense and matches
-    /// the presiding behavior of most general purpose regex engines.
-    /// (Obviously this crate isn't a regex engine, but we choose to match
-    /// their semantics.) The "intuitive sense" here is that we want to report
-    /// NON-overlapping matches. So for example, given the patterns 'a' and
-    /// '' (an empty string) against the haystack 'a', without the special
-    /// handling, you'd get the matches [0, 1) and [1, 1), where the latter
-    /// overlaps with the end bounds of the former.
-    ///
-    /// Note that we mark this cold and forcefully prevent inlining because
-    /// handling empty matches like this is extremely rare and does require
-    /// quite a bit of code, comparatively. Keeping this code out of the main
-    /// iterator function keeps it smaller and more amenable to inlining
-    /// itself.
-    #[cold]
-    #[inline(never)]
-    fn handle_overlapping_empty_match(
-        &mut self,
-        mut m: Match,
-    ) -> Option<Match> {
-        assert!(m.is_empty());
-        if Some(m.end()) == self.last_match_end {
-            self.input.set_start(self.input.start().checked_add(1).unwrap());
-            m = self.search()?;
-        }
-        Some(m)
-    }
-}
-
-impl<'a, 'h, A: Automaton> Iterator for FindIter<'a, 'h, A> {
-    type Item = Match;
-
-    #[inline(always)]
-    fn next(&mut self) -> Option<Match> {
-        let mut m = self.search()?;
-        if m.is_empty() {
-            m = self.handle_overlapping_empty_match(m)?;
-        }
-        self.input.set_start(m.end());
-        self.last_match_end = Some(m.end());
-        Some(m)
-    }
-}
-
-/// An iterator of overlapping matches in a particular haystack.
-///
-/// This iterator will report all possible matches in a particular haystack,
-/// even when the matches overlap.
-///
-/// This iterator is constructed via the
-/// [`Automaton::try_find_overlapping_iter`] method.
-///
-/// The type variable `A` refers to the implementation of the [`Automaton`]
-/// trait used to execute the search.
-///
-/// The lifetime `'a` refers to the lifetime of the [`Automaton`]
-/// implementation.
-///
-/// The lifetime `'h` refers to the lifetime of the haystack being searched.
-#[derive(Debug)]
-pub struct FindOverlappingIter<'a, 'h, A> {
-    aut: &'a A,
-    input: Input<'h>,
-    state: OverlappingState,
-}
-
-impl<'a, 'h, A: Automaton> Iterator for FindOverlappingIter<'a, 'h, A> {
-    type Item = Match;
-
-    #[inline(always)]
-    fn next(&mut self) -> Option<Match> {
-        self.aut
-            .try_find_overlapping(&self.input, &mut self.state)
-            .expect("already checked that no match error can occur here");
-        self.state.get_match()
-    }
-}
-
-/// An iterator that reports matches in a stream.
-///
-/// This iterator yields elements of type `io::Result<Match>`, where an error
-/// is reported if there was a problem reading from the underlying stream.
-/// The iterator terminates only when the underlying stream reaches `EOF`.
-///
-/// This iterator is constructed via the [`Automaton::try_stream_find_iter`]
-/// method.
-///
-/// The type variable `A` refers to the implementation of the [`Automaton`]
-/// trait used to execute the search.
-///
-/// The type variable `R` refers to the `io::Read` stream that is being read
-/// from.
-///
-/// The lifetime `'a` refers to the lifetime of the [`Automaton`]
-/// implementation.
-#[cfg(feature = "std")]
-#[derive(Debug)]
-pub struct StreamFindIter<'a, A, R> {
-    it: StreamChunkIter<'a, A, R>,
-}
-
-#[cfg(feature = "std")]
-impl<'a, A: Automaton, R: std::io::Read> Iterator
-    for StreamFindIter<'a, A, R>
-{
-    type Item = std::io::Result<Match>;
-
-    fn next(&mut self) -> Option<std::io::Result<Match>> {
-        loop {
-            match self.it.next() {
-                None => return None,
-                Some(Err(err)) => return Some(Err(err)),
-                Some(Ok(StreamChunk::NonMatch { .. })) => {}
-                Some(Ok(StreamChunk::Match { mat, .. })) => {
-                    return Some(Ok(mat));
-                }
-            }
-        }
-    }
-}
-
-/// An iterator that reports matches in a stream.
-///
-/// (This doesn't actually implement the `Iterator` trait because it returns
-/// something with a lifetime attached to a buffer it owns, but that's OK. It
-/// still has a `next` method and is iterator-like enough to be fine.)
-///
-/// This iterator yields elements of type `io::Result<StreamChunk>`, where
-/// an error is reported if there was a problem reading from the underlying
-/// stream. The iterator terminates only when the underlying stream reaches
-/// `EOF`.
-///
-/// The idea here is that each chunk represents either a match or a non-match,
-/// and if you concatenated all of the chunks together, you'd reproduce the
-/// entire contents of the stream, byte-for-byte.
-///
-/// This chunk machinery is a bit complicated and it isn't strictly required
-/// for a stream searcher that just reports matches. But we do need something
-/// like this to deal with the "replacement" API, which needs to know which
-/// chunks it can copy and which it needs to replace.
-#[cfg(feature = "std")]
-#[derive(Debug)]
-struct StreamChunkIter<'a, A, R> {
-    /// The underlying automaton to do the search.
-    aut: &'a A,
-    /// The source of bytes we read from.
-    rdr: R,
-    /// A roll buffer for managing bytes from `rdr`. Basically, this is used
-    /// to handle the case of a match that is split by two different
-    /// calls to `rdr.read()`. This isn't strictly needed if all we needed to
-    /// do was report matches, but here we are reporting chunks of non-matches
-    /// and matches and in order to do that, we really just cannot treat our
-    /// stream as non-overlapping blocks of bytes. We need to permit some
-    /// overlap while we retain bytes from a previous `read` call in memory.
-    buf: crate::util::buffer::Buffer,
-    /// The unanchored starting state of this automaton.
-    start: StateID,
-    /// The state of the automaton.
-    sid: StateID,
-    /// The absolute position over the entire stream.
-    absolute_pos: usize,
-    /// The position we're currently at within `buf`.
-    buffer_pos: usize,
-    /// The buffer position of the end of the bytes that we last returned
-    /// to the caller. Basically, whenever we find a match, we look to see if
-    /// there is a difference between where the match started and the position
-    /// of the last byte we returned to the caller. If there's a difference,
-    /// then we need to return a 'NonMatch' chunk.
-    buffer_reported_pos: usize,
-}
-
-#[cfg(feature = "std")]
-impl<'a, A: Automaton, R: std::io::Read> StreamChunkIter<'a, A, R> {
-    fn new(
-        aut: &'a A,
-        rdr: R,
-    ) -> Result<StreamChunkIter<'a, A, R>, MatchError> {
-        // This restriction is a carry-over from older versions of this crate.
-        // I didn't have the bandwidth to think through how to handle, say,
-        // leftmost-first or leftmost-longest matching, but... it should be
-        // possible? The main problem is that once you see a match state in
-        // leftmost-first semantics, you can't just stop at that point and
-        // report a match. You have to keep going until you either hit a dead
-        // state or EOF. So how do you know when you'll hit a dead state? Well,
-        // you don't. With Aho-Corasick, I believe you can put a bound on it
-        // and say, "once a match has been seen, you'll need to scan forward at
-        // most N bytes" where N=aut.max_pattern_len().
-        //
-        // Which is fine, but it does mean that state about whether we're still
-        // looking for a dead state or not needs to persist across buffer
-        // refills. Which this code doesn't really handle. It does preserve
-        // *some* state across buffer refills, basically ensuring that a match
-        // span is always in memory.
-        if !aut.match_kind().is_standard() {
-            return Err(MatchError::unsupported_stream(aut.match_kind()));
-        }
-        // This is kind of a cop-out, but empty matches are SUPER annoying.
-        // If we know they can't happen (which is what we enforce here), then
-        // it makes a lot of logic much simpler. With that said, I'm open to
-        // supporting this case, but we need to define proper semantics for it
-        // first. It wasn't totally clear to me what it should do at the time
-        // of writing, so I decided to just be conservative.
-        //
-        // It also seems like a very weird case to support anyway. Why search a
-        // stream if you're just going to get a match at every position?
-        //
-        // ¯\_(ツ)_/¯
-        if aut.min_pattern_len() == 0 {
-            return Err(MatchError::unsupported_empty());
-        }
-        let start = aut.start_state(Anchored::No)?;
-        Ok(StreamChunkIter {
-            aut,
-            rdr,
-            buf: crate::util::buffer::Buffer::new(aut.max_pattern_len()),
-            start,
-            sid: start,
-            absolute_pos: 0,
-            buffer_pos: 0,
-            buffer_reported_pos: 0,
-        })
-    }
-
-    fn next(&mut self) -> Option<std::io::Result<StreamChunk>> {
-        // This code is pretty gnarly. It IS simpler than the equivalent code
-        // in the previous aho-corasick release, in part because we inline
-        // automaton traversal here and also in part because we have abdicated
-        // support for automatons that contain an empty pattern.
-        //
-        // I suspect this code could be made a bit simpler by designing a
-        // better buffer abstraction.
-        //
-        // But in general, this code is basically write-only. So you'll need
-        // to go through it step-by-step to grok it. One of the key bits of
-        // complexity is tracking a few different offsets. 'buffer_pos' is
-        // where we are in the buffer for search. 'buffer_reported_pos' is the
-        // position immediately following the last byte in the buffer that
-        // we've returned to the caller. And 'absolute_pos' is the overall
-        // current absolute position of the search in the entire stream, and
-        // this is what match spans are reported in terms of.
-        loop {
-            if self.aut.is_match(self.sid) {
-                let mat = self.get_match();
-                if let Some(r) = self.get_non_match_chunk(mat) {
-                    self.buffer_reported_pos += r.len();
-                    let bytes = &self.buf.buffer()[r];
-                    return Some(Ok(StreamChunk::NonMatch { bytes }));
-                }
-                self.sid = self.start;
-                let r = self.get_match_chunk(mat);
-                self.buffer_reported_pos += r.len();
-                let bytes = &self.buf.buffer()[r];
-                return Some(Ok(StreamChunk::Match { bytes, mat }));
-            }
-            if self.buffer_pos >= self.buf.buffer().len() {
-                if let Some(r) = self.get_pre_roll_non_match_chunk() {
-                    self.buffer_reported_pos += r.len();
-                    let bytes = &self.buf.buffer()[r];
-                    return Some(Ok(StreamChunk::NonMatch { bytes }));
-                }
-                if self.buf.buffer().len() >= self.buf.min_buffer_len() {
-                    self.buffer_pos = self.buf.min_buffer_len();
-                    self.buffer_reported_pos -=
-                        self.buf.buffer().len() - self.buf.min_buffer_len();
-                    self.buf.roll();
-                }
-                match self.buf.fill(&mut self.rdr) {
-                    Err(err) => return Some(Err(err)),
-                    Ok(true) => {}
-                    Ok(false) => {
-                        // We've hit EOF, but if there are still some
-                        // unreported bytes remaining, return them now.
-                        if let Some(r) = self.get_eof_non_match_chunk() {
-                            self.buffer_reported_pos += r.len();
-                            let bytes = &self.buf.buffer()[r];
-                            return Some(Ok(StreamChunk::NonMatch { bytes }));
-                        }
-                        // We've reported everything!
-                        return None;
-                    }
-                }
-            }
-            let start = self.absolute_pos;
-            for &byte in self.buf.buffer()[self.buffer_pos..].iter() {
-                self.sid = self.aut.next_state(Anchored::No, self.sid, byte);
-                self.absolute_pos += 1;
-                if self.aut.is_match(self.sid) {
-                    break;
-                }
-            }
-            self.buffer_pos += self.absolute_pos - start;
-        }
-    }
-
-    /// Return a match chunk for the given match. It is assumed that the match
-    /// ends at the current `buffer_pos`.
-    fn get_match_chunk(&self, mat: Match) -> core::ops::Range<usize> {
-        let start = self.buffer_pos - mat.len();
-        let end = self.buffer_pos;
-        start..end
-    }
-
-    /// Return a non-match chunk, if necessary, just before reporting a match.
-    /// This returns `None` if there is nothing to report. Otherwise, this
-    /// assumes that the given match ends at the current `buffer_pos`.
-    fn get_non_match_chunk(
-        &self,
-        mat: Match,
-    ) -> Option<core::ops::Range<usize>> {
-        let buffer_mat_start = self.buffer_pos - mat.len();
-        if buffer_mat_start > self.buffer_reported_pos {
-            let start = self.buffer_reported_pos;
-            let end = buffer_mat_start;
-            return Some(start..end);
-        }
-        None
-    }
-
-    /// Look for any bytes that should be reported as a non-match just before
-    /// rolling the buffer.
-    ///
-    /// Note that this only reports bytes up to `buffer.len() -
-    /// min_buffer_len`, as it's not possible to know whether the bytes
-    /// following that will participate in a match or not.
-    fn get_pre_roll_non_match_chunk(&self) -> Option<core::ops::Range<usize>> {
-        let end =
-            self.buf.buffer().len().saturating_sub(self.buf.min_buffer_len());
-        if self.buffer_reported_pos < end {
-            return Some(self.buffer_reported_pos..end);
-        }
-        None
-    }
-
-    /// Return any unreported bytes as a non-match up to the end of the buffer.
-    ///
-    /// This should only be called when the entire contents of the buffer have
-    /// been searched and EOF has been hit when trying to fill the buffer.
-    fn get_eof_non_match_chunk(&self) -> Option<core::ops::Range<usize>> {
-        if self.buffer_reported_pos < self.buf.buffer().len() {
-            return Some(self.buffer_reported_pos..self.buf.buffer().len());
-        }
-        None
-    }
-
-    /// Return the match at the current position for the current state.
-    ///
-    /// This panics if `self.aut.is_match(self.sid)` isn't true.
-    fn get_match(&self) -> Match {
-        get_match(self.aut, self.sid, 0, self.absolute_pos)
-    }
-}
-
-/// A single chunk yielded by the stream chunk iterator.
-///
-/// The `'r` lifetime refers to the lifetime of the stream chunk iterator.
-#[cfg(feature = "std")]
-#[derive(Debug)]
-enum StreamChunk<'r> {
-    /// A chunk that does not contain any matches.
-    NonMatch { bytes: &'r [u8] },
-    /// A chunk that precisely contains a match.
-    Match { bytes: &'r [u8], mat: Match },
-}
-
-#[inline(never)]
-pub(crate) fn try_find_fwd<A: Automaton + ?Sized>(
-    aut: &A,
-    input: &Input<'_>,
-) -> Result<Option<Match>, MatchError> {
-    if input.is_done() {
-        return Ok(None);
-    }
-    let earliest = aut.match_kind().is_standard() || input.get_earliest();
-    if input.get_anchored().is_anchored() {
-        try_find_fwd_imp(aut, input, None, Anchored::Yes, earliest)
-    } else if let Some(pre) = aut.prefilter() {
-        if earliest {
-            try_find_fwd_imp(aut, input, Some(pre), Anchored::No, true)
-        } else {
-            try_find_fwd_imp(aut, input, Some(pre), Anchored::No, false)
-        }
-    } else {
-        if earliest {
-            try_find_fwd_imp(aut, input, None, Anchored::No, true)
-        } else {
-            try_find_fwd_imp(aut, input, None, Anchored::No, false)
-        }
-    }
-}
-
-#[inline(always)]
-fn try_find_fwd_imp<A: Automaton + ?Sized>(
-    aut: &A,
-    input: &Input<'_>,
-    pre: Option<&Prefilter>,
-    anchored: Anchored,
-    earliest: bool,
-) -> Result<Option<Match>, MatchError> {
-    let mut sid = aut.start_state(input.get_anchored())?;
-    let mut at = input.start();
-    let mut mat = None;
-    if aut.is_match(sid) {
-        mat = Some(get_match(aut, sid, 0, at));
-        if earliest {
-            return Ok(mat);
-        }
-    }
-    if let Some(pre) = pre {
-        match pre.find_in(input.haystack(), input.get_span()) {
-            Candidate::None => return Ok(None),
-            Candidate::Match(m) => return Ok(Some(m)),
-            Candidate::PossibleStartOfMatch(i) => {
-                at = i;
-            }
-        }
-    }
-    while at < input.end() {
-        // I've tried unrolling this loop and eliding bounds checks, but no
-        // matter what I did, I could not observe a consistent improvement on
-        // any benchmark I could devise. (If someone wants to re-litigate this,
-        // the way to do it is to add an 'next_state_unchecked' method to the
-        // 'Automaton' trait with a default impl that uses 'next_state'. Then
-        // use 'aut.next_state_unchecked' here and implement it on DFA using
-        // unchecked slice index acces.)
-        sid = aut.next_state(anchored, sid, input.haystack()[at]);
-        if aut.is_special(sid) {
-            if aut.is_dead(sid) {
-                return Ok(mat);
-            } else if aut.is_match(sid) {
-                // We use 'at + 1' here because the match state is entered
-                // at the last byte of the pattern. Since we use half-open
-                // intervals, the end of the range of the match is one past the
-                // last byte.
-                let m = get_match(aut, sid, 0, at + 1);
-                // For the automata in this crate, we make a size trade off
-                // where we reuse the same automaton for both anchored and
-                // unanchored searches. We achieve this, principally, by simply
-                // not following failure transitions while computing the next
-                // state. Instead, if we fail to find the next state, we return
-                // a dead state, which instructs the search to stop. (This
-                // is why 'next_state' needs to know whether the search is
-                // anchored or not.) In addition, we have different start
-                // states for anchored and unanchored searches. The latter has
-                // a self-loop where as the former does not.
-                //
-                // In this way, we can use the same trie to execute both
-                // anchored and unanchored searches. There is a catch though.
-                // When building an Aho-Corasick automaton for unanchored
-                // searches, we copy matches from match states to other states
-                // (which would otherwise not be match states) if they are
-                // reachable via a failure transition. In the case of an
-                // anchored search, we *specifically* do not want to report
-                // these matches because they represent matches that start past
-                // the beginning of the search.
-                //
-                // Now we could tweak the automaton somehow to differentiate
-                // anchored from unanchored match states, but this would make
-                // 'aut.is_match' and potentially 'aut.is_special' slower. And
-                // also make the automaton itself more complex.
-                //
-                // Instead, we insert a special hack: if the search is
-                // anchored, we simply ignore matches that don't begin at
-                // the start of the search. This is not quite ideal, but we
-                // do specialize this function in such a way that unanchored
-                // searches don't pay for this additional branch. While this
-                // might cause a search to continue on for more than it
-                // otherwise optimally would, it will be no more than the
-                // longest pattern in the automaton. The reason for this is
-                // that we ensure we don't follow failure transitions during
-                // an anchored search. Combined with using a different anchored
-                // starting state with no self-loop, we guarantee that we'll
-                // at worst move through a number of transitions equal to the
-                // longest pattern.
-                //
-                // Now for DFAs, the whole point of them is to eliminate
-                // failure transitions entirely. So there is no way to say "if
-                // it's an anchored search don't follow failure transitions."
-                // Instead, we actually have to build two entirely separate
-                // automatons into the transition table. One with failure
-                // transitions built into it and another that is effectively
-                // just an encoding of the base trie into a transition table.
-                // DFAs still need this check though, because the match states
-                // still carry matches only reachable via a failure transition.
-                // Why? Because removing them seems difficult, although I
-                // haven't given it a lot of thought.
-                if !(anchored.is_anchored() && m.start() > input.start()) {
-                    mat = Some(m);
-                    if earliest {
-                        return Ok(mat);
-                    }
-                }
-            } else if let Some(pre) = pre {
-                // If we're here, we know it's a special state that is not a
-                // dead or a match state AND that a prefilter is active. Thus,
-                // it must be a start state.
-                debug_assert!(aut.is_start(sid));
-                // We don't care about 'Candidate::Match' here because if such
-                // a match were possible, it would have been returned above
-                // when we run the prefilter before walking the automaton.
-                let span = Span::from(at..input.end());
-                match pre.find_in(input.haystack(), span).into_option() {
-                    None => return Ok(None),
-                    Some(i) => {
-                        if i > at {
-                            at = i;
-                            continue;
-                        }
-                    }
-                }
-            } else {
-                // When pre.is_none(), then starting states should not be
-                // treated as special. That is, without a prefilter, is_special
-                // should only return true when the state is a dead or a match
-                // state.
-                //
-                // It is possible to execute a search without a prefilter even
-                // when the underlying searcher has one: an anchored search.
-                // But in this case, the automaton makes it impossible to move
-                // back to the start state by construction, and thus, we should
-                // never reach this branch.
-                debug_assert!(false, "unreachable");
-            }
-        }
-        at += 1;
-    }
-    Ok(mat)
-}
-
-#[inline(never)]
-fn try_find_overlapping_fwd<A: Automaton + ?Sized>(
-    aut: &A,
-    input: &Input<'_>,
-    state: &mut OverlappingState,
-) -> Result<(), MatchError> {
-    state.mat = None;
-    if input.is_done() {
-        return Ok(());
-    }
-    // Searching with a pattern ID is always anchored, so we should only ever
-    // use a prefilter when no pattern ID is given.
-    if aut.prefilter().is_some() && !input.get_anchored().is_anchored() {
-        let pre = aut.prefilter().unwrap();
-        try_find_overlapping_fwd_imp(aut, input, Some(pre), state)
-    } else {
-        try_find_overlapping_fwd_imp(aut, input, None, state)
-    }
-}
-
-#[inline(always)]
-fn try_find_overlapping_fwd_imp<A: Automaton + ?Sized>(
-    aut: &A,
-    input: &Input<'_>,
-    pre: Option<&Prefilter>,
-    state: &mut OverlappingState,
-) -> Result<(), MatchError> {
-    let mut sid = match state.id {
-        None => {
-            let sid = aut.start_state(input.get_anchored())?;
-            // Handle the case where the start state is a match state. That is,
-            // the empty string is in our automaton. We report every match we
-            // can here before moving on and updating 'state.at' and 'state.id'
-            // to find more matches in other parts of the haystack.
-            if aut.is_match(sid) {
-                let i = state.next_match_index.unwrap_or(0);
-                let len = aut.match_len(sid);
-                if i < len {
-                    state.next_match_index = Some(i + 1);
-                    state.mat = Some(get_match(aut, sid, i, input.start()));
-                    return Ok(());
-                }
-            }
-            state.at = input.start();
-            state.id = Some(sid);
-            state.next_match_index = None;
-            state.mat = None;
-            sid
-        }
-        Some(sid) => {
-            // If we still have matches left to report in this state then
-            // report them until we've exhausted them. Only after that do we
-            // advance to the next offset in the haystack.
-            if let Some(i) = state.next_match_index {
-                let len = aut.match_len(sid);
-                if i < len {
-                    state.next_match_index = Some(i + 1);
-                    state.mat = Some(get_match(aut, sid, i, state.at + 1));
-                    return Ok(());
-                }
-                // Once we've reported all matches at a given position, we need
-                // to advance the search to the next position.
-                state.at += 1;
-                state.next_match_index = None;
-                state.mat = None;
-            }
-            sid
-        }
-    };
-    while state.at < input.end() {
-        sid = aut.next_state(
-            input.get_anchored(),
-            sid,
-            input.haystack()[state.at],
-        );
-        if aut.is_special(sid) {
-            state.id = Some(sid);
-            if aut.is_dead(sid) {
-                return Ok(());
-            } else if aut.is_match(sid) {
-                state.next_match_index = Some(1);
-                state.mat = Some(get_match(aut, sid, 0, state.at + 1));
-                return Ok(());
-            } else if let Some(pre) = pre {
-                // If we're here, we know it's a special state that is not a
-                // dead or a match state AND that a prefilter is active. Thus,
-                // it must be a start state.
-                debug_assert!(aut.is_start(sid));
-                let span = Span::from(state.at..input.end());
-                match pre.find_in(input.haystack(), span).into_option() {
-                    None => return Ok(()),
-                    Some(i) => {
-                        if i > state.at {
-                            state.at = i;
-                            continue;
-                        }
-                    }
-                }
-            } else {
-                // When pre.is_none(), then starting states should not be
-                // treated as special. That is, without a prefilter, is_special
-                // should only return true when the state is a dead or a match
-                // state.
-                //
-                // ... except for one special case: in stream searching, we
-                // currently call overlapping search with a 'None' prefilter,
-                // regardless of whether one exists or not, because stream
-                // searching can't currently deal with prefilters correctly in
-                // all cases.
-            }
-        }
-        state.at += 1;
-    }
-    state.id = Some(sid);
-    Ok(())
-}
-
-#[inline(always)]
-fn get_match<A: Automaton + ?Sized>(
-    aut: &A,
-    sid: StateID,
-    index: usize,
-    at: usize,
-) -> Match {
-    let pid = aut.match_pattern(sid, index);
-    let len = aut.pattern_len(pid);
-    Match::new(pid, (at - len)..at)
-}
-
-/// Write a prefix "state" indicator for fmt::Debug impls. It always writes
-/// exactly two printable bytes to the given formatter.
-///
-/// Specifically, this tries to succinctly distinguish the different types of
-/// states: dead states, start states and match states. It even accounts for
-/// the possible overlappings of different state types. (The only possible
-/// overlapping is that of match and start states.)
-pub(crate) fn fmt_state_indicator<A: Automaton>(
-    f: &mut core::fmt::Formatter<'_>,
-    aut: A,
-    id: StateID,
-) -> core::fmt::Result {
-    if aut.is_dead(id) {
-        write!(f, "D ")?;
-    } else if aut.is_match(id) {
-        if aut.is_start(id) {
-            write!(f, "*>")?;
-        } else {
-            write!(f, "* ")?;
-        }
-    } else if aut.is_start(id) {
-        write!(f, " >")?;
-    } else {
-        write!(f, "  ")?;
-    }
-    Ok(())
-}
-
-/// Return an iterator of transitions in a sparse format given an iterator
-/// of all explicitly defined transitions. The iterator yields ranges of
-/// transitions, such that any adjacent transitions mapped to the same
-/// state are combined into a single range.
-pub(crate) fn sparse_transitions<'a>(
-    mut it: impl Iterator<Item = (u8, StateID)> + 'a,
-) -> impl Iterator<Item = (u8, u8, StateID)> + 'a {
-    let mut cur: Option<(u8, u8, StateID)> = None;
-    core::iter::from_fn(move || {
-        while let Some((class, next)) = it.next() {
-            let (prev_start, prev_end, prev_next) = match cur {
-                Some(x) => x,
-                None => {
-                    cur = Some((class, class, next));
-                    continue;
-                }
-            };
-            if prev_next == next {
-                cur = Some((prev_start, class, prev_next));
-            } else {
-                cur = Some((class, class, next));
-                return Some((prev_start, prev_end, prev_next));
-            }
-        }
-        if let Some((start, end, next)) = cur.take() {
-            return Some((start, end, next));
-        }
-        None
-    })
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/dfa.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/dfa.rs
deleted file mode 100644
index eabd15b7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/dfa.rs
+++ /dev/null
@@ -1,835 +0,0 @@
-/*!
-Provides direct access to a DFA implementation of Aho-Corasick.
-
-This is a low-level API that generally only needs to be used in niche
-circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick)
-instead of a DFA directly. Using an `DFA` directly is typically only necessary
-when one needs access to the [`Automaton`] trait implementation.
-*/
-
-use alloc::{vec, vec::Vec};
-
-use crate::{
-    automaton::Automaton,
-    nfa::noncontiguous,
-    util::{
-        alphabet::ByteClasses,
-        error::{BuildError, MatchError},
-        int::{Usize, U32},
-        prefilter::Prefilter,
-        primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID},
-        search::{Anchored, MatchKind, StartKind},
-        special::Special,
-    },
-};
-
-/// A DFA implementation of Aho-Corasick.
-///
-/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of
-/// this type directly. Using a `DFA` directly is typically only necessary when
-/// one needs access to the [`Automaton`] trait implementation.
-///
-/// This DFA can only be built by first constructing a [`noncontiguous::NFA`].
-/// Both [`DFA::new`] and [`Builder::build`] do this for you automatically, but
-/// [`Builder::build_from_noncontiguous`] permits doing it explicitly.
-///
-/// A DFA provides the best possible search performance (in this crate) via two
-/// mechanisms:
-///
-/// * All states use a dense representation for their transitions.
-/// * All failure transitions are pre-computed such that they are never
-/// explicitly handled at search time.
-///
-/// These two facts combined mean that every state transition is performed
-/// using a constant number of instructions. However, this comes at
-/// great cost. The memory usage of a DFA can be quite exorbitant.
-/// It is potentially multiple orders of magnitude greater than a
-/// [`contiguous::NFA`](crate::nfa::contiguous::NFA) for example. In exchange,
-/// a DFA will typically have better search speed than a `contiguous::NFA`, but
-/// not by orders of magnitude.
-///
-/// Unless you have a small number of patterns or memory usage is not a concern
-/// and search performance is critical, a DFA is usually not the best choice.
-///
-/// Moreover, unlike the NFAs in this crate, it is costly for a DFA to
-/// support for anchored and unanchored search configurations. Namely,
-/// since failure transitions are pre-computed, supporting both anchored
-/// and unanchored searches requires a duplication of the transition table,
-/// making the memory usage of such a DFA ever bigger. (The NFAs in this crate
-/// unconditionally support both anchored and unanchored searches because there
-/// is essentially no added cost for doing so.) It is for this reason that
-/// a DFA's support for anchored and unanchored searches can be configured
-/// via [`Builder::start_kind`]. By default, a DFA only supports unanchored
-/// searches.
-///
-/// # Example
-///
-/// This example shows how to build an `DFA` directly and use it to execute
-/// [`Automaton::try_find`]:
-///
-/// ```
-/// use aho_corasick::{
-///     automaton::Automaton,
-///     dfa::DFA,
-///     Input, Match,
-/// };
-///
-/// let patterns = &["b", "abc", "abcd"];
-/// let haystack = "abcd";
-///
-/// let nfa = DFA::new(patterns).unwrap();
-/// assert_eq!(
-///     Some(Match::must(0, 1..2)),
-///     nfa.try_find(&Input::new(haystack))?,
-/// );
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-///
-/// It is also possible to implement your own version of `try_find`. See the
-/// [`Automaton`] documentation for an example.
-#[derive(Clone)]
-pub struct DFA {
-    /// The DFA transition table. IDs in this table are pre-multiplied. So
-    /// instead of the IDs being 0, 1, 2, 3, ..., they are 0*stride, 1*stride,
-    /// 2*stride, 3*stride, ...
-    trans: Vec<StateID>,
-    /// The matches for every match state in this DFA. This is first indexed by
-    /// state index (so that's `sid >> stride2`) and then by order in which the
-    /// matches are meant to occur.
-    matches: Vec<Vec<PatternID>>,
-    /// The amount of heap memory used, in bytes, by the inner Vecs of
-    /// 'matches'.
-    matches_memory_usage: usize,
-    /// The length of each pattern. This is used to compute the start offset
-    /// of a match.
-    pattern_lens: Vec<SmallIndex>,
-    /// A prefilter for accelerating searches, if one exists.
-    prefilter: Option<Prefilter>,
-    /// The match semantics built into this DFA.
-    match_kind: MatchKind,
-    /// The total number of states in this DFA.
-    state_len: usize,
-    /// The alphabet size, or total number of equivalence classes, for this
-    /// DFA. Note that the actual number of transitions in each state is
-    /// stride=2^stride2, where stride is the smallest power of 2 greater than
-    /// or equal to alphabet_len. We do things this way so that we can use
-    /// bitshifting to go from a state ID to an index into 'matches'.
-    alphabet_len: usize,
-    /// The exponent with a base 2, such that stride=2^stride2. Given a state
-    /// index 'i', its state identifier is 'i << stride2'. Given a state
-    /// identifier 'sid', its state index is 'sid >> stride2'.
-    stride2: usize,
-    /// The equivalence classes for this DFA. All transitions are defined on
-    /// equivalence classes and not on the 256 distinct byte values.
-    byte_classes: ByteClasses,
-    /// The length of the shortest pattern in this automaton.
-    min_pattern_len: usize,
-    /// The length of the longest pattern in this automaton.
-    max_pattern_len: usize,
-    /// The information required to deduce which states are "special" in this
-    /// DFA.
-    special: Special,
-}
-
-impl DFA {
-    /// Create a new Aho-Corasick DFA using the default configuration.
-    ///
-    /// Use a [`Builder`] if you want to change the configuration.
-    pub fn new<I, P>(patterns: I) -> Result<DFA, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        DFA::builder().build(patterns)
-    }
-
-    /// A convenience method for returning a new Aho-Corasick DFA builder.
-    ///
-    /// This usually permits one to just import the `DFA` type.
-    pub fn builder() -> Builder {
-        Builder::new()
-    }
-}
-
-impl DFA {
-    /// A sentinel state ID indicating that a search should stop once it has
-    /// entered this state. When a search stops, it returns a match if one has
-    /// been found, otherwise no match. A DFA always has an actual dead state
-    /// at this ID.
-    ///
-    /// N.B. DFAs, unlike NFAs, do not have any notion of a FAIL state.
-    /// Namely, the whole point of a DFA is that the FAIL state is completely
-    /// compiled away. That is, DFA construction involves pre-computing the
-    /// failure transitions everywhere, such that failure transitions are no
-    /// longer used at search time. This, combined with its uniformly dense
-    /// representation, are the two most important factors in why it's faster
-    /// than the NFAs in this crate.
-    const DEAD: StateID = StateID::new_unchecked(0);
-
-    /// Adds the given pattern IDs as matches to the given state and also
-    /// records the added memory usage.
-    fn set_matches(
-        &mut self,
-        sid: StateID,
-        pids: impl Iterator<Item = PatternID>,
-    ) {
-        let index = (sid.as_usize() >> self.stride2).checked_sub(2).unwrap();
-        let mut at_least_one = false;
-        for pid in pids {
-            self.matches[index].push(pid);
-            self.matches_memory_usage += PatternID::SIZE;
-            at_least_one = true;
-        }
-        assert!(at_least_one, "match state must have non-empty pids");
-    }
-}
-
-// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always
-// returns a valid state ID given a valid state ID. We otherwise claim that
-// all other methods are correct as well.
-unsafe impl Automaton for DFA {
-    #[inline(always)]
-    fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> {
-        // Either of the start state IDs can be DEAD, in which case, support
-        // for that type of search is not provided by this DFA. Which start
-        // state IDs are inactive depends on the 'StartKind' configuration at
-        // DFA construction time.
-        match anchored {
-            Anchored::No => {
-                let start = self.special.start_unanchored_id;
-                if start == DFA::DEAD {
-                    Err(MatchError::invalid_input_unanchored())
-                } else {
-                    Ok(start)
-                }
-            }
-            Anchored::Yes => {
-                let start = self.special.start_anchored_id;
-                if start == DFA::DEAD {
-                    Err(MatchError::invalid_input_anchored())
-                } else {
-                    Ok(start)
-                }
-            }
-        }
-    }
-
-    #[inline(always)]
-    fn next_state(
-        &self,
-        _anchored: Anchored,
-        sid: StateID,
-        byte: u8,
-    ) -> StateID {
-        let class = self.byte_classes.get(byte);
-        self.trans[(sid.as_u32() + u32::from(class)).as_usize()]
-    }
-
-    #[inline(always)]
-    fn is_special(&self, sid: StateID) -> bool {
-        sid <= self.special.max_special_id
-    }
-
-    #[inline(always)]
-    fn is_dead(&self, sid: StateID) -> bool {
-        sid == DFA::DEAD
-    }
-
-    #[inline(always)]
-    fn is_match(&self, sid: StateID) -> bool {
-        !self.is_dead(sid) && sid <= self.special.max_match_id
-    }
-
-    #[inline(always)]
-    fn is_start(&self, sid: StateID) -> bool {
-        sid == self.special.start_unanchored_id
-            || sid == self.special.start_anchored_id
-    }
-
-    #[inline(always)]
-    fn match_kind(&self) -> MatchKind {
-        self.match_kind
-    }
-
-    #[inline(always)]
-    fn patterns_len(&self) -> usize {
-        self.pattern_lens.len()
-    }
-
-    #[inline(always)]
-    fn pattern_len(&self, pid: PatternID) -> usize {
-        self.pattern_lens[pid].as_usize()
-    }
-
-    #[inline(always)]
-    fn min_pattern_len(&self) -> usize {
-        self.min_pattern_len
-    }
-
-    #[inline(always)]
-    fn max_pattern_len(&self) -> usize {
-        self.max_pattern_len
-    }
-
-    #[inline(always)]
-    fn match_len(&self, sid: StateID) -> usize {
-        debug_assert!(self.is_match(sid));
-        let offset = (sid.as_usize() >> self.stride2) - 2;
-        self.matches[offset].len()
-    }
-
-    #[inline(always)]
-    fn match_pattern(&self, sid: StateID, index: usize) -> PatternID {
-        debug_assert!(self.is_match(sid));
-        let offset = (sid.as_usize() >> self.stride2) - 2;
-        self.matches[offset][index]
-    }
-
-    #[inline(always)]
-    fn memory_usage(&self) -> usize {
-        use core::mem::size_of;
-
-        (self.trans.len() * size_of::<u32>())
-            + (self.matches.len() * size_of::<Vec<PatternID>>())
-            + self.matches_memory_usage
-            + (self.pattern_lens.len() * size_of::<SmallIndex>())
-            + self.prefilter.as_ref().map_or(0, |p| p.memory_usage())
-    }
-
-    #[inline(always)]
-    fn prefilter(&self) -> Option<&Prefilter> {
-        self.prefilter.as_ref()
-    }
-}
-
-impl core::fmt::Debug for DFA {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        use crate::{
-            automaton::{fmt_state_indicator, sparse_transitions},
-            util::debug::DebugByte,
-        };
-
-        writeln!(f, "dfa::DFA(")?;
-        for index in 0..self.state_len {
-            let sid = StateID::new_unchecked(index << self.stride2);
-            // While we do currently include the FAIL state in the transition
-            // table (to simplify construction), it is never actually used. It
-            // poses problems with the code below because it gets treated as
-            // a match state incidentally when it is, of course, not. So we
-            // special case it. The fail state is always the first state after
-            // the dead state.
-            //
-            // If the construction is changed to remove the fail state (it
-            // probably should be), then this special case should be updated.
-            if index == 1 {
-                writeln!(f, "F {:06}:", sid.as_usize())?;
-                continue;
-            }
-            fmt_state_indicator(f, self, sid)?;
-            write!(f, "{:06}: ", sid.as_usize())?;
-
-            let it = (0..self.byte_classes.alphabet_len()).map(|class| {
-                (class.as_u8(), self.trans[sid.as_usize() + class])
-            });
-            for (i, (start, end, next)) in sparse_transitions(it).enumerate() {
-                if i > 0 {
-                    write!(f, ", ")?;
-                }
-                if start == end {
-                    write!(
-                        f,
-                        "{:?} => {:?}",
-                        DebugByte(start),
-                        next.as_usize()
-                    )?;
-                } else {
-                    write!(
-                        f,
-                        "{:?}-{:?} => {:?}",
-                        DebugByte(start),
-                        DebugByte(end),
-                        next.as_usize()
-                    )?;
-                }
-            }
-            write!(f, "\n")?;
-            if self.is_match(sid) {
-                write!(f, " matches: ")?;
-                for i in 0..self.match_len(sid) {
-                    if i > 0 {
-                        write!(f, ", ")?;
-                    }
-                    let pid = self.match_pattern(sid, i);
-                    write!(f, "{}", pid.as_usize())?;
-                }
-                write!(f, "\n")?;
-            }
-        }
-        writeln!(f, "match kind: {:?}", self.match_kind)?;
-        writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?;
-        writeln!(f, "state length: {:?}", self.state_len)?;
-        writeln!(f, "pattern length: {:?}", self.patterns_len())?;
-        writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?;
-        writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?;
-        writeln!(f, "alphabet length: {:?}", self.alphabet_len)?;
-        writeln!(f, "stride: {:?}", 1 << self.stride2)?;
-        writeln!(f, "byte classes: {:?}", self.byte_classes)?;
-        writeln!(f, "memory usage: {:?}", self.memory_usage())?;
-        writeln!(f, ")")?;
-        Ok(())
-    }
-}
-
-/// A builder for configuring an Aho-Corasick DFA.
-///
-/// This builder has a subset of the options available to a
-/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options,
-/// their behavior is identical.
-#[derive(Clone, Debug)]
-pub struct Builder {
-    noncontiguous: noncontiguous::Builder,
-    start_kind: StartKind,
-    byte_classes: bool,
-}
-
-impl Default for Builder {
-    fn default() -> Builder {
-        Builder {
-            noncontiguous: noncontiguous::Builder::new(),
-            start_kind: StartKind::Unanchored,
-            byte_classes: true,
-        }
-    }
-}
-
-impl Builder {
-    /// Create a new builder for configuring an Aho-Corasick DFA.
-    pub fn new() -> Builder {
-        Builder::default()
-    }
-
-    /// Build an Aho-Corasick DFA from the given iterator of patterns.
-    ///
-    /// A builder may be reused to create more DFAs.
-    pub fn build<I, P>(&self, patterns: I) -> Result<DFA, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        let nnfa = self.noncontiguous.build(patterns)?;
-        self.build_from_noncontiguous(&nnfa)
-    }
-
-    /// Build an Aho-Corasick DFA from the given noncontiguous NFA.
-    ///
-    /// Note that when this method is used, only the `start_kind` and
-    /// `byte_classes` settings on this builder are respected. The other
-    /// settings only apply to the initial construction of the Aho-Corasick
-    /// automaton. Since using this method requires that initial construction
-    /// has already completed, all settings impacting only initial construction
-    /// are no longer relevant.
-    pub fn build_from_noncontiguous(
-        &self,
-        nnfa: &noncontiguous::NFA,
-    ) -> Result<DFA, BuildError> {
-        debug!("building DFA");
-        let byte_classes = if self.byte_classes {
-            nnfa.byte_classes().clone()
-        } else {
-            ByteClasses::singletons()
-        };
-        let state_len = match self.start_kind {
-            StartKind::Unanchored | StartKind::Anchored => nnfa.states().len(),
-            StartKind::Both => {
-                // These unwraps are OK because we know that the number of
-                // NFA states is < StateID::LIMIT which is in turn less than
-                // i32::MAX. Thus, there is always room to multiply by 2.
-                // Finally, the number of states is always at least 4 in the
-                // NFA (DEAD, FAIL, START-UNANCHORED, START-ANCHORED), so the
-                // subtraction of 4 is okay.
-                //
-                // Note that we subtract 4 because the "anchored" part of
-                // the DFA duplicates the unanchored part (without failure
-                // transitions), but reuses the DEAD, FAIL and START states.
-                nnfa.states()
-                    .len()
-                    .checked_mul(2)
-                    .unwrap()
-                    .checked_sub(4)
-                    .unwrap()
-            }
-        };
-        let trans_len =
-            match state_len.checked_shl(byte_classes.stride2().as_u32()) {
-                Some(trans_len) => trans_len,
-                None => {
-                    return Err(BuildError::state_id_overflow(
-                        StateID::MAX.as_u64(),
-                        usize::MAX.as_u64(),
-                    ))
-                }
-            };
-        StateID::new(trans_len.checked_sub(byte_classes.stride()).unwrap())
-            .map_err(|e| {
-                BuildError::state_id_overflow(
-                    StateID::MAX.as_u64(),
-                    e.attempted(),
-                )
-            })?;
-        let num_match_states = match self.start_kind {
-            StartKind::Unanchored | StartKind::Anchored => {
-                nnfa.special().max_match_id.as_usize().checked_sub(1).unwrap()
-            }
-            StartKind::Both => nnfa
-                .special()
-                .max_match_id
-                .as_usize()
-                .checked_sub(1)
-                .unwrap()
-                .checked_mul(2)
-                .unwrap(),
-        };
-        let mut dfa = DFA {
-            trans: vec![DFA::DEAD; trans_len],
-            matches: vec![vec![]; num_match_states],
-            matches_memory_usage: 0,
-            pattern_lens: nnfa.pattern_lens_raw().to_vec(),
-            prefilter: nnfa.prefilter().map(|p| p.clone()),
-            match_kind: nnfa.match_kind(),
-            state_len,
-            alphabet_len: byte_classes.alphabet_len(),
-            stride2: byte_classes.stride2(),
-            byte_classes,
-            min_pattern_len: nnfa.min_pattern_len(),
-            max_pattern_len: nnfa.max_pattern_len(),
-            // The special state IDs are set later.
-            special: Special::zero(),
-        };
-        match self.start_kind {
-            StartKind::Both => {
-                self.finish_build_both_starts(nnfa, &mut dfa);
-            }
-            StartKind::Unanchored => {
-                self.finish_build_one_start(Anchored::No, nnfa, &mut dfa);
-            }
-            StartKind::Anchored => {
-                self.finish_build_one_start(Anchored::Yes, nnfa, &mut dfa)
-            }
-        }
-        debug!(
-            "DFA built, <states: {:?}, size: {:?}, \
-             alphabet len: {:?}, stride: {:?}>",
-            dfa.state_len,
-            dfa.memory_usage(),
-            dfa.byte_classes.alphabet_len(),
-            dfa.byte_classes.stride(),
-        );
-        // The vectors can grow ~twice as big during construction because a
-        // Vec amortizes growth. But here, let's shrink things back down to
-        // what we actually need since we're never going to add more to it.
-        dfa.trans.shrink_to_fit();
-        dfa.pattern_lens.shrink_to_fit();
-        dfa.matches.shrink_to_fit();
-        // TODO: We might also want to shrink each Vec inside of `dfa.matches`,
-        // or even better, convert it to one contiguous allocation. But I think
-        // I went with nested allocs for good reason (can't remember), so this
-        // may be tricky to do. I decided not to shrink them here because it
-        // might require a fair bit of work to do. It's unclear whether it's
-        // worth it.
-        Ok(dfa)
-    }
-
-    /// Finishes building a DFA for either unanchored or anchored searches,
-    /// but NOT both.
-    fn finish_build_one_start(
-        &self,
-        anchored: Anchored,
-        nnfa: &noncontiguous::NFA,
-        dfa: &mut DFA,
-    ) {
-        // This function always succeeds because we check above that all of the
-        // states in the NFA can be mapped to DFA state IDs.
-        let stride2 = dfa.stride2;
-        let old2new = |oldsid: StateID| {
-            StateID::new_unchecked(oldsid.as_usize() << stride2)
-        };
-        for (oldsid, state) in nnfa.states().iter().with_state_ids() {
-            let newsid = old2new(oldsid);
-            if state.is_match() {
-                dfa.set_matches(newsid, nnfa.iter_matches(oldsid));
-            }
-            sparse_iter(
-                nnfa,
-                oldsid,
-                &dfa.byte_classes,
-                |byte, class, mut oldnextsid| {
-                    if oldnextsid == noncontiguous::NFA::FAIL {
-                        if anchored.is_anchored() {
-                            oldnextsid = noncontiguous::NFA::DEAD;
-                        } else if state.fail() == noncontiguous::NFA::DEAD {
-                            // This is a special case that avoids following
-                            // DEAD transitions in a non-contiguous NFA.
-                            // Following these transitions is pretty slow
-                            // because the non-contiguous NFA will always use
-                            // a sparse representation for it (because the
-                            // DEAD state is usually treated as a sentinel).
-                            // The *vast* majority of failure states are DEAD
-                            // states, so this winds up being pretty slow if
-                            // we go through the non-contiguous NFA state
-                            // transition logic. Instead, just do it ourselves.
-                            oldnextsid = noncontiguous::NFA::DEAD;
-                        } else {
-                            oldnextsid = nnfa.next_state(
-                                Anchored::No,
-                                state.fail(),
-                                byte,
-                            );
-                        }
-                    }
-                    dfa.trans[newsid.as_usize() + usize::from(class)] =
-                        old2new(oldnextsid);
-                },
-            );
-        }
-        // Now that we've remapped all the IDs in our states, all that's left
-        // is remapping the special state IDs.
-        let old = nnfa.special();
-        let new = &mut dfa.special;
-        new.max_special_id = old2new(old.max_special_id);
-        new.max_match_id = old2new(old.max_match_id);
-        if anchored.is_anchored() {
-            new.start_unanchored_id = DFA::DEAD;
-            new.start_anchored_id = old2new(old.start_anchored_id);
-        } else {
-            new.start_unanchored_id = old2new(old.start_unanchored_id);
-            new.start_anchored_id = DFA::DEAD;
-        }
-    }
-
-    /// Finishes building a DFA that supports BOTH unanchored and anchored
-    /// searches. It works by inter-leaving unanchored states with anchored
-    /// states in the same transition table. This way, we avoid needing to
-    /// re-shuffle states afterward to ensure that our states still look like
-    /// DEAD, MATCH, ..., START-UNANCHORED, START-ANCHORED, NON-MATCH, ...
-    ///
-    /// Honestly this is pretty inscrutable... Simplifications are most
-    /// welcome.
-    fn finish_build_both_starts(
-        &self,
-        nnfa: &noncontiguous::NFA,
-        dfa: &mut DFA,
-    ) {
-        let stride2 = dfa.stride2;
-        let stride = 1 << stride2;
-        let mut remap_unanchored = vec![DFA::DEAD; nnfa.states().len()];
-        let mut remap_anchored = vec![DFA::DEAD; nnfa.states().len()];
-        let mut is_anchored = vec![false; dfa.state_len];
-        let mut newsid = DFA::DEAD;
-        let next_dfa_id =
-            |sid: StateID| StateID::new_unchecked(sid.as_usize() + stride);
-        for (oldsid, state) in nnfa.states().iter().with_state_ids() {
-            if oldsid == noncontiguous::NFA::DEAD
-                || oldsid == noncontiguous::NFA::FAIL
-            {
-                remap_unanchored[oldsid] = newsid;
-                remap_anchored[oldsid] = newsid;
-                newsid = next_dfa_id(newsid);
-            } else if oldsid == nnfa.special().start_unanchored_id
-                || oldsid == nnfa.special().start_anchored_id
-            {
-                if oldsid == nnfa.special().start_unanchored_id {
-                    remap_unanchored[oldsid] = newsid;
-                    remap_anchored[oldsid] = DFA::DEAD;
-                } else {
-                    remap_unanchored[oldsid] = DFA::DEAD;
-                    remap_anchored[oldsid] = newsid;
-                    is_anchored[newsid.as_usize() >> stride2] = true;
-                }
-                if state.is_match() {
-                    dfa.set_matches(newsid, nnfa.iter_matches(oldsid));
-                }
-                sparse_iter(
-                    nnfa,
-                    oldsid,
-                    &dfa.byte_classes,
-                    |_, class, oldnextsid| {
-                        let class = usize::from(class);
-                        if oldnextsid == noncontiguous::NFA::FAIL {
-                            dfa.trans[newsid.as_usize() + class] = DFA::DEAD;
-                        } else {
-                            dfa.trans[newsid.as_usize() + class] = oldnextsid;
-                        }
-                    },
-                );
-                newsid = next_dfa_id(newsid);
-            } else {
-                let unewsid = newsid;
-                newsid = next_dfa_id(newsid);
-                let anewsid = newsid;
-                newsid = next_dfa_id(newsid);
-
-                remap_unanchored[oldsid] = unewsid;
-                remap_anchored[oldsid] = anewsid;
-                is_anchored[anewsid.as_usize() >> stride2] = true;
-                if state.is_match() {
-                    dfa.set_matches(unewsid, nnfa.iter_matches(oldsid));
-                    dfa.set_matches(anewsid, nnfa.iter_matches(oldsid));
-                }
-                sparse_iter(
-                    nnfa,
-                    oldsid,
-                    &dfa.byte_classes,
-                    |byte, class, oldnextsid| {
-                        let class = usize::from(class);
-                        if oldnextsid == noncontiguous::NFA::FAIL {
-                            let oldnextsid =
-                                if state.fail() == noncontiguous::NFA::DEAD {
-                                    noncontiguous::NFA::DEAD
-                                } else {
-                                    nnfa.next_state(
-                                        Anchored::No,
-                                        state.fail(),
-                                        byte,
-                                    )
-                                };
-                            dfa.trans[unewsid.as_usize() + class] = oldnextsid;
-                        } else {
-                            dfa.trans[unewsid.as_usize() + class] = oldnextsid;
-                            dfa.trans[anewsid.as_usize() + class] = oldnextsid;
-                        }
-                    },
-                );
-            }
-        }
-        for i in 0..dfa.state_len {
-            let sid = i << stride2;
-            if is_anchored[i] {
-                for next in dfa.trans[sid..][..stride].iter_mut() {
-                    *next = remap_anchored[*next];
-                }
-            } else {
-                for next in dfa.trans[sid..][..stride].iter_mut() {
-                    *next = remap_unanchored[*next];
-                }
-            }
-        }
-        // Now that we've remapped all the IDs in our states, all that's left
-        // is remapping the special state IDs.
-        let old = nnfa.special();
-        let new = &mut dfa.special;
-        new.max_special_id = remap_anchored[old.max_special_id];
-        new.max_match_id = remap_anchored[old.max_match_id];
-        new.start_unanchored_id = remap_unanchored[old.start_unanchored_id];
-        new.start_anchored_id = remap_anchored[old.start_anchored_id];
-    }
-
-    /// Set the desired match semantics.
-    ///
-    /// This only applies when using [`Builder::build`] and not
-    /// [`Builder::build_from_noncontiguous`].
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind)
-    /// for more documentation and examples.
-    pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder {
-        self.noncontiguous.match_kind(kind);
-        self
-    }
-
-    /// Enable ASCII-aware case insensitive matching.
-    ///
-    /// This only applies when using [`Builder::build`] and not
-    /// [`Builder::build_from_noncontiguous`].
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive)
-    /// for more documentation and examples.
-    pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder {
-        self.noncontiguous.ascii_case_insensitive(yes);
-        self
-    }
-
-    /// Enable heuristic prefilter optimizations.
-    ///
-    /// This only applies when using [`Builder::build`] and not
-    /// [`Builder::build_from_noncontiguous`].
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter)
-    /// for more documentation and examples.
-    pub fn prefilter(&mut self, yes: bool) -> &mut Builder {
-        self.noncontiguous.prefilter(yes);
-        self
-    }
-
-    /// Sets the starting state configuration for the automaton.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind)
-    /// for more documentation and examples.
-    pub fn start_kind(&mut self, kind: StartKind) -> &mut Builder {
-        self.start_kind = kind;
-        self
-    }
-
-    /// A debug setting for whether to attempt to shrink the size of the
-    /// automaton's alphabet or not.
-    ///
-    /// This should never be enabled unless you're debugging an automaton.
-    /// Namely, disabling byte classes makes transitions easier to reason
-    /// about, since they use the actual bytes instead of equivalence classes.
-    /// Disabling this confers no performance benefit at search time.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::byte_classes`](crate::AhoCorasickBuilder::byte_classes)
-    /// for more documentation and examples.
-    pub fn byte_classes(&mut self, yes: bool) -> &mut Builder {
-        self.byte_classes = yes;
-        self
-    }
-}
-
-/// Iterate over all possible equivalence class transitions in this state.
-/// The closure is called for all transitions with a distinct equivalence
-/// class, even those not explicitly represented in this sparse state. For
-/// any implicitly defined transitions, the given closure is called with
-/// the fail state ID.
-///
-/// The closure is guaranteed to be called precisely
-/// `byte_classes.alphabet_len()` times, once for every possible class in
-/// ascending order.
-fn sparse_iter<F: FnMut(u8, u8, StateID)>(
-    nnfa: &noncontiguous::NFA,
-    oldsid: StateID,
-    classes: &ByteClasses,
-    mut f: F,
-) {
-    let mut prev_class = None;
-    let mut byte = 0usize;
-    for t in nnfa.iter_trans(oldsid) {
-        while byte < usize::from(t.byte()) {
-            let rep = byte.as_u8();
-            let class = classes.get(rep);
-            byte += 1;
-            if prev_class != Some(class) {
-                f(rep, class, noncontiguous::NFA::FAIL);
-                prev_class = Some(class);
-            }
-        }
-        let rep = t.byte();
-        let class = classes.get(rep);
-        byte += 1;
-        if prev_class != Some(class) {
-            f(rep, class, t.next());
-            prev_class = Some(class);
-        }
-    }
-    for b in byte..=255 {
-        let rep = b.as_u8();
-        let class = classes.get(rep);
-        if prev_class != Some(class) {
-            f(rep, class, noncontiguous::NFA::FAIL);
-            prev_class = Some(class);
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/lib.rs
deleted file mode 100644
index 20e8b811..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/lib.rs
+++ /dev/null
@@ -1,326 +0,0 @@
-/*!
-A library for finding occurrences of many patterns at once. This library
-provides multiple pattern search principally through an implementation of the
-[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm),
-which builds a fast finite state machine for executing searches in linear time.
-
-Additionally, this library provides a number of configuration options for
-building the automaton that permit controlling the space versus time trade
-off. Other features include simple ASCII case insensitive matching, finding
-overlapping matches, replacements, searching streams and even searching and
-replacing text in streams.
-
-Finally, unlike most other Aho-Corasick implementations, this one
-supports enabling [leftmost-first](MatchKind::LeftmostFirst) or
-[leftmost-longest](MatchKind::LeftmostLongest) match semantics, using a
-(seemingly) novel alternative construction algorithm. For more details on what
-match semantics means, see the [`MatchKind`] type.
-
-# Overview
-
-This section gives a brief overview of the primary types in this crate:
-
-* [`AhoCorasick`] is the primary type and represents an Aho-Corasick automaton.
-This is the type you use to execute searches.
-* [`AhoCorasickBuilder`] can be used to build an Aho-Corasick automaton, and
-supports configuring a number of options.
-* [`Match`] represents a single match reported by an Aho-Corasick automaton.
-Each match has two pieces of information: the pattern that matched and the
-start and end byte offsets corresponding to the position in the haystack at
-which it matched.
-
-# Example: basic searching
-
-This example shows how to search for occurrences of multiple patterns
-simultaneously. Each match includes the pattern that matched along with the
-byte offsets of the match.
-
-```
-use aho_corasick::{AhoCorasick, PatternID};
-
-let patterns = &["apple", "maple", "Snapple"];
-let haystack = "Nobody likes maple in their apple flavored Snapple.";
-
-let ac = AhoCorasick::new(patterns).unwrap();
-let mut matches = vec![];
-for mat in ac.find_iter(haystack) {
-    matches.push((mat.pattern(), mat.start(), mat.end()));
-}
-assert_eq!(matches, vec![
-    (PatternID::must(1), 13, 18),
-    (PatternID::must(0), 28, 33),
-    (PatternID::must(2), 43, 50),
-]);
-```
-
-# Example: case insensitivity
-
-This is like the previous example, but matches `Snapple` case insensitively
-using `AhoCorasickBuilder`:
-
-```
-use aho_corasick::{AhoCorasick, PatternID};
-
-let patterns = &["apple", "maple", "snapple"];
-let haystack = "Nobody likes maple in their apple flavored Snapple.";
-
-let ac = AhoCorasick::builder()
-    .ascii_case_insensitive(true)
-    .build(patterns)
-    .unwrap();
-let mut matches = vec![];
-for mat in ac.find_iter(haystack) {
-    matches.push((mat.pattern(), mat.start(), mat.end()));
-}
-assert_eq!(matches, vec![
-    (PatternID::must(1), 13, 18),
-    (PatternID::must(0), 28, 33),
-    (PatternID::must(2), 43, 50),
-]);
-```
-
-# Example: replacing matches in a stream
-
-This example shows how to execute a search and replace on a stream without
-loading the entire stream into memory first.
-
-```
-# #[cfg(feature = "std")] {
-use aho_corasick::AhoCorasick;
-
-# fn example() -> Result<(), std::io::Error> {
-let patterns = &["fox", "brown", "quick"];
-let replace_with = &["sloth", "grey", "slow"];
-
-// In a real example, these might be `std::fs::File`s instead. All you need to
-// do is supply a pair of `std::io::Read` and `std::io::Write` implementations.
-let rdr = "The quick brown fox.";
-let mut wtr = vec![];
-
-let ac = AhoCorasick::new(patterns).unwrap();
-ac.try_stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with)?;
-assert_eq!(b"The slow grey sloth.".to_vec(), wtr);
-# Ok(()) }; example().unwrap()
-# }
-```
-
-# Example: finding the leftmost first match
-
-In the textbook description of Aho-Corasick, its formulation is typically
-structured such that it reports all possible matches, even when they overlap
-with another. In many cases, overlapping matches may not be desired, such as
-the case of finding all successive non-overlapping matches like you might with
-a standard regular expression.
-
-Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do
-this doesn't always work in the expected way, since it will report matches as
-soon as they are seen. For example, consider matching the regex `Samwise|Sam`
-against the text `Samwise`. Most regex engines (that are Perl-like, or
-non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick
-algorithm modified for reporting non-overlapping matches will report `Sam`.
-
-A novel contribution of this library is the ability to change the match
-semantics of Aho-Corasick (without additional search time overhead) such that
-`Samwise` is reported instead. For example, here's the standard approach:
-
-```
-use aho_corasick::AhoCorasick;
-
-let patterns = &["Samwise", "Sam"];
-let haystack = "Samwise";
-
-let ac = AhoCorasick::new(patterns).unwrap();
-let mat = ac.find(haystack).expect("should have a match");
-assert_eq!("Sam", &haystack[mat.start()..mat.end()]);
-```
-
-And now here's the leftmost-first version, which matches how a Perl-like
-regex will work:
-
-```
-use aho_corasick::{AhoCorasick, MatchKind};
-
-let patterns = &["Samwise", "Sam"];
-let haystack = "Samwise";
-
-let ac = AhoCorasick::builder()
-    .match_kind(MatchKind::LeftmostFirst)
-    .build(patterns)
-    .unwrap();
-let mat = ac.find(haystack).expect("should have a match");
-assert_eq!("Samwise", &haystack[mat.start()..mat.end()]);
-```
-
-In addition to leftmost-first semantics, this library also supports
-leftmost-longest semantics, which match the POSIX behavior of a regular
-expression alternation. See [`MatchKind`] for more details.
-
-# Prefilters
-
-While an Aho-Corasick automaton can perform admirably when compared to more
-naive solutions, it is generally slower than more specialized algorithms that
-are accelerated using vector instructions such as SIMD.
-
-For that reason, this library will internally use a "prefilter" to attempt
-to accelerate searches when possible. Currently, this library has several
-different algorithms it might use depending on the patterns provided. Once the
-number of patterns gets too big, prefilters are no longer used.
-
-While a prefilter is generally good to have on by default since it works
-well in the common case, it can lead to less predictable or even sub-optimal
-performance in some cases. For that reason, prefilters can be explicitly
-disabled via [`AhoCorasickBuilder::prefilter`].
-
-# Lower level APIs
-
-This crate also provides several sub-modules that collectively expose many of
-the implementation details of the main [`AhoCorasick`] type. Most users of this
-library can completely ignore the submodules and their contents, but if you
-needed finer grained control, some parts of them may be useful to you. Here is
-a brief overview of each and why you might want to use them:
-
-* The [`packed`] sub-module contains a lower level API for using fast
-vectorized routines for finding a small number of patterns in a haystack.
-You might want to use this API when you want to completely side-step using
-Aho-Corasick automata. Otherwise, the fast vectorized routines are used
-automatically as prefilters for `AhoCorasick` searches whenever possible.
-* The [`automaton`] sub-module provides a lower level finite state
-machine interface that the various Aho-Corasick implementations in
-this crate implement. This sub-module's main contribution is the
-[`Automaton`](automaton::Automaton) trait, which permits manually walking the
-state transitions of an Aho-Corasick automaton.
-* The [`dfa`] and [`nfa`] sub-modules provide DFA and NFA implementations of
-the aforementioned `Automaton` trait. The main reason one might want to use
-these sub-modules is to get access to a type that implements the `Automaton`
-trait. (The top-level `AhoCorasick` type does not implement the `Automaton`
-trait.)
-
-As mentioned above, if you aren't sure whether you need these sub-modules,
-you should be able to safely ignore them and just focus on the [`AhoCorasick`]
-type.
-
-# Crate features
-
-This crate exposes a few features for controlling dependency usage and whether
-this crate can be used without the standard library.
-
-* **std** -
-  Enables support for the standard library. This feature is enabled by
-  default. When disabled, only `core` and `alloc` are used. At an API
-  level, enabling `std` enables `std::error::Error` trait impls for the
-  various error types, and higher level stream search routines such as
-  [`AhoCorasick::try_stream_find_iter`]. But the `std` feature is also required
-  to enable vectorized prefilters. Prefilters can greatly accelerate searches,
-  but generally only apply when the number of patterns is small (less than
-  ~100).
-* **perf-literal** -
-  Enables support for literal prefilters that use vectorized routines from
-  external crates. This feature is enabled by default. If you're only using
-  Aho-Corasick for large numbers of patterns or otherwise can abide lower
-  throughput when searching with a small number of patterns, then it is
-  reasonable to disable this feature.
-* **logging** -
-  Enables a dependency on the `log` crate and emits messages to aide in
-  diagnostics. This feature is disabled by default.
-*/
-
-#![no_std]
-#![deny(missing_docs)]
-#![deny(rustdoc::broken_intra_doc_links)]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
-
-extern crate alloc;
-#[cfg(any(test, feature = "std"))]
-extern crate std;
-
-#[cfg(doctest)]
-doc_comment::doctest!("../README.md");
-
-#[cfg(feature = "std")]
-pub use crate::ahocorasick::StreamFindIter;
-pub use crate::{
-    ahocorasick::{
-        AhoCorasick, AhoCorasickBuilder, AhoCorasickKind, FindIter,
-        FindOverlappingIter,
-    },
-    util::{
-        error::{BuildError, MatchError, MatchErrorKind},
-        primitives::{PatternID, PatternIDError},
-        search::{Anchored, Input, Match, MatchKind, Span, StartKind},
-    },
-};
-
-#[macro_use]
-mod macros;
-
-mod ahocorasick;
-pub mod automaton;
-pub mod dfa;
-pub mod nfa;
-pub mod packed;
-#[cfg(test)]
-mod tests;
-// I wrote out the module for implementing fst::Automaton only to later realize
-// that this would make fst a public dependency and fst is not at 1.0 yet. I
-// decided to just keep the code in tree, but build it only during tests.
-//
-// TODO: I think I've changed my mind again. I'm considering pushing it out
-// into either a separate crate or into 'fst' directly as an optional feature.
-// #[cfg(test)]
-// #[allow(dead_code)]
-// mod transducer;
-pub(crate) mod util;
-
-#[cfg(test)]
-mod testoibits {
-    use std::panic::{RefUnwindSafe, UnwindSafe};
-
-    use super::*;
-
-    fn assert_all<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {}
-
-    #[test]
-    fn oibits_main() {
-        assert_all::<AhoCorasick>();
-        assert_all::<AhoCorasickBuilder>();
-        assert_all::<AhoCorasickKind>();
-        assert_all::<FindIter>();
-        assert_all::<FindOverlappingIter>();
-
-        assert_all::<BuildError>();
-        assert_all::<MatchError>();
-        assert_all::<MatchErrorKind>();
-
-        assert_all::<Anchored>();
-        assert_all::<Input>();
-        assert_all::<Match>();
-        assert_all::<MatchKind>();
-        assert_all::<Span>();
-        assert_all::<StartKind>();
-    }
-
-    #[test]
-    fn oibits_automaton() {
-        use crate::{automaton, dfa::DFA};
-
-        assert_all::<automaton::FindIter<DFA>>();
-        assert_all::<automaton::FindOverlappingIter<DFA>>();
-        #[cfg(feature = "std")]
-        assert_all::<automaton::StreamFindIter<DFA, std::io::Stdin>>();
-        assert_all::<automaton::OverlappingState>();
-
-        assert_all::<automaton::Prefilter>();
-        assert_all::<automaton::Candidate>();
-    }
-
-    #[test]
-    fn oibits_packed() {
-        use crate::packed;
-
-        assert_all::<packed::Config>();
-        assert_all::<packed::Builder>();
-        assert_all::<packed::Searcher>();
-        assert_all::<packed::FindIter>();
-        assert_all::<packed::MatchKind>();
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/macros.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/macros.rs
deleted file mode 100644
index fc73e6e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/macros.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-#![allow(unused_macros)]
-
-macro_rules! log {
-    ($($tt:tt)*) => {
-        #[cfg(feature = "logging")]
-        {
-            $($tt)*
-        }
-    }
-}
-
-macro_rules! debug {
-    ($($tt:tt)*) => { log!(log::debug!($($tt)*)) }
-}
-
-macro_rules! trace {
-    ($($tt:tt)*) => { log!(log::trace!($($tt)*)) }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/contiguous.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/contiguous.rs
deleted file mode 100644
index 29c1621..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/contiguous.rs
+++ /dev/null
@@ -1,1141 +0,0 @@
-/*!
-Provides a contiguous NFA implementation of Aho-Corasick.
-
-This is a low-level API that generally only needs to be used in niche
-circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick)
-instead of a contiguous NFA directly. Using an `NFA` directly is typically only
-necessary when one needs access to the [`Automaton`] trait implementation.
-*/
-
-use alloc::{vec, vec::Vec};
-
-use crate::{
-    automaton::Automaton,
-    nfa::noncontiguous,
-    util::{
-        alphabet::ByteClasses,
-        error::{BuildError, MatchError},
-        int::{Usize, U16, U32},
-        prefilter::Prefilter,
-        primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID},
-        search::{Anchored, MatchKind},
-        special::Special,
-    },
-};
-
-/// A contiguous NFA implementation of Aho-Corasick.
-///
-/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of
-/// this type directly. Using an `NFA` directly is typically only necessary
-/// when one needs access to the [`Automaton`] trait implementation.
-///
-/// This NFA can only be built by first constructing a [`noncontiguous::NFA`].
-/// Both [`NFA::new`] and [`Builder::build`] do this for you automatically, but
-/// [`Builder::build_from_noncontiguous`] permits doing it explicitly.
-///
-/// The main difference between a noncontiguous NFA and a contiguous NFA is
-/// that the latter represents all of its states and transitions in a single
-/// allocation, where as the former uses a separate allocation for each state.
-/// Doing this at construction time while keeping a low memory footprint isn't
-/// feasible, which is primarily why there are two different NFA types: one
-/// that does the least amount of work possible to build itself, and another
-/// that does a little extra work to compact itself and make state transitions
-/// faster by making some states use a dense representation.
-///
-/// Because a contiguous NFA uses a single allocation, there is a lot more
-/// opportunity for compression tricks to reduce the heap memory used. Indeed,
-/// it is not uncommon for a contiguous NFA to use an order of magnitude less
-/// heap memory than a noncontiguous NFA. Since building a contiguous NFA
-/// usually only takes a fraction of the time it takes to build a noncontiguous
-/// NFA, the overall build time is not much slower. Thus, in most cases, a
-/// contiguous NFA is the best choice.
-///
-/// Since a contiguous NFA uses various tricks for compression and to achieve
-/// faster state transitions, currently, its limit on the number of states
-/// is somewhat smaller than what a noncontiguous NFA can achieve. Generally
-/// speaking, you shouldn't expect to run into this limit if the number of
-/// patterns is under 1 million. It is plausible that this limit will be
-/// increased in the future. If the limit is reached, building a contiguous NFA
-/// will return an error. Often, since building a contiguous NFA is relatively
-/// cheap, it can make sense to always try it even if you aren't sure if it
-/// will fail or not. If it does, you can always fall back to a noncontiguous
-/// NFA. (Indeed, the main [`AhoCorasick`](crate::AhoCorasick) type employs a
-/// strategy similar to this at construction time.)
-///
-/// # Example
-///
-/// This example shows how to build an `NFA` directly and use it to execute
-/// [`Automaton::try_find`]:
-///
-/// ```
-/// use aho_corasick::{
-///     automaton::Automaton,
-///     nfa::contiguous::NFA,
-///     Input, Match,
-/// };
-///
-/// let patterns = &["b", "abc", "abcd"];
-/// let haystack = "abcd";
-///
-/// let nfa = NFA::new(patterns).unwrap();
-/// assert_eq!(
-///     Some(Match::must(0, 1..2)),
-///     nfa.try_find(&Input::new(haystack))?,
-/// );
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-///
-/// It is also possible to implement your own version of `try_find`. See the
-/// [`Automaton`] documentation for an example.
-#[derive(Clone)]
-pub struct NFA {
-    /// The raw NFA representation. Each state is packed with a header
-    /// (containing the format of the state, the failure transition and, for
-    /// a sparse state, the number of transitions), its transitions and any
-    /// matching pattern IDs for match states.
-    repr: Vec<u32>,
-    /// The length of each pattern. This is used to compute the start offset
-    /// of a match.
-    pattern_lens: Vec<SmallIndex>,
-    /// The total number of states in this NFA.
-    state_len: usize,
-    /// A prefilter for accelerating searches, if one exists.
-    prefilter: Option<Prefilter>,
-    /// The match semantics built into this NFA.
-    match_kind: MatchKind,
-    /// The alphabet size, or total number of equivalence classes, for this
-    /// NFA. Dense states always have this many transitions.
-    alphabet_len: usize,
-    /// The equivalence classes for this NFA. All transitions, dense and
-    /// sparse, are defined on equivalence classes and not on the 256 distinct
-    /// byte values.
-    byte_classes: ByteClasses,
-    /// The length of the shortest pattern in this automaton.
-    min_pattern_len: usize,
-    /// The length of the longest pattern in this automaton.
-    max_pattern_len: usize,
-    /// The information required to deduce which states are "special" in this
-    /// NFA.
-    special: Special,
-}
-
-impl NFA {
-    /// Create a new Aho-Corasick contiguous NFA using the default
-    /// configuration.
-    ///
-    /// Use a [`Builder`] if you want to change the configuration.
-    pub fn new<I, P>(patterns: I) -> Result<NFA, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        NFA::builder().build(patterns)
-    }
-
-    /// A convenience method for returning a new Aho-Corasick contiguous NFA
-    /// builder.
-    ///
-    /// This usually permits one to just import the `NFA` type.
-    pub fn builder() -> Builder {
-        Builder::new()
-    }
-}
-
-impl NFA {
-    /// A sentinel state ID indicating that a search should stop once it has
-    /// entered this state. When a search stops, it returns a match if one
-    /// has been found, otherwise no match. A contiguous NFA always has an
-    /// actual dead state at this ID.
-    const DEAD: StateID = StateID::new_unchecked(0);
-    /// Another sentinel state ID indicating that a search should move through
-    /// current state's failure transition.
-    ///
-    /// Note that unlike DEAD, this does not actually point to a valid state
-    /// in a contiguous NFA. (noncontiguous::NFA::FAIL does point to a valid
-    /// state.) Instead, this points to the position that is guaranteed to
-    /// never be a valid state ID (by making sure it points to a place in the
-    /// middle of the encoding of the DEAD state). Since we never need to
-    /// actually look at the FAIL state itself, this works out.
-    ///
-    /// By why do it this way? So that FAIL is a constant. I don't have any
-    /// concrete evidence that this materially helps matters, but it's easy to
-    /// do. The alternative would be making the FAIL ID point to the second
-    /// state, which could be made a constant but is a little trickier to do.
-    /// The easiest path is to just make the FAIL state a runtime value, but
-    /// since comparisons with FAIL occur in perf critical parts of the search,
-    /// we want it to be as tight as possible and not waste any registers.
-    ///
-    /// Very hand wavy... But the code complexity that results from this is
-    /// very mild.
-    const FAIL: StateID = StateID::new_unchecked(1);
-}
-
-// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always
-// returns a valid state ID given a valid state ID. We otherwise claim that
-// all other methods are correct as well.
-unsafe impl Automaton for NFA {
-    #[inline(always)]
-    fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> {
-        match anchored {
-            Anchored::No => Ok(self.special.start_unanchored_id),
-            Anchored::Yes => Ok(self.special.start_anchored_id),
-        }
-    }
-
-    #[inline(always)]
-    fn next_state(
-        &self,
-        anchored: Anchored,
-        mut sid: StateID,
-        byte: u8,
-    ) -> StateID {
-        let repr = &self.repr;
-        let class = self.byte_classes.get(byte);
-        let u32tosid = StateID::from_u32_unchecked;
-        loop {
-            let o = sid.as_usize();
-            let kind = repr[o] & 0xFF;
-            // I tried to encapsulate the "next transition" logic into its own
-            // function, but it seemed to always result in sub-optimal codegen
-            // that led to real and significant slowdowns. So we just inline
-            // the logic here.
-            //
-            // I've also tried a lot of different ways to speed up this
-            // routine, and most of them have failed.
-            if kind == State::KIND_DENSE {
-                let next = u32tosid(repr[o + 2 + usize::from(class)]);
-                if next != NFA::FAIL {
-                    return next;
-                }
-            } else if kind == State::KIND_ONE {
-                if class == repr[o].low_u16().high_u8() {
-                    return u32tosid(repr[o + 2]);
-                }
-            } else {
-                // NOTE: I tried a SWAR technique in the loop below, but found
-                // it slower. See the 'swar' test in the tests for this module.
-                let trans_len = kind.as_usize();
-                let classes_len = u32_len(trans_len);
-                let trans_offset = o + 2 + classes_len;
-                for (i, &chunk) in
-                    repr[o + 2..][..classes_len].iter().enumerate()
-                {
-                    let classes = chunk.to_ne_bytes();
-                    if classes[0] == class {
-                        return u32tosid(repr[trans_offset + i * 4]);
-                    }
-                    if classes[1] == class {
-                        return u32tosid(repr[trans_offset + i * 4 + 1]);
-                    }
-                    if classes[2] == class {
-                        return u32tosid(repr[trans_offset + i * 4 + 2]);
-                    }
-                    if classes[3] == class {
-                        return u32tosid(repr[trans_offset + i * 4 + 3]);
-                    }
-                }
-            }
-            // For an anchored search, we never follow failure transitions
-            // because failure transitions lead us down a path to matching
-            // a *proper* suffix of the path we were on. Thus, it can only
-            // produce matches that appear after the beginning of the search.
-            if anchored.is_anchored() {
-                return NFA::DEAD;
-            }
-            sid = u32tosid(repr[o + 1]);
-        }
-    }
-
-    #[inline(always)]
-    fn is_special(&self, sid: StateID) -> bool {
-        sid <= self.special.max_special_id
-    }
-
-    #[inline(always)]
-    fn is_dead(&self, sid: StateID) -> bool {
-        sid == NFA::DEAD
-    }
-
-    #[inline(always)]
-    fn is_match(&self, sid: StateID) -> bool {
-        !self.is_dead(sid) && sid <= self.special.max_match_id
-    }
-
-    #[inline(always)]
-    fn is_start(&self, sid: StateID) -> bool {
-        sid == self.special.start_unanchored_id
-            || sid == self.special.start_anchored_id
-    }
-
-    #[inline(always)]
-    fn match_kind(&self) -> MatchKind {
-        self.match_kind
-    }
-
-    #[inline(always)]
-    fn patterns_len(&self) -> usize {
-        self.pattern_lens.len()
-    }
-
-    #[inline(always)]
-    fn pattern_len(&self, pid: PatternID) -> usize {
-        self.pattern_lens[pid].as_usize()
-    }
-
-    #[inline(always)]
-    fn min_pattern_len(&self) -> usize {
-        self.min_pattern_len
-    }
-
-    #[inline(always)]
-    fn max_pattern_len(&self) -> usize {
-        self.max_pattern_len
-    }
-
-    #[inline(always)]
-    fn match_len(&self, sid: StateID) -> usize {
-        State::match_len(self.alphabet_len, &self.repr[sid.as_usize()..])
-    }
-
-    #[inline(always)]
-    fn match_pattern(&self, sid: StateID, index: usize) -> PatternID {
-        State::match_pattern(
-            self.alphabet_len,
-            &self.repr[sid.as_usize()..],
-            index,
-        )
-    }
-
-    #[inline(always)]
-    fn memory_usage(&self) -> usize {
-        use core::mem::size_of;
-
-        (self.repr.len() * size_of::<u32>())
-            + (self.pattern_lens.len() * size_of::<SmallIndex>())
-            + self.prefilter.as_ref().map_or(0, |p| p.memory_usage())
-    }
-
-    #[inline(always)]
-    fn prefilter(&self) -> Option<&Prefilter> {
-        self.prefilter.as_ref()
-    }
-}
-
-impl core::fmt::Debug for NFA {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        use crate::automaton::fmt_state_indicator;
-
-        writeln!(f, "contiguous::NFA(")?;
-        let mut sid = NFA::DEAD; // always the first state and always present
-        loop {
-            let raw = &self.repr[sid.as_usize()..];
-            if raw.is_empty() {
-                break;
-            }
-            let is_match = self.is_match(sid);
-            let state = State::read(self.alphabet_len, is_match, raw);
-            fmt_state_indicator(f, self, sid)?;
-            write!(
-                f,
-                "{:06}({:06}): ",
-                sid.as_usize(),
-                state.fail.as_usize()
-            )?;
-            state.fmt(f)?;
-            write!(f, "\n")?;
-            if self.is_match(sid) {
-                write!(f, "         matches: ")?;
-                for i in 0..state.match_len {
-                    let pid = State::match_pattern(self.alphabet_len, raw, i);
-                    if i > 0 {
-                        write!(f, ", ")?;
-                    }
-                    write!(f, "{}", pid.as_usize())?;
-                }
-                write!(f, "\n")?;
-            }
-            // The FAIL state doesn't actually have space for a state allocated
-            // for it, so we have to treat it as a special case. write below
-            // the DEAD state.
-            if sid == NFA::DEAD {
-                writeln!(f, "F {:06}:", NFA::FAIL.as_usize())?;
-            }
-            let len = State::len(self.alphabet_len, is_match, raw);
-            sid = StateID::new(sid.as_usize().checked_add(len).unwrap())
-                .unwrap();
-        }
-        writeln!(f, "match kind: {:?}", self.match_kind)?;
-        writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?;
-        writeln!(f, "state length: {:?}", self.state_len)?;
-        writeln!(f, "pattern length: {:?}", self.patterns_len())?;
-        writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?;
-        writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?;
-        writeln!(f, "alphabet length: {:?}", self.alphabet_len)?;
-        writeln!(f, "byte classes: {:?}", self.byte_classes)?;
-        writeln!(f, "memory usage: {:?}", self.memory_usage())?;
-        writeln!(f, ")")?;
-
-        Ok(())
-    }
-}
-
-/// The "in memory" representation a single dense or sparse state.
-///
-/// A `State`'s in memory representation is not ever actually materialized
-/// during a search with a contiguous NFA. Doing so would be too slow. (Indeed,
-/// the only time a `State` is actually constructed is in `Debug` impls.)
-/// Instead, a `State` exposes a number of static methods for reading certain
-/// things from the raw binary encoding of the state.
-#[derive(Clone)]
-struct State<'a> {
-    /// The state to transition to when 'class_to_next' yields a transition
-    /// to the FAIL state.
-    fail: StateID,
-    /// The number of pattern IDs in this state. For a non-match state, this is
-    /// always zero. Otherwise it is always bigger than zero.
-    match_len: usize,
-    /// The sparse or dense representation of the transitions for this state.
-    trans: StateTrans<'a>,
-}
-
-/// The underlying representation of sparse or dense transitions for a state.
-///
-/// Note that like `State`, we don't typically construct values of this type
-/// during a search since we don't always need all values and thus would
-/// represent a lot of wasteful work.
-#[derive(Clone)]
-enum StateTrans<'a> {
-    /// A sparse representation of transitions for a state, where only non-FAIL
-    /// transitions are explicitly represented.
-    Sparse {
-        classes: &'a [u32],
-        /// The transitions for this state, where each transition is packed
-        /// into a u32. The low 8 bits correspond to the byte class for the
-        /// transition, and the high 24 bits correspond to the next state ID.
-        ///
-        /// This packing is why the max state ID allowed for a contiguous
-        /// NFA is 2^24-1.
-        nexts: &'a [u32],
-    },
-    /// A "one transition" state that is never a match state.
-    ///
-    /// These are by far the most common state, so we use a specialized and
-    /// very compact representation for them.
-    One {
-        /// The element of this NFA's alphabet that this transition is
-        /// defined for.
-        class: u8,
-        /// The state this should transition to if the current symbol is
-        /// equal to 'class'.
-        next: u32,
-    },
-    /// A dense representation of transitions for a state, where all
-    /// transitions are explicitly represented, including transitions to the
-    /// FAIL state.
-    Dense {
-        /// A dense set of transitions to other states. The transitions may
-        /// point to a FAIL state, in which case, the search should try the
-        /// same transition lookup at 'fail'.
-        ///
-        /// Note that this is indexed by byte equivalence classes and not
-        /// byte values. That means 'class_to_next[byte]' is wrong and
-        /// 'class_to_next[classes.get(byte)]' is correct. The number of
-        /// transitions is always equivalent to 'classes.alphabet_len()'.
-        class_to_next: &'a [u32],
-    },
-}
-
-impl<'a> State<'a> {
-    /// The offset of where the "kind" of a state is stored. If it isn't one
-    /// of the sentinel values below, then it's a sparse state and the kind
-    /// corresponds to the number of transitions in the state.
-    const KIND: usize = 0;
-
-    /// A sentinel value indicating that the state uses a dense representation.
-    const KIND_DENSE: u32 = 0xFF;
-    /// A sentinel value indicating that the state uses a special "one
-    /// transition" encoding. In practice, non-match states with one transition
-    /// make up the overwhelming majority of all states in any given
-    /// Aho-Corasick automaton, so we can specialize them using a very compact
-    /// representation.
-    const KIND_ONE: u32 = 0xFE;
-
-    /// The maximum number of transitions to encode as a sparse state. Usually
-    /// states with a lot of transitions are either very rare, or occur near
-    /// the start state. In the latter case, they are probably dense already
-    /// anyway. In the former case, making them dense is fine because they're
-    /// rare.
-    ///
-    /// This needs to be small enough to permit each of the sentinel values for
-    /// 'KIND' above. Namely, a sparse state embeds the number of transitions
-    /// into the 'KIND'. Basically, "sparse" is a state kind too, but it's the
-    /// "else" branch.
-    ///
-    /// N.B. There isn't anything particularly magical about 127 here. I
-    /// just picked it because I figured any sparse state with this many
-    /// transitions is going to be exceptionally rare, and if it did have this
-    /// many transitions, then it would be quite slow to do a linear scan on
-    /// the transitions during a search anyway.
-    const MAX_SPARSE_TRANSITIONS: usize = 127;
-
-    /// Remap state IDs in-place.
-    ///
-    /// `state` should be the the raw binary encoding of a state. (The start
-    /// of the slice must correspond to the start of the state, but the slice
-    /// may extend past the end of the encoding of the state.)
-    fn remap(
-        alphabet_len: usize,
-        old_to_new: &[StateID],
-        state: &mut [u32],
-    ) -> Result<(), BuildError> {
-        let kind = State::kind(state);
-        if kind == State::KIND_DENSE {
-            state[1] = old_to_new[state[1].as_usize()].as_u32();
-            for next in state[2..][..alphabet_len].iter_mut() {
-                *next = old_to_new[next.as_usize()].as_u32();
-            }
-        } else if kind == State::KIND_ONE {
-            state[1] = old_to_new[state[1].as_usize()].as_u32();
-            state[2] = old_to_new[state[2].as_usize()].as_u32();
-        } else {
-            let trans_len = State::sparse_trans_len(state);
-            let classes_len = u32_len(trans_len);
-            state[1] = old_to_new[state[1].as_usize()].as_u32();
-            for next in state[2 + classes_len..][..trans_len].iter_mut() {
-                *next = old_to_new[next.as_usize()].as_u32();
-            }
-        }
-        Ok(())
-    }
-
-    /// Returns the length, in number of u32s, of this state.
-    ///
-    /// This is useful for reading states consecutively, e.g., in the Debug
-    /// impl without needing to store a separate map from state index to state
-    /// identifier.
-    ///
-    /// `state` should be the the raw binary encoding of a state. (The start
-    /// of the slice must correspond to the start of the state, but the slice
-    /// may extend past the end of the encoding of the state.)
-    fn len(alphabet_len: usize, is_match: bool, state: &[u32]) -> usize {
-        let kind_len = 1;
-        let fail_len = 1;
-        let kind = State::kind(state);
-        let (classes_len, trans_len) = if kind == State::KIND_DENSE {
-            (0, alphabet_len)
-        } else if kind == State::KIND_ONE {
-            (0, 1)
-        } else {
-            let trans_len = State::sparse_trans_len(state);
-            let classes_len = u32_len(trans_len);
-            (classes_len, trans_len)
-        };
-        let match_len = if !is_match {
-            0
-        } else if State::match_len(alphabet_len, state) == 1 {
-            // This is a special case because when there is one pattern ID for
-            // a match state, it is represented by a single u32 with its high
-            // bit set (which is impossible for a valid pattern ID).
-            1
-        } else {
-            // We add 1 to include the u32 that indicates the number of
-            // pattern IDs that follow.
-            1 + State::match_len(alphabet_len, state)
-        };
-        kind_len + fail_len + classes_len + trans_len + match_len
-    }
-
-    /// Returns the kind of this state.
-    ///
-    /// This only includes the low byte.
-    #[inline(always)]
-    fn kind(state: &[u32]) -> u32 {
-        state[State::KIND] & 0xFF
-    }
-
-    /// Get the number of sparse transitions in this state. This can never
-    /// be more than State::MAX_SPARSE_TRANSITIONS, as all states with more
-    /// transitions are encoded as dense states.
-    ///
-    /// `state` should be the the raw binary encoding of a sparse state. (The
-    /// start of the slice must correspond to the start of the state, but the
-    /// slice may extend past the end of the encoding of the state.) If this
-    /// isn't a sparse state, then the return value is unspecified.
-    ///
-    /// Do note that this is only legal to call on a sparse state. So for
-    /// example, "one transition" state is not a sparse state, so it would not
-    /// be legal to call this method on such a state.
-    #[inline(always)]
-    fn sparse_trans_len(state: &[u32]) -> usize {
-        (state[State::KIND] & 0xFF).as_usize()
-    }
-
-    /// Returns the total number of matching pattern IDs in this state. Calling
-    /// this on a state that isn't a match results in unspecified behavior.
-    /// Thus, the returned number is never 0 for all correct calls.
-    ///
-    /// `state` should be the the raw binary encoding of a state. (The start
-    /// of the slice must correspond to the start of the state, but the slice
-    /// may extend past the end of the encoding of the state.)
-    #[inline(always)]
-    fn match_len(alphabet_len: usize, state: &[u32]) -> usize {
-        // We don't need to handle KIND_ONE here because it can never be a
-        // match state.
-        let packed = if State::kind(state) == State::KIND_DENSE {
-            let start = 2 + alphabet_len;
-            state[start].as_usize()
-        } else {
-            let trans_len = State::sparse_trans_len(state);
-            let classes_len = u32_len(trans_len);
-            let start = 2 + classes_len + trans_len;
-            state[start].as_usize()
-        };
-        if packed & (1 << 31) == 0 {
-            packed
-        } else {
-            1
-        }
-    }
-
-    /// Returns the pattern ID corresponding to the given index for the state
-    /// given. The `index` provided must be less than the number of pattern IDs
-    /// in this state.
-    ///
-    /// `state` should be the the raw binary encoding of a state. (The start of
-    /// the slice must correspond to the start of the state, but the slice may
-    /// extend past the end of the encoding of the state.)
-    ///
-    /// If the given state is not a match state or if the index is out of
-    /// bounds, then this has unspecified behavior.
-    #[inline(always)]
-    fn match_pattern(
-        alphabet_len: usize,
-        state: &[u32],
-        index: usize,
-    ) -> PatternID {
-        // We don't need to handle KIND_ONE here because it can never be a
-        // match state.
-        let start = if State::kind(state) == State::KIND_DENSE {
-            2 + alphabet_len
-        } else {
-            let trans_len = State::sparse_trans_len(state);
-            let classes_len = u32_len(trans_len);
-            2 + classes_len + trans_len
-        };
-        let packed = state[start];
-        let pid = if packed & (1 << 31) == 0 {
-            state[start + 1 + index]
-        } else {
-            assert_eq!(0, index);
-            packed & !(1 << 31)
-        };
-        PatternID::from_u32_unchecked(pid)
-    }
-
-    /// Read a state's binary encoding to its in-memory representation.
-    ///
-    /// `alphabet_len` should be the total number of transitions defined for
-    /// dense states.
-    ///
-    /// `is_match` should be true if this state is a match state and false
-    /// otherwise.
-    ///
-    /// `state` should be the the raw binary encoding of a state. (The start
-    /// of the slice must correspond to the start of the state, but the slice
-    /// may extend past the end of the encoding of the state.)
-    fn read(
-        alphabet_len: usize,
-        is_match: bool,
-        state: &'a [u32],
-    ) -> State<'a> {
-        let kind = State::kind(state);
-        let match_len =
-            if !is_match { 0 } else { State::match_len(alphabet_len, state) };
-        let (trans, fail) = if kind == State::KIND_DENSE {
-            let fail = StateID::from_u32_unchecked(state[1]);
-            let class_to_next = &state[2..][..alphabet_len];
-            (StateTrans::Dense { class_to_next }, fail)
-        } else if kind == State::KIND_ONE {
-            let fail = StateID::from_u32_unchecked(state[1]);
-            let class = state[State::KIND].low_u16().high_u8();
-            let next = state[2];
-            (StateTrans::One { class, next }, fail)
-        } else {
-            let fail = StateID::from_u32_unchecked(state[1]);
-            let trans_len = State::sparse_trans_len(state);
-            let classes_len = u32_len(trans_len);
-            let classes = &state[2..][..classes_len];
-            let nexts = &state[2 + classes_len..][..trans_len];
-            (StateTrans::Sparse { classes, nexts }, fail)
-        };
-        State { fail, match_len, trans }
-    }
-
-    /// Encode the "old" state from a noncontiguous NFA to its binary
-    /// representation to the given `dst` slice. `classes` should be the byte
-    /// classes computed for the noncontiguous NFA that the given state came
-    /// from.
-    ///
-    /// This returns an error if `dst` became so big that `StateID`s can no
-    /// longer be created for new states. Otherwise, it returns the state ID of
-    /// the new state created.
-    ///
-    /// When `force_dense` is true, then the encoded state will always use a
-    /// dense format. Otherwise, the choice between dense and sparse will be
-    /// automatically chosen based on the old state.
-    fn write(
-        nnfa: &noncontiguous::NFA,
-        oldsid: StateID,
-        old: &noncontiguous::State,
-        classes: &ByteClasses,
-        dst: &mut Vec<u32>,
-        force_dense: bool,
-    ) -> Result<StateID, BuildError> {
-        let sid = StateID::new(dst.len()).map_err(|e| {
-            BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted())
-        })?;
-        let old_len = nnfa.iter_trans(oldsid).count();
-        // For states with a lot of transitions, we might as well just make
-        // them dense. These kinds of hot states tend to be very rare, so we're
-        // okay with it. This also gives us more sentinels in the state's
-        // 'kind', which lets us create different state kinds to save on
-        // space.
-        let kind = if force_dense || old_len > State::MAX_SPARSE_TRANSITIONS {
-            State::KIND_DENSE
-        } else if old_len == 1 && !old.is_match() {
-            State::KIND_ONE
-        } else {
-            // For a sparse state, the kind is just the number of transitions.
-            u32::try_from(old_len).unwrap()
-        };
-        if kind == State::KIND_DENSE {
-            dst.push(kind);
-            dst.push(old.fail().as_u32());
-            State::write_dense_trans(nnfa, oldsid, classes, dst)?;
-        } else if kind == State::KIND_ONE {
-            let t = nnfa.iter_trans(oldsid).next().unwrap();
-            let class = u32::from(classes.get(t.byte()));
-            dst.push(kind | (class << 8));
-            dst.push(old.fail().as_u32());
-            dst.push(t.next().as_u32());
-        } else {
-            dst.push(kind);
-            dst.push(old.fail().as_u32());
-            State::write_sparse_trans(nnfa, oldsid, classes, dst)?;
-        }
-        // Now finally write the number of matches and the matches themselves.
-        if old.is_match() {
-            let matches_len = nnfa.iter_matches(oldsid).count();
-            if matches_len == 1 {
-                let pid = nnfa.iter_matches(oldsid).next().unwrap().as_u32();
-                assert_eq!(0, pid & (1 << 31));
-                dst.push((1 << 31) | pid);
-            } else {
-                assert_eq!(0, matches_len & (1 << 31));
-                dst.push(matches_len.as_u32());
-                dst.extend(nnfa.iter_matches(oldsid).map(|pid| pid.as_u32()));
-            }
-        }
-        Ok(sid)
-    }
-
-    /// Encode the "old" state transitions from a noncontiguous NFA to its
-    /// binary sparse representation to the given `dst` slice. `classes` should
-    /// be the byte classes computed for the noncontiguous NFA that the given
-    /// state came from.
-    ///
-    /// This returns an error if `dst` became so big that `StateID`s can no
-    /// longer be created for new states.
-    fn write_sparse_trans(
-        nnfa: &noncontiguous::NFA,
-        oldsid: StateID,
-        classes: &ByteClasses,
-        dst: &mut Vec<u32>,
-    ) -> Result<(), BuildError> {
-        let (mut chunk, mut len) = ([0; 4], 0);
-        for t in nnfa.iter_trans(oldsid) {
-            chunk[len] = classes.get(t.byte());
-            len += 1;
-            if len == 4 {
-                dst.push(u32::from_ne_bytes(chunk));
-                chunk = [0; 4];
-                len = 0;
-            }
-        }
-        if len > 0 {
-            // In the case where the number of transitions isn't divisible
-            // by 4, the last u32 chunk will have some left over room. In
-            // this case, we "just" repeat the last equivalence class. By
-            // doing this, we know the leftover faux transitions will never
-            // be followed because if they were, it would have been followed
-            // prior to it in the last equivalence class. This saves us some
-            // branching in the search time state transition code.
-            let repeat = chunk[len - 1];
-            while len < 4 {
-                chunk[len] = repeat;
-                len += 1;
-            }
-            dst.push(u32::from_ne_bytes(chunk));
-        }
-        for t in nnfa.iter_trans(oldsid) {
-            dst.push(t.next().as_u32());
-        }
-        Ok(())
-    }
-
-    /// Encode the "old" state transitions from a noncontiguous NFA to its
-    /// binary dense representation to the given `dst` slice. `classes` should
-    /// be the byte classes computed for the noncontiguous NFA that the given
-    /// state came from.
-    ///
-    /// This returns an error if `dst` became so big that `StateID`s can no
-    /// longer be created for new states.
-    fn write_dense_trans(
-        nnfa: &noncontiguous::NFA,
-        oldsid: StateID,
-        classes: &ByteClasses,
-        dst: &mut Vec<u32>,
-    ) -> Result<(), BuildError> {
-        // Our byte classes let us shrink the size of our dense states to the
-        // number of equivalence classes instead of just fixing it to 256.
-        // Any non-explicitly defined transition is just a transition to the
-        // FAIL state, so we fill that in first and then overwrite them with
-        // explicitly defined transitions. (Most states probably only have one
-        // or two explicitly defined transitions.)
-        //
-        // N.B. Remember that while building the contiguous NFA, we use state
-        // IDs from the noncontiguous NFA. It isn't until we've added all
-        // states that we go back and map noncontiguous IDs to contiguous IDs.
-        let start = dst.len();
-        dst.extend(
-            core::iter::repeat(noncontiguous::NFA::FAIL.as_u32())
-                .take(classes.alphabet_len()),
-        );
-        assert!(start < dst.len(), "equivalence classes are never empty");
-        for t in nnfa.iter_trans(oldsid) {
-            dst[start + usize::from(classes.get(t.byte()))] =
-                t.next().as_u32();
-        }
-        Ok(())
-    }
-
-    /// Return an iterator over every explicitly defined transition in this
-    /// state.
-    fn transitions<'b>(&'b self) -> impl Iterator<Item = (u8, StateID)> + 'b {
-        let mut i = 0;
-        core::iter::from_fn(move || match self.trans {
-            StateTrans::Sparse { classes, nexts } => {
-                if i >= nexts.len() {
-                    return None;
-                }
-                let chunk = classes[i / 4];
-                let class = chunk.to_ne_bytes()[i % 4];
-                let next = StateID::from_u32_unchecked(nexts[i]);
-                i += 1;
-                Some((class, next))
-            }
-            StateTrans::One { class, next } => {
-                if i == 0 {
-                    i += 1;
-                    Some((class, StateID::from_u32_unchecked(next)))
-                } else {
-                    None
-                }
-            }
-            StateTrans::Dense { class_to_next } => {
-                if i >= class_to_next.len() {
-                    return None;
-                }
-                let class = i.as_u8();
-                let next = StateID::from_u32_unchecked(class_to_next[i]);
-                i += 1;
-                Some((class, next))
-            }
-        })
-    }
-}
-
-impl<'a> core::fmt::Debug for State<'a> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        use crate::{automaton::sparse_transitions, util::debug::DebugByte};
-
-        let it = sparse_transitions(self.transitions())
-            // Writing out all FAIL transitions is quite noisy. Instead, we
-            // just require readers of the output to assume anything absent
-            // maps to the FAIL transition.
-            .filter(|&(_, _, sid)| sid != NFA::FAIL)
-            .enumerate();
-        for (i, (start, end, sid)) in it {
-            if i > 0 {
-                write!(f, ", ")?;
-            }
-            if start == end {
-                write!(f, "{:?} => {:?}", DebugByte(start), sid.as_usize())?;
-            } else {
-                write!(
-                    f,
-                    "{:?}-{:?} => {:?}",
-                    DebugByte(start),
-                    DebugByte(end),
-                    sid.as_usize()
-                )?;
-            }
-        }
-        Ok(())
-    }
-}
-
-/// A builder for configuring an Aho-Corasick contiguous NFA.
-///
-/// This builder has a subset of the options available to a
-/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options,
-/// their behavior is identical.
-#[derive(Clone, Debug)]
-pub struct Builder {
-    noncontiguous: noncontiguous::Builder,
-    dense_depth: usize,
-    byte_classes: bool,
-}
-
-impl Default for Builder {
-    fn default() -> Builder {
-        Builder {
-            noncontiguous: noncontiguous::Builder::new(),
-            dense_depth: 2,
-            byte_classes: true,
-        }
-    }
-}
-
-impl Builder {
-    /// Create a new builder for configuring an Aho-Corasick contiguous NFA.
-    pub fn new() -> Builder {
-        Builder::default()
-    }
-
-    /// Build an Aho-Corasick contiguous NFA from the given iterator of
-    /// patterns.
-    ///
-    /// A builder may be reused to create more NFAs.
-    pub fn build<I, P>(&self, patterns: I) -> Result<NFA, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        let nnfa = self.noncontiguous.build(patterns)?;
-        self.build_from_noncontiguous(&nnfa)
-    }
-
-    /// Build an Aho-Corasick contiguous NFA from the given noncontiguous NFA.
-    ///
-    /// Note that when this method is used, only the `dense_depth` and
-    /// `byte_classes` settings on this builder are respected. The other
-    /// settings only apply to the initial construction of the Aho-Corasick
-    /// automaton. Since using this method requires that initial construction
-    /// has already completed, all settings impacting only initial construction
-    /// are no longer relevant.
-    pub fn build_from_noncontiguous(
-        &self,
-        nnfa: &noncontiguous::NFA,
-    ) -> Result<NFA, BuildError> {
-        debug!("building contiguous NFA");
-        let byte_classes = if self.byte_classes {
-            nnfa.byte_classes().clone()
-        } else {
-            ByteClasses::singletons()
-        };
-        let mut index_to_state_id = vec![NFA::DEAD; nnfa.states().len()];
-        let mut nfa = NFA {
-            repr: vec![],
-            pattern_lens: nnfa.pattern_lens_raw().to_vec(),
-            state_len: nnfa.states().len(),
-            prefilter: nnfa.prefilter().map(|p| p.clone()),
-            match_kind: nnfa.match_kind(),
-            alphabet_len: byte_classes.alphabet_len(),
-            byte_classes,
-            min_pattern_len: nnfa.min_pattern_len(),
-            max_pattern_len: nnfa.max_pattern_len(),
-            // The special state IDs are set later.
-            special: Special::zero(),
-        };
-        for (oldsid, state) in nnfa.states().iter().with_state_ids() {
-            // We don't actually encode a fail state since it isn't necessary.
-            // But we still want to make sure any FAIL ids are mapped
-            // correctly.
-            if oldsid == noncontiguous::NFA::FAIL {
-                index_to_state_id[oldsid] = NFA::FAIL;
-                continue;
-            }
-            let force_dense = state.depth().as_usize() < self.dense_depth;
-            let newsid = State::write(
-                nnfa,
-                oldsid,
-                state,
-                &nfa.byte_classes,
-                &mut nfa.repr,
-                force_dense,
-            )?;
-            index_to_state_id[oldsid] = newsid;
-        }
-        for &newsid in index_to_state_id.iter() {
-            if newsid == NFA::FAIL {
-                continue;
-            }
-            let state = &mut nfa.repr[newsid.as_usize()..];
-            State::remap(nfa.alphabet_len, &index_to_state_id, state)?;
-        }
-        // Now that we've remapped all the IDs in our states, all that's left
-        // is remapping the special state IDs.
-        let remap = &index_to_state_id;
-        let old = nnfa.special();
-        let new = &mut nfa.special;
-        new.max_special_id = remap[old.max_special_id];
-        new.max_match_id = remap[old.max_match_id];
-        new.start_unanchored_id = remap[old.start_unanchored_id];
-        new.start_anchored_id = remap[old.start_anchored_id];
-        debug!(
-            "contiguous NFA built, <states: {:?}, size: {:?}, \
-             alphabet len: {:?}>",
-            nfa.state_len,
-            nfa.memory_usage(),
-            nfa.byte_classes.alphabet_len(),
-        );
-        // The vectors can grow ~twice as big during construction because a
-        // Vec amortizes growth. But here, let's shrink things back down to
-        // what we actually need since we're never going to add more to it.
-        nfa.repr.shrink_to_fit();
-        nfa.pattern_lens.shrink_to_fit();
-        Ok(nfa)
-    }
-
-    /// Set the desired match semantics.
-    ///
-    /// This only applies when using [`Builder::build`] and not
-    /// [`Builder::build_from_noncontiguous`].
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind)
-    /// for more documentation and examples.
-    pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder {
-        self.noncontiguous.match_kind(kind);
-        self
-    }
-
-    /// Enable ASCII-aware case insensitive matching.
-    ///
-    /// This only applies when using [`Builder::build`] and not
-    /// [`Builder::build_from_noncontiguous`].
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive)
-    /// for more documentation and examples.
-    pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder {
-        self.noncontiguous.ascii_case_insensitive(yes);
-        self
-    }
-
-    /// Enable heuristic prefilter optimizations.
-    ///
-    /// This only applies when using [`Builder::build`] and not
-    /// [`Builder::build_from_noncontiguous`].
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter)
-    /// for more documentation and examples.
-    pub fn prefilter(&mut self, yes: bool) -> &mut Builder {
-        self.noncontiguous.prefilter(yes);
-        self
-    }
-
-    /// Set the limit on how many states use a dense representation for their
-    /// transitions. Other states will generally use a sparse representation.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::dense_depth`](crate::AhoCorasickBuilder::dense_depth)
-    /// for more documentation and examples.
-    pub fn dense_depth(&mut self, depth: usize) -> &mut Builder {
-        self.dense_depth = depth;
-        self
-    }
-
-    /// A debug setting for whether to attempt to shrink the size of the
-    /// automaton's alphabet or not.
-    ///
-    /// This should never be enabled unless you're debugging an automaton.
-    /// Namely, disabling byte classes makes transitions easier to reason
-    /// about, since they use the actual bytes instead of equivalence classes.
-    /// Disabling this confers no performance benefit at search time.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::byte_classes`](crate::AhoCorasickBuilder::byte_classes)
-    /// for more documentation and examples.
-    pub fn byte_classes(&mut self, yes: bool) -> &mut Builder {
-        self.byte_classes = yes;
-        self
-    }
-}
-
-/// Computes the number of u32 values needed to represent one byte per the
-/// number of transitions given.
-fn u32_len(ntrans: usize) -> usize {
-    if ntrans % 4 == 0 {
-        ntrans >> 2
-    } else {
-        (ntrans >> 2) + 1
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    // This test demonstrates a SWAR technique I tried in the sparse transition
-    // code inside of 'next_state'. Namely, sparse transitions work by
-    // iterating over u32 chunks, with each chunk containing up to 4 classes
-    // corresponding to 4 transitions. This SWAR technique lets us find a
-    // matching transition without converting the u32 to a [u8; 4].
-    //
-    // It turned out to be a little slower unfortunately, which isn't too
-    // surprising, since this is likely a throughput oriented optimization.
-    // Loop unrolling doesn't really help us because the vast majority of
-    // states have very few transitions.
-    //
-    // Anyway, this code was a little tricky to write, so I converted it to a
-    // test in case someone figures out how to use it more effectively than
-    // I could.
-    //
-    // (This also only works on little endian. So big endian would need to be
-    // accounted for if we ever decided to use this I think.)
-    #[cfg(target_endian = "little")]
-    #[test]
-    fn swar() {
-        use super::*;
-
-        fn has_zero_byte(x: u32) -> u32 {
-            const LO_U32: u32 = 0x01010101;
-            const HI_U32: u32 = 0x80808080;
-
-            x.wrapping_sub(LO_U32) & !x & HI_U32
-        }
-
-        fn broadcast(b: u8) -> u32 {
-            (u32::from(b)) * (u32::MAX / 255)
-        }
-
-        fn index_of(x: u32) -> usize {
-            let o =
-                (((x - 1) & 0x01010101).wrapping_mul(0x01010101) >> 24) - 1;
-            o.as_usize()
-        }
-
-        let bytes: [u8; 4] = [b'1', b'A', b'a', b'z'];
-        let chunk = u32::from_ne_bytes(bytes);
-
-        let needle = broadcast(b'1');
-        assert_eq!(0, index_of(has_zero_byte(needle ^ chunk)));
-        let needle = broadcast(b'A');
-        assert_eq!(1, index_of(has_zero_byte(needle ^ chunk)));
-        let needle = broadcast(b'a');
-        assert_eq!(2, index_of(has_zero_byte(needle ^ chunk)));
-        let needle = broadcast(b'z');
-        assert_eq!(3, index_of(has_zero_byte(needle ^ chunk)));
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/mod.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/mod.rs
deleted file mode 100644
index 93f4dc2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/mod.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-/*!
-Provides direct access to NFA implementations of Aho-Corasick.
-
-The principle characteristic of an NFA in this crate is that it may
-transition through multiple states per byte of haystack. In Aho-Corasick
-parlance, NFAs follow failure transitions during a search. In contrast,
-a [`DFA`](crate::dfa::DFA) pre-computes all failure transitions during
-compilation at the expense of a much bigger memory footprint.
-
-Currently, there are two NFA implementations provided: noncontiguous and
-contiguous. The names reflect their internal representation, and consequently,
-the trade offs associated with them:
-
-* A [`noncontiguous::NFA`] uses a separate allocation for every NFA state to
-represent its transitions in a sparse format. This is ideal for building an
-NFA, since it cheaply permits different states to have a different number of
-transitions. A noncontiguous NFA is where the main Aho-Corasick construction
-algorithm is implemented. All other Aho-Corasick implementations are built by
-first constructing a noncontiguous NFA.
-* A [`contiguous::NFA`] is uses a single allocation to represent all states,
-while still encoding most states as sparse states but permitting states near
-the starting state to have a dense representation. The dense representation
-uses more memory, but permits computing transitions during a search more
-quickly. By only making the most active states dense (the states near the
-starting state), a contiguous NFA better balances memory usage with search
-speed. The single contiguous allocation also uses less overhead per state and
-enables compression tricks where most states only use 8 bytes of heap memory.
-
-When given the choice between these two, you almost always want to pick a
-contiguous NFA. It takes only a little longer to build, but both its memory
-usage and search speed are typically much better than a noncontiguous NFA. A
-noncontiguous NFA is useful when prioritizing build times, or when there are
-so many patterns that a contiguous NFA could not be built. (Currently, because
-of both memory and search speed improvements, a contiguous NFA has a smaller
-internal limit on the total number of NFA states it can represent. But you
-would likely need to have hundreds of thousands or even millions of patterns
-before you hit this limit.)
-*/
-pub mod contiguous;
-pub mod noncontiguous;
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/noncontiguous.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/noncontiguous.rs
deleted file mode 100644
index af32617..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/noncontiguous.rs
+++ /dev/null
@@ -1,1762 +0,0 @@
-/*!
-Provides a noncontiguous NFA implementation of Aho-Corasick.
-
-This is a low-level API that generally only needs to be used in niche
-circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick)
-instead of a noncontiguous NFA directly. Using an `NFA` directly is typically
-only necessary when one needs access to the [`Automaton`] trait implementation.
-*/
-
-use alloc::{
-    collections::{BTreeSet, VecDeque},
-    vec,
-    vec::Vec,
-};
-
-use crate::{
-    automaton::Automaton,
-    util::{
-        alphabet::{ByteClassSet, ByteClasses},
-        error::{BuildError, MatchError},
-        prefilter::{self, opposite_ascii_case, Prefilter},
-        primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID},
-        remapper::Remapper,
-        search::{Anchored, MatchKind},
-        special::Special,
-    },
-};
-
-/// A noncontiguous NFA implementation of Aho-Corasick.
-///
-/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of
-/// this type directly. Using an `NFA` directly is typically only necessary
-/// when one needs access to the [`Automaton`] trait implementation.
-///
-/// This NFA represents the "core" implementation of Aho-Corasick in this
-/// crate. Namely, constructing this NFA involving building a trie and then
-/// filling in the failure transitions between states, similar to what is
-/// described in any standard textbook description of Aho-Corasick.
-///
-/// In order to minimize heap usage and to avoid additional construction costs,
-/// this implementation represents the transitions of all states as distinct
-/// sparse memory allocations. This is where it gets its name from. That is,
-/// this NFA has no contiguous memory allocation for its transition table. Each
-/// state gets its own allocation.
-///
-/// While the sparse representation keeps memory usage to somewhat reasonable
-/// levels, it is still quite large and also results in somewhat mediocre
-/// search performance. For this reason, it is almost always a good idea to
-/// use a [`contiguous::NFA`](crate::nfa::contiguous::NFA) instead. It is
-/// marginally slower to build, but has higher throughput and can sometimes use
-/// an order of magnitude less memory. The main reason to use a noncontiguous
-/// NFA is when you need the fastest possible construction time, or when a
-/// contiguous NFA does not have the desired capacity. (The total number of NFA
-/// states it can have is fewer than a noncontiguous NFA.)
-///
-/// # Example
-///
-/// This example shows how to build an `NFA` directly and use it to execute
-/// [`Automaton::try_find`]:
-///
-/// ```
-/// use aho_corasick::{
-///     automaton::Automaton,
-///     nfa::noncontiguous::NFA,
-///     Input, Match,
-/// };
-///
-/// let patterns = &["b", "abc", "abcd"];
-/// let haystack = "abcd";
-///
-/// let nfa = NFA::new(patterns).unwrap();
-/// assert_eq!(
-///     Some(Match::must(0, 1..2)),
-///     nfa.try_find(&Input::new(haystack))?,
-/// );
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-///
-/// It is also possible to implement your own version of `try_find`. See the
-/// [`Automaton`] documentation for an example.
-#[derive(Clone)]
-pub struct NFA {
-    /// The match semantics built into this NFA.
-    match_kind: MatchKind,
-    /// A set of states. Each state defines its own transitions, a fail
-    /// transition and a set of indices corresponding to matches.
-    ///
-    /// The first state is always the fail state, which is used only as a
-    /// sentinel. Namely, in the final NFA, no transition into the fail state
-    /// exists. (Well, they do, but they aren't followed. Instead, the state's
-    /// failure transition is followed.)
-    ///
-    /// The second state (index 1) is always the dead state. Dead states are
-    /// in every automaton, but only used when leftmost-{first,longest} match
-    /// semantics are enabled. Specifically, they instruct search to stop
-    /// at specific points in order to report the correct match location. In
-    /// the standard Aho-Corasick construction, there are no transitions to
-    /// the dead state.
-    ///
-    /// The third state (index 2) is generally intended to be the starting or
-    /// "root" state.
-    states: Vec<State>,
-    /// Transitions stored in a sparse representation via a linked list.
-    ///
-    /// Each transition contains three pieces of information: the byte it
-    /// is defined for, the state it transitions to and a link to the next
-    /// transition in the same state (or `StateID::ZERO` if it is the last
-    /// transition).
-    ///
-    /// The first transition for each state is determined by `State::sparse`.
-    ///
-    /// Note that this contains a complete set of all transitions in this NFA,
-    /// including states that have a dense representation for transitions.
-    /// (Adding dense transitions for a state doesn't remove its sparse
-    /// transitions, since deleting transitions from this particular sparse
-    /// representation would be fairly expensive.)
-    sparse: Vec<Transition>,
-    /// Transitions stored in a dense representation.
-    ///
-    /// A state has a row in this table if and only if `State::dense` is
-    /// not equal to `StateID::ZERO`. When not zero, there are precisely
-    /// `NFA::byte_classes::alphabet_len()` entries beginning at `State::dense`
-    /// in this table.
-    ///
-    /// Generally a very small minority of states have a dense representation
-    /// since it uses so much memory.
-    dense: Vec<StateID>,
-    /// Matches stored in linked list for each state.
-    ///
-    /// Like sparse transitions, each match has a link to the next match in the
-    /// state.
-    ///
-    /// The first match for each state is determined by `State::matches`.
-    matches: Vec<Match>,
-    /// The length, in bytes, of each pattern in this NFA. This slice is
-    /// indexed by `PatternID`.
-    ///
-    /// The number of entries in this vector corresponds to the total number of
-    /// patterns in this automaton.
-    pattern_lens: Vec<SmallIndex>,
-    /// A prefilter for quickly skipping to candidate matches, if pertinent.
-    prefilter: Option<Prefilter>,
-    /// A set of equivalence classes in terms of bytes. We compute this while
-    /// building the NFA, but don't use it in the NFA's states. Instead, we
-    /// use this for building the DFA. We store it on the NFA since it's easy
-    /// to compute while visiting the patterns.
-    byte_classes: ByteClasses,
-    /// The length, in bytes, of the shortest pattern in this automaton. This
-    /// information is useful for detecting whether an automaton matches the
-    /// empty string or not.
-    min_pattern_len: usize,
-    /// The length, in bytes, of the longest pattern in this automaton. This
-    /// information is useful for keeping correct buffer sizes when searching
-    /// on streams.
-    max_pattern_len: usize,
-    /// The information required to deduce which states are "special" in this
-    /// NFA.
-    ///
-    /// Since the DEAD and FAIL states are always the first two states and
-    /// there are only ever two start states (which follow all of the match
-    /// states), it follows that we can determine whether a state is a fail,
-    /// dead, match or start with just a few comparisons on the ID itself:
-    ///
-    ///    is_dead(sid): sid == NFA::DEAD
-    ///    is_fail(sid): sid == NFA::FAIL
-    ///   is_match(sid): NFA::FAIL < sid && sid <= max_match_id
-    ///   is_start(sid): sid == start_unanchored_id || sid == start_anchored_id
-    ///
-    /// Note that this only applies to the NFA after it has been constructed.
-    /// During construction, the start states are the first ones added and the
-    /// match states are inter-leaved with non-match states. Once all of the
-    /// states have been added, the states are shuffled such that the above
-    /// predicates hold.
-    special: Special,
-}
-
-impl NFA {
-    /// Create a new Aho-Corasick noncontiguous NFA using the default
-    /// configuration.
-    ///
-    /// Use a [`Builder`] if you want to change the configuration.
-    pub fn new<I, P>(patterns: I) -> Result<NFA, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        NFA::builder().build(patterns)
-    }
-
-    /// A convenience method for returning a new Aho-Corasick noncontiguous NFA
-    /// builder.
-    ///
-    /// This usually permits one to just import the `NFA` type.
-    pub fn builder() -> Builder {
-        Builder::new()
-    }
-}
-
-impl NFA {
-    /// The DEAD state is a sentinel state like the FAIL state. The DEAD state
-    /// instructs any search to stop and return any currently recorded match,
-    /// or no match otherwise. Generally speaking, it is impossible for an
-    /// unanchored standard search to enter a DEAD state. But an anchored
-    /// search can, and so to can a leftmost search.
-    ///
-    /// We put DEAD before FAIL so that DEAD is always 0. We repeat this
-    /// decision across the other Aho-Corasicm automata, so that DEAD
-    /// states there are always 0 too. It's not that we need all of the
-    /// implementations to agree, but rather, the contiguous NFA and the DFA
-    /// use a sort of "premultiplied" state identifier where the only state
-    /// whose ID is always known and constant is the first state. Subsequent
-    /// state IDs depend on how much space has already been used in the
-    /// transition table.
-    pub(crate) const DEAD: StateID = StateID::new_unchecked(0);
-    /// The FAIL state mostly just corresponds to the ID of any transition on a
-    /// state that isn't explicitly defined. When one transitions into the FAIL
-    /// state, one must follow the previous state's failure transition before
-    /// doing the next state lookup. In this way, FAIL is more of a sentinel
-    /// than a state that one actually transitions into. In particular, it is
-    /// never exposed in the `Automaton` interface.
-    pub(crate) const FAIL: StateID = StateID::new_unchecked(1);
-
-    /// Returns the equivalence classes of bytes found while constructing
-    /// this NFA.
-    ///
-    /// Note that the NFA doesn't actually make use of these equivalence
-    /// classes. Instead, these are useful for building the DFA when desired.
-    pub(crate) fn byte_classes(&self) -> &ByteClasses {
-        &self.byte_classes
-    }
-
-    /// Returns a slice containing the length of each pattern in this searcher.
-    /// It is indexed by `PatternID` and has length `NFA::patterns_len`.
-    ///
-    /// This is exposed for convenience when building a contiguous NFA. But it
-    /// can be reconstructed from the `Automaton` API if necessary.
-    pub(crate) fn pattern_lens_raw(&self) -> &[SmallIndex] {
-        &self.pattern_lens
-    }
-
-    /// Returns a slice of all states in this non-contiguous NFA.
-    pub(crate) fn states(&self) -> &[State] {
-        &self.states
-    }
-
-    /// Returns the underlying "special" state information for this NFA.
-    pub(crate) fn special(&self) -> &Special {
-        &self.special
-    }
-
-    /// Swaps the states at `id1` and `id2`.
-    ///
-    /// This does not update the transitions of any state to account for the
-    /// state swap.
-    pub(crate) fn swap_states(&mut self, id1: StateID, id2: StateID) {
-        self.states.swap(id1.as_usize(), id2.as_usize());
-    }
-
-    /// Re-maps all state IDs in this NFA according to the `map` function
-    /// given.
-    pub(crate) fn remap(&mut self, map: impl Fn(StateID) -> StateID) {
-        let alphabet_len = self.byte_classes.alphabet_len();
-        for state in self.states.iter_mut() {
-            state.fail = map(state.fail);
-            let mut link = state.sparse;
-            while link != StateID::ZERO {
-                let t = &mut self.sparse[link];
-                t.next = map(t.next);
-                link = t.link;
-            }
-            if state.dense != StateID::ZERO {
-                let start = state.dense.as_usize();
-                for next in self.dense[start..][..alphabet_len].iter_mut() {
-                    *next = map(*next);
-                }
-            }
-        }
-    }
-
-    /// Iterate over all of the transitions for the given state ID.
-    pub(crate) fn iter_trans(
-        &self,
-        sid: StateID,
-    ) -> impl Iterator<Item = Transition> + '_ {
-        let mut link = self.states[sid].sparse;
-        core::iter::from_fn(move || {
-            if link == StateID::ZERO {
-                return None;
-            }
-            let t = self.sparse[link];
-            link = t.link;
-            Some(t)
-        })
-    }
-
-    /// Iterate over all of the matches for the given state ID.
-    pub(crate) fn iter_matches(
-        &self,
-        sid: StateID,
-    ) -> impl Iterator<Item = PatternID> + '_ {
-        let mut link = self.states[sid].matches;
-        core::iter::from_fn(move || {
-            if link == StateID::ZERO {
-                return None;
-            }
-            let m = self.matches[link];
-            link = m.link;
-            Some(m.pid)
-        })
-    }
-
-    /// Return the link following the one given. If the one given is the last
-    /// link for the given state, then return `None`.
-    ///
-    /// If no previous link is given, then this returns the first link in the
-    /// state, if one exists.
-    ///
-    /// This is useful for manually iterating over the transitions in a single
-    /// state without borrowing the NFA. This permits mutating other parts of
-    /// the NFA during iteration. Namely, one can access the transition pointed
-    /// to by the link via `self.sparse[link]`.
-    fn next_link(
-        &self,
-        sid: StateID,
-        prev: Option<StateID>,
-    ) -> Option<StateID> {
-        let link =
-            prev.map_or(self.states[sid].sparse, |p| self.sparse[p].link);
-        if link == StateID::ZERO {
-            None
-        } else {
-            Some(link)
-        }
-    }
-
-    /// Follow the transition for the given byte in the given state. If no such
-    /// transition exists, then the FAIL state ID is returned.
-    #[inline(always)]
-    fn follow_transition(&self, sid: StateID, byte: u8) -> StateID {
-        let s = &self.states[sid];
-        // This is a special case that targets starting states and states
-        // near a start state. Namely, after the initial trie is constructed,
-        // we look for states close to the start state to convert to a dense
-        // representation for their transitions. This winds up using a lot more
-        // memory per state in exchange for faster transition lookups. But
-        // since we only do this for a small number of states (by default), the
-        // memory usage is usually minimal.
-        //
-        // This has *massive* benefit when executing searches because the
-        // unanchored starting state is by far the hottest state and is
-        // frequently visited. Moreover, the 'for' loop below that works
-        // decently on an actually sparse state is disastrous on a state that
-        // is nearly or completely dense.
-        if s.dense == StateID::ZERO {
-            self.follow_transition_sparse(sid, byte)
-        } else {
-            let class = usize::from(self.byte_classes.get(byte));
-            self.dense[s.dense.as_usize() + class]
-        }
-    }
-
-    /// Like `follow_transition`, but always uses the sparse representation.
-    #[inline(always)]
-    fn follow_transition_sparse(&self, sid: StateID, byte: u8) -> StateID {
-        for t in self.iter_trans(sid) {
-            if byte <= t.byte {
-                if byte == t.byte {
-                    return t.next;
-                }
-                break;
-            }
-        }
-        NFA::FAIL
-    }
-
-    /// Set the transition for the given byte to the state ID given.
-    ///
-    /// Note that one should not set transitions to the FAIL state. It is not
-    /// technically incorrect, but it wastes space. If a transition is not
-    /// defined, then it is automatically assumed to lead to the FAIL state.
-    fn add_transition(
-        &mut self,
-        prev: StateID,
-        byte: u8,
-        next: StateID,
-    ) -> Result<(), BuildError> {
-        if self.states[prev].dense != StateID::ZERO {
-            let dense = self.states[prev].dense;
-            let class = usize::from(self.byte_classes.get(byte));
-            self.dense[dense.as_usize() + class] = next;
-        }
-
-        let head = self.states[prev].sparse;
-        if head == StateID::ZERO || byte < self.sparse[head].byte {
-            let new_link = self.alloc_transition()?;
-            self.sparse[new_link] = Transition { byte, next, link: head };
-            self.states[prev].sparse = new_link;
-            return Ok(());
-        } else if byte == self.sparse[head].byte {
-            self.sparse[head].next = next;
-            return Ok(());
-        }
-
-        // We handled the only cases where the beginning of the transition
-        // chain needs to change. At this point, we now know that there is
-        // at least one entry in the transition chain and the byte for that
-        // transition is less than the byte for the transition we're adding.
-        let (mut link_prev, mut link_next) = (head, self.sparse[head].link);
-        while link_next != StateID::ZERO && byte > self.sparse[link_next].byte
-        {
-            link_prev = link_next;
-            link_next = self.sparse[link_next].link;
-        }
-        if link_next == StateID::ZERO || byte < self.sparse[link_next].byte {
-            let link = self.alloc_transition()?;
-            self.sparse[link] = Transition { byte, next, link: link_next };
-            self.sparse[link_prev].link = link;
-        } else {
-            assert_eq!(byte, self.sparse[link_next].byte);
-            self.sparse[link_next].next = next;
-        }
-        Ok(())
-    }
-
-    /// This sets every possible transition (all 255 of them) for the given
-    /// state to the name `next` value.
-    ///
-    /// This is useful for efficiently initializing start/dead states.
-    ///
-    /// # Panics
-    ///
-    /// This requires that the state has no transitions added to it already.
-    /// If it has any transitions, then this panics. It will also panic if
-    /// the state has been densified prior to calling this.
-    fn init_full_state(
-        &mut self,
-        prev: StateID,
-        next: StateID,
-    ) -> Result<(), BuildError> {
-        assert_eq!(
-            StateID::ZERO,
-            self.states[prev].dense,
-            "state must not be dense yet"
-        );
-        assert_eq!(
-            StateID::ZERO,
-            self.states[prev].sparse,
-            "state must have zero transitions"
-        );
-        let mut prev_link = StateID::ZERO;
-        for byte in 0..=255 {
-            let new_link = self.alloc_transition()?;
-            self.sparse[new_link] =
-                Transition { byte, next, link: StateID::ZERO };
-            if prev_link == StateID::ZERO {
-                self.states[prev].sparse = new_link;
-            } else {
-                self.sparse[prev_link].link = new_link;
-            }
-            prev_link = new_link;
-        }
-        Ok(())
-    }
-
-    /// Add a match for the given pattern ID to the state for the given ID.
-    fn add_match(
-        &mut self,
-        sid: StateID,
-        pid: PatternID,
-    ) -> Result<(), BuildError> {
-        let head = self.states[sid].matches;
-        let mut link = head;
-        while self.matches[link].link != StateID::ZERO {
-            link = self.matches[link].link;
-        }
-        let new_match_link = self.alloc_match()?;
-        self.matches[new_match_link].pid = pid;
-        if link == StateID::ZERO {
-            self.states[sid].matches = new_match_link;
-        } else {
-            self.matches[link].link = new_match_link;
-        }
-        Ok(())
-    }
-
-    /// Copy matches from the `src` state to the `dst` state. This is useful
-    /// when a match state can be reached via a failure transition. In which
-    /// case, you'll want to copy the matches (if any) from the state reached
-    /// by the failure transition to the original state you were at.
-    fn copy_matches(
-        &mut self,
-        src: StateID,
-        dst: StateID,
-    ) -> Result<(), BuildError> {
-        let head_dst = self.states[dst].matches;
-        let mut link_dst = head_dst;
-        while self.matches[link_dst].link != StateID::ZERO {
-            link_dst = self.matches[link_dst].link;
-        }
-        let mut link_src = self.states[src].matches;
-        while link_src != StateID::ZERO {
-            let new_match_link =
-                StateID::new(self.matches.len()).map_err(|e| {
-                    BuildError::state_id_overflow(
-                        StateID::MAX.as_u64(),
-                        e.attempted(),
-                    )
-                })?;
-            self.matches.push(Match {
-                pid: self.matches[link_src].pid,
-                link: StateID::ZERO,
-            });
-            if link_dst == StateID::ZERO {
-                self.states[dst].matches = new_match_link;
-            } else {
-                self.matches[link_dst].link = new_match_link;
-            }
-
-            link_dst = new_match_link;
-            link_src = self.matches[link_src].link;
-        }
-        Ok(())
-    }
-
-    /// Create a new entry in `NFA::trans`, if there's room, and return that
-    /// entry's ID. If there's no room, then an error is returned.
-    fn alloc_transition(&mut self) -> Result<StateID, BuildError> {
-        let id = StateID::new(self.sparse.len()).map_err(|e| {
-            BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted())
-        })?;
-        self.sparse.push(Transition::default());
-        Ok(id)
-    }
-
-    /// Create a new entry in `NFA::matches`, if there's room, and return that
-    /// entry's ID. If there's no room, then an error is returned.
-    fn alloc_match(&mut self) -> Result<StateID, BuildError> {
-        let id = StateID::new(self.matches.len()).map_err(|e| {
-            BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted())
-        })?;
-        self.matches.push(Match::default());
-        Ok(id)
-    }
-
-    /// Create a new set of `N` transitions in this NFA's dense transition
-    /// table. The ID return corresponds to the index at which the `N`
-    /// transitions begin. So `id+0` is the first transition and `id+(N-1)` is
-    /// the last.
-    ///
-    /// `N` is determined via `NFA::byte_classes::alphabet_len`.
-    fn alloc_dense_state(&mut self) -> Result<StateID, BuildError> {
-        let id = StateID::new(self.dense.len()).map_err(|e| {
-            BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted())
-        })?;
-        // We use FAIL because it's the correct default. If a state doesn't
-        // have a transition defined for every possible byte value, then the
-        // transition function should return NFA::FAIL.
-        self.dense.extend(
-            core::iter::repeat(NFA::FAIL)
-                .take(self.byte_classes.alphabet_len()),
-        );
-        Ok(id)
-    }
-
-    /// Allocate and add a fresh state to the underlying NFA and return its
-    /// ID (guaranteed to be one more than the ID of the previously allocated
-    /// state). If the ID would overflow `StateID`, then this returns an error.
-    fn alloc_state(&mut self, depth: usize) -> Result<StateID, BuildError> {
-        // This is OK because we error when building the trie if we see a
-        // pattern whose length cannot fit into a 'SmallIndex', and the longest
-        // possible depth corresponds to the length of the longest pattern.
-        let depth = SmallIndex::new(depth)
-            .expect("patterns longer than SmallIndex::MAX are not allowed");
-        let id = StateID::new(self.states.len()).map_err(|e| {
-            BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted())
-        })?;
-        self.states.push(State {
-            sparse: StateID::ZERO,
-            dense: StateID::ZERO,
-            matches: StateID::ZERO,
-            fail: self.special.start_unanchored_id,
-            depth,
-        });
-        Ok(id)
-    }
-}
-
-// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always
-// returns a valid state ID given a valid state ID. We otherwise claim that
-// all other methods are correct as well.
-unsafe impl Automaton for NFA {
-    #[inline(always)]
-    fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> {
-        match anchored {
-            Anchored::No => Ok(self.special.start_unanchored_id),
-            Anchored::Yes => Ok(self.special.start_anchored_id),
-        }
-    }
-
-    #[inline(always)]
-    fn next_state(
-        &self,
-        anchored: Anchored,
-        mut sid: StateID,
-        byte: u8,
-    ) -> StateID {
-        // This terminates since:
-        //
-        // 1. state.fail never points to the FAIL state.
-        // 2. All state.fail values point to a state closer to the start state.
-        // 3. The start state has no transitions to the FAIL state.
-        loop {
-            let next = self.follow_transition(sid, byte);
-            if next != NFA::FAIL {
-                return next;
-            }
-            // For an anchored search, we never follow failure transitions
-            // because failure transitions lead us down a path to matching
-            // a *proper* suffix of the path we were on. Thus, it can only
-            // produce matches that appear after the beginning of the search.
-            if anchored.is_anchored() {
-                return NFA::DEAD;
-            }
-            sid = self.states[sid].fail();
-        }
-    }
-
-    #[inline(always)]
-    fn is_special(&self, sid: StateID) -> bool {
-        sid <= self.special.max_special_id
-    }
-
-    #[inline(always)]
-    fn is_dead(&self, sid: StateID) -> bool {
-        sid == NFA::DEAD
-    }
-
-    #[inline(always)]
-    fn is_match(&self, sid: StateID) -> bool {
-        // N.B. This returns true when sid==NFA::FAIL but that's okay because
-        // NFA::FAIL is not actually a valid state ID from the perspective of
-        // the Automaton trait. Namely, it is never returned by 'start_state'
-        // or by 'next_state'. So we don't need to care about it here.
-        !self.is_dead(sid) && sid <= self.special.max_match_id
-    }
-
-    #[inline(always)]
-    fn is_start(&self, sid: StateID) -> bool {
-        sid == self.special.start_unanchored_id
-            || sid == self.special.start_anchored_id
-    }
-
-    #[inline(always)]
-    fn match_kind(&self) -> MatchKind {
-        self.match_kind
-    }
-
-    #[inline(always)]
-    fn patterns_len(&self) -> usize {
-        self.pattern_lens.len()
-    }
-
-    #[inline(always)]
-    fn pattern_len(&self, pid: PatternID) -> usize {
-        self.pattern_lens[pid].as_usize()
-    }
-
-    #[inline(always)]
-    fn min_pattern_len(&self) -> usize {
-        self.min_pattern_len
-    }
-
-    #[inline(always)]
-    fn max_pattern_len(&self) -> usize {
-        self.max_pattern_len
-    }
-
-    #[inline(always)]
-    fn match_len(&self, sid: StateID) -> usize {
-        self.iter_matches(sid).count()
-    }
-
-    #[inline(always)]
-    fn match_pattern(&self, sid: StateID, index: usize) -> PatternID {
-        self.iter_matches(sid).nth(index).unwrap()
-    }
-
-    #[inline(always)]
-    fn memory_usage(&self) -> usize {
-        self.states.len() * core::mem::size_of::<State>()
-            + self.sparse.len() * core::mem::size_of::<Transition>()
-            + self.matches.len() * core::mem::size_of::<Match>()
-            + self.dense.len() * StateID::SIZE
-            + self.pattern_lens.len() * SmallIndex::SIZE
-            + self.prefilter.as_ref().map_or(0, |p| p.memory_usage())
-    }
-
-    #[inline(always)]
-    fn prefilter(&self) -> Option<&Prefilter> {
-        self.prefilter.as_ref()
-    }
-}
-
-/// A representation of a sparse NFA state for an Aho-Corasick automaton.
-///
-/// It contains the transitions to the next state, a failure transition for
-/// cases where there exists no other transition for the current input byte
-/// and the matches implied by visiting this state (if any).
-#[derive(Clone, Debug)]
-pub(crate) struct State {
-    /// A pointer to `NFA::trans` corresponding to the head of a linked list
-    /// containing all of the transitions for this state.
-    ///
-    /// This is `StateID::ZERO` if and only if this state has zero transitions.
-    sparse: StateID,
-    /// A pointer to a row of `N` transitions in `NFA::dense`. These
-    /// transitions correspond precisely to what is obtained by traversing
-    /// `sparse`, but permits constant time lookup.
-    ///
-    /// When this is zero (which is true for most states in the default
-    /// configuration), then this state has no dense representation.
-    ///
-    /// Note that `N` is equal to `NFA::byte_classes::alphabet_len()`. This is
-    /// typically much less than 256 (the maximum value).
-    dense: StateID,
-    /// A pointer to `NFA::matches` corresponding to the head of a linked list
-    /// containing all of the matches for this state.
-    ///
-    /// This is `StateID::ZERO` if and only if this state is not a match state.
-    matches: StateID,
-    /// The state that should be transitioned to if the current byte in the
-    /// haystack does not have a corresponding transition defined in this
-    /// state.
-    fail: StateID,
-    /// The depth of this state. Specifically, this is the distance from this
-    /// state to the starting state. (For the special sentinel states DEAD and
-    /// FAIL, their depth is always 0.) The depth of a starting state is 0.
-    ///
-    /// Note that depth is currently not used in this non-contiguous NFA. It
-    /// may in the future, but it is used in the contiguous NFA. Namely, it
-    /// permits an optimization where states near the starting state have their
-    /// transitions stored in a dense fashion, but all other states have their
-    /// transitions stored in a sparse fashion. (This non-contiguous NFA uses
-    /// a sparse representation for all states unconditionally.) In any case,
-    /// this is really the only convenient place to compute and store this
-    /// information, which we need when building the contiguous NFA.
-    depth: SmallIndex,
-}
-
-impl State {
-    /// Return true if and only if this state is a match state.
-    pub(crate) fn is_match(&self) -> bool {
-        self.matches != StateID::ZERO
-    }
-
-    /// Returns the failure transition for this state.
-    pub(crate) fn fail(&self) -> StateID {
-        self.fail
-    }
-
-    /// Returns the depth of this state. That is, the number of transitions
-    /// this state is from the start state of the NFA.
-    pub(crate) fn depth(&self) -> SmallIndex {
-        self.depth
-    }
-}
-
-/// A single transition in a non-contiguous NFA.
-#[derive(Clone, Copy, Default)]
-#[repr(packed)]
-pub(crate) struct Transition {
-    byte: u8,
-    next: StateID,
-    link: StateID,
-}
-
-impl Transition {
-    /// Return the byte for which this transition is defined.
-    pub(crate) fn byte(&self) -> u8 {
-        self.byte
-    }
-
-    /// Return the ID of the state that this transition points to.
-    pub(crate) fn next(&self) -> StateID {
-        self.next
-    }
-
-    /// Return the ID of the next transition.
-    fn link(&self) -> StateID {
-        self.link
-    }
-}
-
-impl core::fmt::Debug for Transition {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        write!(
-            f,
-            "Transition(byte: {:X?}, next: {:?}, link: {:?})",
-            self.byte,
-            self.next().as_usize(),
-            self.link().as_usize()
-        )
-    }
-}
-
-/// A single match in a non-contiguous NFA.
-#[derive(Clone, Copy, Default)]
-struct Match {
-    pid: PatternID,
-    link: StateID,
-}
-
-impl Match {
-    /// Return the pattern ID for this match.
-    pub(crate) fn pattern(&self) -> PatternID {
-        self.pid
-    }
-
-    /// Return the ID of the next match.
-    fn link(&self) -> StateID {
-        self.link
-    }
-}
-
-impl core::fmt::Debug for Match {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        write!(
-            f,
-            "Match(pid: {:?}, link: {:?})",
-            self.pattern().as_usize(),
-            self.link().as_usize()
-        )
-    }
-}
-
-/// A builder for configuring an Aho-Corasick noncontiguous NFA.
-///
-/// This builder has a subset of the options available to a
-/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options,
-/// their behavior is identical.
-#[derive(Clone, Debug)]
-pub struct Builder {
-    match_kind: MatchKind,
-    prefilter: bool,
-    ascii_case_insensitive: bool,
-    dense_depth: usize,
-}
-
-impl Default for Builder {
-    fn default() -> Builder {
-        Builder {
-            match_kind: MatchKind::default(),
-            prefilter: true,
-            ascii_case_insensitive: false,
-            dense_depth: 3,
-        }
-    }
-}
-
-impl Builder {
-    /// Create a new builder for configuring an Aho-Corasick noncontiguous NFA.
-    pub fn new() -> Builder {
-        Builder::default()
-    }
-
-    /// Build an Aho-Corasick noncontiguous NFA from the given iterator of
-    /// patterns.
-    ///
-    /// A builder may be reused to create more NFAs.
-    pub fn build<I, P>(&self, patterns: I) -> Result<NFA, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        debug!("building non-contiguous NFA");
-        let nfa = Compiler::new(self)?.compile(patterns)?;
-        debug!(
-            "non-contiguous NFA built, <states: {:?}, size: {:?}>",
-            nfa.states.len(),
-            nfa.memory_usage()
-        );
-        Ok(nfa)
-    }
-
-    /// Set the desired match semantics.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind)
-    /// for more documentation and examples.
-    pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder {
-        self.match_kind = kind;
-        self
-    }
-
-    /// Enable ASCII-aware case insensitive matching.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive)
-    /// for more documentation and examples.
-    pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder {
-        self.ascii_case_insensitive = yes;
-        self
-    }
-
-    /// Set the limit on how many states use a dense representation for their
-    /// transitions. Other states will generally use a sparse representation.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::dense_depth`](crate::AhoCorasickBuilder::dense_depth)
-    /// for more documentation and examples.
-    pub fn dense_depth(&mut self, depth: usize) -> &mut Builder {
-        self.dense_depth = depth;
-        self
-    }
-
-    /// Enable heuristic prefilter optimizations.
-    ///
-    /// See
-    /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter)
-    /// for more documentation and examples.
-    pub fn prefilter(&mut self, yes: bool) -> &mut Builder {
-        self.prefilter = yes;
-        self
-    }
-}
-
-/// A compiler uses a builder configuration and builds up the NFA formulation
-/// of an Aho-Corasick automaton. This roughly corresponds to the standard
-/// formulation described in textbooks, with some tweaks to support leftmost
-/// searching.
-#[derive(Debug)]
-struct Compiler<'a> {
-    builder: &'a Builder,
-    prefilter: prefilter::Builder,
-    nfa: NFA,
-    byteset: ByteClassSet,
-}
-
-impl<'a> Compiler<'a> {
-    fn new(builder: &'a Builder) -> Result<Compiler<'a>, BuildError> {
-        let prefilter = prefilter::Builder::new(builder.match_kind)
-            .ascii_case_insensitive(builder.ascii_case_insensitive);
-        Ok(Compiler {
-            builder,
-            prefilter,
-            nfa: NFA {
-                match_kind: builder.match_kind,
-                states: vec![],
-                sparse: vec![],
-                dense: vec![],
-                matches: vec![],
-                pattern_lens: vec![],
-                prefilter: None,
-                byte_classes: ByteClasses::singletons(),
-                min_pattern_len: usize::MAX,
-                max_pattern_len: 0,
-                special: Special::zero(),
-            },
-            byteset: ByteClassSet::empty(),
-        })
-    }
-
-    fn compile<I, P>(mut self, patterns: I) -> Result<NFA, BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        // Add dummy transition/match links, so that no valid link will point
-        // to another link at index 0.
-        self.nfa.sparse.push(Transition::default());
-        self.nfa.matches.push(Match::default());
-        // Add a dummy dense transition so that no states can have dense==0
-        // represent a valid pointer to dense transitions. This permits
-        // dense==0 to be a sentinel indicating "no dense transitions."
-        self.nfa.dense.push(NFA::DEAD);
-        // the dead state, only used for leftmost and fixed to id==0
-        self.nfa.alloc_state(0)?;
-        // the fail state, which is never entered and fixed to id==1
-        self.nfa.alloc_state(0)?;
-        // unanchored start state, initially fixed to id==2 but later shuffled
-        // to appear after all non-start match states.
-        self.nfa.special.start_unanchored_id = self.nfa.alloc_state(0)?;
-        // anchored start state, initially fixed to id==3 but later shuffled
-        // to appear after unanchored start state.
-        self.nfa.special.start_anchored_id = self.nfa.alloc_state(0)?;
-        // Initialize the unanchored starting state in order to make it dense,
-        // and thus make transition lookups on this state faster.
-        self.init_unanchored_start_state()?;
-        // Set all transitions on the DEAD state to point to itself. This way,
-        // the DEAD state can never be escaped. It MUST be used as a sentinel
-        // in any correct search.
-        self.add_dead_state_loop()?;
-        // Build the base trie from the given patterns.
-        self.build_trie(patterns)?;
-        self.nfa.states.shrink_to_fit();
-        // Turn our set of bytes into equivalent classes. This NFA
-        // implementation uses byte classes only for states that use a dense
-        // representation of transitions. (And that's why this comes before
-        // `self.densify()`, as the byte classes need to be set first.)
-        self.nfa.byte_classes = self.byteset.byte_classes();
-        // Add transitions (and maybe matches) to the anchored starting state.
-        // The anchored starting state is used for anchored searches. The only
-        // mechanical difference between it and the unanchored start state is
-        // that missing transitions map to the DEAD state instead of the FAIL
-        // state.
-        self.set_anchored_start_state()?;
-        // Rewrite transitions to the FAIL state on the unanchored start state
-        // as self-transitions. This keeps the start state active at all times.
-        self.add_unanchored_start_state_loop();
-        // Make some (possibly zero) states use a dense representation for
-        // transitions. It's important to do this right after the states
-        // and non-failure transitions are solidified. That way, subsequent
-        // accesses (particularly `fill_failure_transitions`) will benefit from
-        // the faster transition lookup in densified states.
-        self.densify()?;
-        // The meat of the Aho-Corasick algorithm: compute and write failure
-        // transitions. i.e., the state to move to when a transition isn't
-        // defined in the current state. These are epsilon transitions and thus
-        // make this formulation an NFA.
-        self.fill_failure_transitions()?;
-        // Handle a special case under leftmost semantics when at least one
-        // of the patterns is the empty string.
-        self.close_start_state_loop_for_leftmost();
-        // Shuffle states so that we have DEAD, FAIL, MATCH, ..., START, START,
-        // NON-MATCH, ... This permits us to very quickly query the type of
-        // the state we're currently in during a search.
-        self.shuffle();
-        self.nfa.prefilter = self.prefilter.build();
-        // Store the maximum ID of all *relevant* special states. Start states
-        // are only relevant when we have a prefilter, otherwise, there is zero
-        // reason to care about whether a state is a start state or not during
-        // a search. Indeed, without a prefilter, we are careful to explicitly
-        // NOT care about start states, otherwise the search can ping pong
-        // between the unrolled loop and the handling of special-status states
-        // and destroy perf.
-        self.nfa.special.max_special_id = if self.nfa.prefilter.is_some() {
-            // Why the anchored starting state? Because we always put it
-            // after the unanchored starting state and it is therefore the
-            // maximum. Why put unanchored followed by anchored? No particular
-            // reason, but that's how the states are logically organized in the
-            // Thompson NFA implementation found in regex-automata. ¯\_(ツ)_/¯
-            self.nfa.special.start_anchored_id
-        } else {
-            self.nfa.special.max_match_id
-        };
-        self.nfa.sparse.shrink_to_fit();
-        self.nfa.dense.shrink_to_fit();
-        self.nfa.matches.shrink_to_fit();
-        self.nfa.pattern_lens.shrink_to_fit();
-        Ok(self.nfa)
-    }
-
-    /// This sets up the initial prefix trie that makes up the Aho-Corasick
-    /// automaton. Effectively, it creates the basic structure of the
-    /// automaton, where every pattern given has a path from the start state to
-    /// the end of the pattern.
-    fn build_trie<I, P>(&mut self, patterns: I) -> Result<(), BuildError>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        'PATTERNS: for (i, pat) in patterns.into_iter().enumerate() {
-            let pid = PatternID::new(i).map_err(|e| {
-                BuildError::pattern_id_overflow(
-                    PatternID::MAX.as_u64(),
-                    e.attempted(),
-                )
-            })?;
-            let pat = pat.as_ref();
-            let patlen = SmallIndex::new(pat.len())
-                .map_err(|_| BuildError::pattern_too_long(pid, pat.len()))?;
-            self.nfa.min_pattern_len =
-                core::cmp::min(self.nfa.min_pattern_len, pat.len());
-            self.nfa.max_pattern_len =
-                core::cmp::max(self.nfa.max_pattern_len, pat.len());
-            assert_eq!(
-                i,
-                self.nfa.pattern_lens.len(),
-                "expected number of patterns to match pattern ID"
-            );
-            self.nfa.pattern_lens.push(patlen);
-            // We add the pattern to the prefilter here because the pattern
-            // ID in the prefilter is determined with respect to the patterns
-            // added to the prefilter. That is, it isn't the ID we have here,
-            // but the one determined by its own accounting of patterns.
-            // To ensure they line up, we add every pattern we see to the
-            // prefilter, even if some patterns ultimately are impossible to
-            // match (in leftmost-first semantics specifically).
-            //
-            // Another way of doing this would be to expose an API in the
-            // prefilter to permit setting your own pattern IDs. Or to just use
-            // our own map and go between them. But this case is sufficiently
-            // rare that we don't bother and just make sure they're in sync.
-            if self.builder.prefilter {
-                self.prefilter.add(pat);
-            }
-
-            let mut prev = self.nfa.special.start_unanchored_id;
-            let mut saw_match = false;
-            for (depth, &b) in pat.iter().enumerate() {
-                // When leftmost-first match semantics are requested, we
-                // specifically stop adding patterns when a previously added
-                // pattern is a prefix of it. We avoid adding it because
-                // leftmost-first semantics imply that the pattern can never
-                // match. This is not just an optimization to save space! It
-                // is necessary for correctness. In fact, this is the only
-                // difference in the automaton between the implementations for
-                // leftmost-first and leftmost-longest.
-                saw_match = saw_match || self.nfa.states[prev].is_match();
-                if self.builder.match_kind.is_leftmost_first() && saw_match {
-                    // Skip to the next pattern immediately. This avoids
-                    // incorrectly adding a match after this loop terminates.
-                    continue 'PATTERNS;
-                }
-
-                // Add this byte to our equivalence classes. These don't
-                // get used while building the trie, but other Aho-Corasick
-                // implementations may use them.
-                self.byteset.set_range(b, b);
-                if self.builder.ascii_case_insensitive {
-                    let b = opposite_ascii_case(b);
-                    self.byteset.set_range(b, b);
-                }
-
-                // If the transition from prev using the current byte already
-                // exists, then just move through it. Otherwise, add a new
-                // state. We track the depth here so that we can determine
-                // how to represent transitions. States near the start state
-                // use a dense representation that uses more memory but is
-                // faster. Other states use a sparse representation that uses
-                // less memory but is slower.
-                let next = self.nfa.follow_transition(prev, b);
-                if next != NFA::FAIL {
-                    prev = next;
-                } else {
-                    let next = self.nfa.alloc_state(depth)?;
-                    self.nfa.add_transition(prev, b, next)?;
-                    if self.builder.ascii_case_insensitive {
-                        let b = opposite_ascii_case(b);
-                        self.nfa.add_transition(prev, b, next)?;
-                    }
-                    prev = next;
-                }
-            }
-            // Once the pattern has been added, log the match in the final
-            // state that it reached.
-            self.nfa.add_match(prev, pid)?;
-        }
-        Ok(())
-    }
-
-    /// This routine creates failure transitions according to the standard
-    /// textbook formulation of the Aho-Corasick algorithm, with a couple small
-    /// tweaks to support "leftmost" semantics.
-    ///
-    /// Building failure transitions is the most interesting part of building
-    /// the Aho-Corasick automaton, because they are what allow searches to
-    /// be performed in linear time. Specifically, a failure transition is
-    /// a single transition associated with each state that points back to
-    /// the longest proper suffix of the pattern being searched. The failure
-    /// transition is followed whenever there exists no transition on the
-    /// current state for the current input byte. If there is no other proper
-    /// suffix, then the failure transition points back to the starting state.
-    ///
-    /// For example, let's say we built an Aho-Corasick automaton with the
-    /// following patterns: 'abcd' and 'cef'. The trie looks like this:
-    ///
-    /// ```ignore
-    ///          a - S1 - b - S2 - c - S3 - d - S4*
-    ///         /
-    ///     S0 - c - S5 - e - S6 - f - S7*
-    /// ```
-    ///
-    /// At this point, it should be fairly straight-forward to see how this
-    /// trie can be used in a simplistic way. At any given position in the
-    /// text we're searching (called the "subject" string), all we need to do
-    /// is follow the transitions in the trie by consuming one transition for
-    /// each byte in the subject string. If we reach a match state, then we can
-    /// report that location as a match.
-    ///
-    /// The trick comes when searching a subject string like 'abcef'. We'll
-    /// initially follow the transition from S0 to S1 and wind up in S3 after
-    /// observng the 'c' byte. At this point, the next byte is 'e' but state
-    /// S3 has no transition for 'e', so the search fails. We then would need
-    /// to restart the search at the next position in 'abcef', which
-    /// corresponds to 'b'. The match would fail, but the next search starting
-    /// at 'c' would finally succeed. The problem with this approach is that
-    /// we wind up searching the subject string potentially many times. In
-    /// effect, this makes the algorithm have worst case `O(n * m)` complexity,
-    /// where `n ~ len(subject)` and `m ~ len(all patterns)`. We would instead
-    /// like to achieve a `O(n + m)` worst case complexity.
-    ///
-    /// This is where failure transitions come in. Instead of dying at S3 in
-    /// the first search, the automaton can instruct the search to move to
-    /// another part of the automaton that corresponds to a suffix of what
-    /// we've seen so far. Recall that we've seen 'abc' in the subject string,
-    /// and the automaton does indeed have a non-empty suffix, 'c', that could
-    /// potentially lead to another match. Thus, the actual Aho-Corasick
-    /// automaton for our patterns in this case looks like this:
-    ///
-    /// ```ignore
-    ///          a - S1 - b - S2 - c - S3 - d - S4*
-    ///         /                      /
-    ///        /       ----------------
-    ///       /       /
-    ///     S0 - c - S5 - e - S6 - f - S7*
-    /// ```
-    ///
-    /// That is, we have a failure transition from S3 to S5, which is followed
-    /// exactly in cases when we are in state S3 but see any byte other than
-    /// 'd' (that is, we've "failed" to find a match in this portion of our
-    /// trie). We know we can transition back to S5 because we've already seen
-    /// a 'c' byte, so we don't need to re-scan it. We can then pick back up
-    /// with the search starting at S5 and complete our match.
-    ///
-    /// Adding failure transitions to a trie is fairly simple, but subtle. The
-    /// key issue is that you might have multiple failure transition that you
-    /// need to follow. For example, look at the trie for the patterns
-    /// 'abcd', 'b', 'bcd' and 'cd':
-    ///
-    /// ```ignore
-    ///          - a - S1 - b - S2* - c - S3 - d - S4*
-    ///         /               /         /
-    ///        /         -------   -------
-    ///       /         /         /
-    ///     S0 --- b - S5* - c - S6 - d - S7*
-    ///       \                  /
-    ///        \         --------
-    ///         \       /
-    ///          - c - S8 - d - S9*
-    /// ```
-    ///
-    /// The failure transitions for this trie are defined from S2 to S5,
-    /// S3 to S6 and S6 to S8. Moreover, state S2 needs to track that it
-    /// corresponds to a match, since its failure transition to S5 is itself
-    /// a match state.
-    ///
-    /// Perhaps simplest way to think about adding these failure transitions
-    /// is recursively. That is, if you know the failure transitions for every
-    /// possible previous state that could be visited (e.g., when computing the
-    /// failure transition for S3, you already know the failure transitions
-    /// for S0, S1 and S2), then you can simply follow the failure transition
-    /// of the previous state and check whether the incoming transition is
-    /// defined after following the failure transition.
-    ///
-    /// For example, when determining the failure state for S3, by our
-    /// assumptions, we already know that there is a failure transition from
-    /// S2 (the previous state) to S5. So we follow that transition and check
-    /// whether the transition connecting S2 to S3 is defined. Indeed, it is,
-    /// as there is a transition from S5 to S6 for the byte 'c'. If no such
-    /// transition existed, we could keep following the failure transitions
-    /// until we reach the start state, which is the failure transition for
-    /// every state that has no corresponding proper suffix.
-    ///
-    /// We don't actually use recursion to implement this, but instead, use a
-    /// breadth first search of the automaton. Our base case is the start
-    /// state, whose failure transition is just a transition to itself.
-    ///
-    /// When building a leftmost automaton, we proceed as above, but only
-    /// include a subset of failure transitions. Namely, we omit any failure
-    /// transitions that appear after a match state in the trie. This is
-    /// because failure transitions always point back to a proper suffix of
-    /// what has been seen so far. Thus, following a failure transition after
-    /// a match implies looking for a match that starts after the one that has
-    /// already been seen, which is of course therefore not the leftmost match.
-    ///
-    /// N.B. I came up with this algorithm on my own, and after scouring all of
-    /// the other AC implementations I know of (Perl, Snort, many on GitHub).
-    /// I couldn't find any that implement leftmost semantics like this.
-    /// Perl of course needs leftmost-first semantics, but they implement it
-    /// with a seeming hack at *search* time instead of encoding it into the
-    /// automaton. There are also a couple Java libraries that support leftmost
-    /// longest semantics, but they do it by building a queue of matches at
-    /// search time, which is even worse than what Perl is doing. ---AG
-    fn fill_failure_transitions(&mut self) -> Result<(), BuildError> {
-        let is_leftmost = self.builder.match_kind.is_leftmost();
-        let start_uid = self.nfa.special.start_unanchored_id;
-        // Initialize the queue for breadth first search with all transitions
-        // out of the start state. We handle the start state specially because
-        // we only want to follow non-self transitions. If we followed self
-        // transitions, then this would never terminate.
-        let mut queue = VecDeque::new();
-        let mut seen = self.queued_set();
-        let mut prev_link = None;
-        while let Some(link) = self.nfa.next_link(start_uid, prev_link) {
-            prev_link = Some(link);
-            let t = self.nfa.sparse[link];
-
-            // Skip anything we've seen before and any self-transitions on the
-            // start state.
-            if start_uid == t.next() || seen.contains(t.next) {
-                continue;
-            }
-            queue.push_back(t.next);
-            seen.insert(t.next);
-            // Under leftmost semantics, if a state immediately following
-            // the start state is a match state, then we never want to
-            // follow its failure transition since the failure transition
-            // necessarily leads back to the start state, which we never
-            // want to do for leftmost matching after a match has been
-            // found.
-            //
-            // We apply the same logic to non-start states below as well.
-            if is_leftmost && self.nfa.states[t.next].is_match() {
-                self.nfa.states[t.next].fail = NFA::DEAD;
-            }
-        }
-        while let Some(id) = queue.pop_front() {
-            let mut prev_link = None;
-            while let Some(link) = self.nfa.next_link(id, prev_link) {
-                prev_link = Some(link);
-                let t = self.nfa.sparse[link];
-
-                if seen.contains(t.next) {
-                    // The only way to visit a duplicate state in a transition
-                    // list is when ASCII case insensitivity is enabled. In
-                    // this case, we want to skip it since it's redundant work.
-                    // But it would also end up duplicating matches, which
-                    // results in reporting duplicate matches in some cases.
-                    // See the 'acasei010' regression test.
-                    continue;
-                }
-                queue.push_back(t.next);
-                seen.insert(t.next);
-
-                // As above for start states, under leftmost semantics, once
-                // we see a match all subsequent states should have no failure
-                // transitions because failure transitions always imply looking
-                // for a match that is a suffix of what has been seen so far
-                // (where "seen so far" corresponds to the string formed by
-                // following the transitions from the start state to the
-                // current state). Under leftmost semantics, we specifically do
-                // not want to allow this to happen because we always want to
-                // report the match found at the leftmost position.
-                //
-                // The difference between leftmost-first and leftmost-longest
-                // occurs previously while we build the trie. For
-                // leftmost-first, we simply omit any entries that would
-                // otherwise require passing through a match state.
-                //
-                // Note that for correctness, the failure transition has to be
-                // set to the dead state for ALL states following a match, not
-                // just the match state itself. However, by setting the failure
-                // transition to the dead state on all match states, the dead
-                // state will automatically propagate to all subsequent states
-                // via the failure state computation below.
-                if is_leftmost && self.nfa.states[t.next].is_match() {
-                    self.nfa.states[t.next].fail = NFA::DEAD;
-                    continue;
-                }
-                let mut fail = self.nfa.states[id].fail;
-                while self.nfa.follow_transition(fail, t.byte) == NFA::FAIL {
-                    fail = self.nfa.states[fail].fail;
-                }
-                fail = self.nfa.follow_transition(fail, t.byte);
-                self.nfa.states[t.next].fail = fail;
-                self.nfa.copy_matches(fail, t.next)?;
-            }
-            // If the start state is a match state, then this automaton can
-            // match the empty string. This implies all states are match states
-            // since every position matches the empty string, so copy the
-            // matches from the start state to every state. Strictly speaking,
-            // this is only necessary for overlapping matches since each
-            // non-empty non-start match state needs to report empty matches
-            // in addition to its own. For the non-overlapping case, such
-            // states only report the first match, which is never empty since
-            // it isn't a start state.
-            if !is_leftmost {
-                self.nfa
-                    .copy_matches(self.nfa.special.start_unanchored_id, id)?;
-            }
-        }
-        Ok(())
-    }
-
-    /// Shuffle the states so that they appear in this sequence:
-    ///
-    ///   DEAD, FAIL, MATCH..., START, START, NON-MATCH...
-    ///
-    /// The idea here is that if we know how special states are laid out in our
-    /// transition table, then we can determine what "kind" of state we're in
-    /// just by comparing our current state ID with a particular value. In this
-    /// way, we avoid doing extra memory lookups.
-    ///
-    /// Before shuffling begins, our states look something like this:
-    ///
-    ///   DEAD, FAIL, START, START, (MATCH | NON-MATCH)...
-    ///
-    /// So all we need to do is move all of the MATCH states so that they
-    /// all appear before any NON-MATCH state, like so:
-    ///
-    ///   DEAD, FAIL, START, START, MATCH... NON-MATCH...
-    ///
-    /// Then it's just a simple matter of swapping the two START states with
-    /// the last two MATCH states.
-    ///
-    /// (This is the same technique used for fully compiled DFAs in
-    /// regex-automata.)
-    fn shuffle(&mut self) {
-        let old_start_uid = self.nfa.special.start_unanchored_id;
-        let old_start_aid = self.nfa.special.start_anchored_id;
-        assert!(old_start_uid < old_start_aid);
-        assert_eq!(
-            3,
-            old_start_aid.as_usize(),
-            "anchored start state should be at index 3"
-        );
-        // We implement shuffling by a sequence of pairwise swaps of states.
-        // Since we have a number of things referencing states via their
-        // IDs and swapping them changes their IDs, we need to record every
-        // swap we make so that we can remap IDs. The remapper handles this
-        // book-keeping for us.
-        let mut remapper = Remapper::new(&self.nfa, 0);
-        // The way we proceed here is by moving all match states so that
-        // they directly follow the start states. So it will go: DEAD, FAIL,
-        // START-UNANCHORED, START-ANCHORED, MATCH, ..., NON-MATCH, ...
-        //
-        // To do that, we proceed forward through all states after
-        // START-ANCHORED and swap match states so that they appear before all
-        // non-match states.
-        let mut next_avail = StateID::from(4u8);
-        for i in next_avail.as_usize()..self.nfa.states.len() {
-            let sid = StateID::new(i).unwrap();
-            if !self.nfa.states[sid].is_match() {
-                continue;
-            }
-            remapper.swap(&mut self.nfa, sid, next_avail);
-            // The key invariant here is that only non-match states exist
-            // between 'next_avail' and 'sid' (with them being potentially
-            // equivalent). Thus, incrementing 'next_avail' by 1 is guaranteed
-            // to land on the leftmost non-match state. (Unless 'next_avail'
-            // and 'sid' are equivalent, in which case, a swap will occur but
-            // it is a no-op.)
-            next_avail = StateID::new(next_avail.one_more()).unwrap();
-        }
-        // Now we'd like to move the start states to immediately following the
-        // match states. (The start states may themselves be match states, but
-        // we'll handle that later.) We arrange the states this way so that we
-        // don't necessarily need to check whether a state is a start state or
-        // not before checking whether a state is a match state. For example,
-        // we'd like to be able to write this as our state machine loop:
-        //
-        //   sid = start()
-        //   for byte in haystack:
-        //     sid = next(sid, byte)
-        //     if sid <= nfa.max_start_id:
-        //       if sid <= nfa.max_dead_id:
-        //         # search complete
-        //       elif sid <= nfa.max_match_id:
-        //         # found match
-        //
-        // The important context here is that we might not want to look for
-        // start states at all. Namely, if a searcher doesn't have a prefilter,
-        // then there is no reason to care about whether we're in a start state
-        // or not. And indeed, if we did check for it, this very hot loop would
-        // ping pong between the special state handling and the main state
-        // transition logic. This in turn stalls the CPU by killing branch
-        // prediction.
-        //
-        // So essentially, we really want to be able to "forget" that start
-        // states even exist and this is why we put them at the end.
-        let new_start_aid =
-            StateID::new(next_avail.as_usize().checked_sub(1).unwrap())
-                .unwrap();
-        remapper.swap(&mut self.nfa, old_start_aid, new_start_aid);
-        let new_start_uid =
-            StateID::new(next_avail.as_usize().checked_sub(2).unwrap())
-                .unwrap();
-        remapper.swap(&mut self.nfa, old_start_uid, new_start_uid);
-        let new_max_match_id =
-            StateID::new(next_avail.as_usize().checked_sub(3).unwrap())
-                .unwrap();
-        self.nfa.special.max_match_id = new_max_match_id;
-        self.nfa.special.start_unanchored_id = new_start_uid;
-        self.nfa.special.start_anchored_id = new_start_aid;
-        // If one start state is a match state, then they both are.
-        if self.nfa.states[self.nfa.special.start_anchored_id].is_match() {
-            self.nfa.special.max_match_id = self.nfa.special.start_anchored_id;
-        }
-        remapper.remap(&mut self.nfa);
-    }
-
-    /// Attempts to convert the transition representation of a subset of states
-    /// in this NFA from sparse to dense. This can greatly improve search
-    /// performance since states with a higher number of transitions tend to
-    /// correlate with very active states.
-    ///
-    /// We generally only densify states that are close to the start state.
-    /// These tend to be the most active states and thus benefit from a dense
-    /// representation more than other states.
-    ///
-    /// This tends to best balance between memory usage and performance. In
-    /// particular, the *vast majority* of all states in a typical Aho-Corasick
-    /// automaton have only 1 transition and are usually farther from the start
-    /// state and thus don't get densified.
-    ///
-    /// Note that this doesn't remove the sparse representation of transitions
-    /// for states that are densified. It could be done, but actually removing
-    /// entries from `NFA::sparse` is likely more expensive than it's worth.
-    fn densify(&mut self) -> Result<(), BuildError> {
-        for i in 0..self.nfa.states.len() {
-            let sid = StateID::new(i).unwrap();
-            // Don't bother densifying states that are only used as sentinels.
-            if sid == NFA::DEAD || sid == NFA::FAIL {
-                continue;
-            }
-            // Only densify states that are "close enough" to the start state.
-            if self.nfa.states[sid].depth.as_usize()
-                >= self.builder.dense_depth
-            {
-                continue;
-            }
-            let dense = self.nfa.alloc_dense_state()?;
-            let mut prev_link = None;
-            while let Some(link) = self.nfa.next_link(sid, prev_link) {
-                prev_link = Some(link);
-                let t = self.nfa.sparse[link];
-
-                let class = usize::from(self.nfa.byte_classes.get(t.byte));
-                let index = dense.as_usize() + class;
-                self.nfa.dense[index] = t.next;
-            }
-            self.nfa.states[sid].dense = dense;
-        }
-        Ok(())
-    }
-
-    /// Returns a set that tracked queued states.
-    ///
-    /// This is only necessary when ASCII case insensitivity is enabled, since
-    /// it is the only way to visit the same state twice. Otherwise, this
-    /// returns an inert set that nevers adds anything and always reports
-    /// `false` for every member test.
-    fn queued_set(&self) -> QueuedSet {
-        if self.builder.ascii_case_insensitive {
-            QueuedSet::active()
-        } else {
-            QueuedSet::inert()
-        }
-    }
-
-    /// Initializes the unanchored start state by making it dense. This is
-    /// achieved by explicitly setting every transition to the FAIL state.
-    /// This isn't necessary for correctness, since any missing transition is
-    /// automatically assumed to be mapped to the FAIL state. We do this to
-    /// make the unanchored starting state dense, and thus in turn make
-    /// transition lookups on it faster. (Which is worth doing because it's
-    /// the most active state.)
-    fn init_unanchored_start_state(&mut self) -> Result<(), BuildError> {
-        let start_uid = self.nfa.special.start_unanchored_id;
-        let start_aid = self.nfa.special.start_anchored_id;
-        self.nfa.init_full_state(start_uid, NFA::FAIL)?;
-        self.nfa.init_full_state(start_aid, NFA::FAIL)?;
-        Ok(())
-    }
-
-    /// Setup the anchored start state by copying all of the transitions and
-    /// matches from the unanchored starting state with one change: the failure
-    /// transition is changed to the DEAD state, so that for any undefined
-    /// transitions, the search will stop.
-    fn set_anchored_start_state(&mut self) -> Result<(), BuildError> {
-        let start_uid = self.nfa.special.start_unanchored_id;
-        let start_aid = self.nfa.special.start_anchored_id;
-        let (mut uprev_link, mut aprev_link) = (None, None);
-        loop {
-            let unext = self.nfa.next_link(start_uid, uprev_link);
-            let anext = self.nfa.next_link(start_aid, aprev_link);
-            let (ulink, alink) = match (unext, anext) {
-                (Some(ulink), Some(alink)) => (ulink, alink),
-                (None, None) => break,
-                _ => unreachable!(),
-            };
-            uprev_link = Some(ulink);
-            aprev_link = Some(alink);
-            self.nfa.sparse[alink].next = self.nfa.sparse[ulink].next;
-        }
-        self.nfa.copy_matches(start_uid, start_aid)?;
-        // This is the main difference between the unanchored and anchored
-        // starting states. If a lookup on an anchored starting state fails,
-        // then the search should stop.
-        //
-        // N.B. This assumes that the loop on the unanchored starting state
-        // hasn't been created yet.
-        self.nfa.states[start_aid].fail = NFA::DEAD;
-        Ok(())
-    }
-
-    /// Set the failure transitions on the start state to loop back to the
-    /// start state. This effectively permits the Aho-Corasick automaton to
-    /// match at any position. This is also required for finding the next
-    /// state to terminate, namely, finding the next state should never return
-    /// a fail_id.
-    ///
-    /// This must be done after building the initial trie, since trie
-    /// construction depends on transitions to `fail_id` to determine whether a
-    /// state already exists or not.
-    fn add_unanchored_start_state_loop(&mut self) {
-        let start_uid = self.nfa.special.start_unanchored_id;
-        let mut prev_link = None;
-        while let Some(link) = self.nfa.next_link(start_uid, prev_link) {
-            prev_link = Some(link);
-            if self.nfa.sparse[link].next() == NFA::FAIL {
-                self.nfa.sparse[link].next = start_uid;
-            }
-        }
-    }
-
-    /// Remove the start state loop by rewriting any transitions on the start
-    /// state back to the start state with transitions to the dead state.
-    ///
-    /// The loop is only closed when two conditions are met: the start state
-    /// is a match state and the match kind is leftmost-first or
-    /// leftmost-longest.
-    ///
-    /// The reason for this is that under leftmost semantics, a start state
-    /// that is also a match implies that we should never restart the search
-    /// process. We allow normal transitions out of the start state, but if
-    /// none exist, we transition to the dead state, which signals that
-    /// searching should stop.
-    fn close_start_state_loop_for_leftmost(&mut self) {
-        let start_uid = self.nfa.special.start_unanchored_id;
-        let start = &mut self.nfa.states[start_uid];
-        let dense = start.dense;
-        if self.builder.match_kind.is_leftmost() && start.is_match() {
-            let mut prev_link = None;
-            while let Some(link) = self.nfa.next_link(start_uid, prev_link) {
-                prev_link = Some(link);
-                if self.nfa.sparse[link].next() == start_uid {
-                    self.nfa.sparse[link].next = NFA::DEAD;
-                    if dense != StateID::ZERO {
-                        let b = self.nfa.sparse[link].byte;
-                        let class = usize::from(self.nfa.byte_classes.get(b));
-                        self.nfa.dense[dense.as_usize() + class] = NFA::DEAD;
-                    }
-                }
-            }
-        }
-    }
-
-    /// Sets all transitions on the dead state to point back to the dead state.
-    /// Normally, missing transitions map back to the failure state, but the
-    /// point of the dead state is to act as a sink that can never be escaped.
-    fn add_dead_state_loop(&mut self) -> Result<(), BuildError> {
-        self.nfa.init_full_state(NFA::DEAD, NFA::DEAD)?;
-        Ok(())
-    }
-}
-
-/// A set of state identifiers used to avoid revisiting the same state multiple
-/// times when filling in failure transitions.
-///
-/// This set has an "inert" and an "active" mode. When inert, the set never
-/// stores anything and always returns `false` for every member test. This is
-/// useful to avoid the performance and memory overhead of maintaining this
-/// set when it is not needed.
-#[derive(Debug)]
-struct QueuedSet {
-    set: Option<BTreeSet<StateID>>,
-}
-
-impl QueuedSet {
-    /// Return an inert set that returns `false` for every state ID membership
-    /// test.
-    fn inert() -> QueuedSet {
-        QueuedSet { set: None }
-    }
-
-    /// Return an active set that tracks state ID membership.
-    fn active() -> QueuedSet {
-        QueuedSet { set: Some(BTreeSet::new()) }
-    }
-
-    /// Inserts the given state ID into this set. (If the set is inert, then
-    /// this is a no-op.)
-    fn insert(&mut self, state_id: StateID) {
-        if let Some(ref mut set) = self.set {
-            set.insert(state_id);
-        }
-    }
-
-    /// Returns true if and only if the given state ID is in this set. If the
-    /// set is inert, this always returns false.
-    fn contains(&self, state_id: StateID) -> bool {
-        match self.set {
-            None => false,
-            Some(ref set) => set.contains(&state_id),
-        }
-    }
-}
-
-impl core::fmt::Debug for NFA {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        use crate::{
-            automaton::{fmt_state_indicator, sparse_transitions},
-            util::debug::DebugByte,
-        };
-
-        writeln!(f, "noncontiguous::NFA(")?;
-        for (sid, state) in self.states.iter().with_state_ids() {
-            // The FAIL state doesn't actually have space for a state allocated
-            // for it, so we have to treat it as a special case.
-            if sid == NFA::FAIL {
-                writeln!(f, "F {:06}:", sid.as_usize())?;
-                continue;
-            }
-            fmt_state_indicator(f, self, sid)?;
-            write!(
-                f,
-                "{:06}({:06}): ",
-                sid.as_usize(),
-                state.fail.as_usize()
-            )?;
-
-            let it = sparse_transitions(
-                self.iter_trans(sid).map(|t| (t.byte, t.next)),
-            )
-            .enumerate();
-            for (i, (start, end, sid)) in it {
-                if i > 0 {
-                    write!(f, ", ")?;
-                }
-                if start == end {
-                    write!(
-                        f,
-                        "{:?} => {:?}",
-                        DebugByte(start),
-                        sid.as_usize()
-                    )?;
-                } else {
-                    write!(
-                        f,
-                        "{:?}-{:?} => {:?}",
-                        DebugByte(start),
-                        DebugByte(end),
-                        sid.as_usize()
-                    )?;
-                }
-            }
-
-            write!(f, "\n")?;
-            if self.is_match(sid) {
-                write!(f, "         matches: ")?;
-                for (i, pid) in self.iter_matches(sid).enumerate() {
-                    if i > 0 {
-                        write!(f, ", ")?;
-                    }
-                    write!(f, "{}", pid.as_usize())?;
-                }
-                write!(f, "\n")?;
-            }
-        }
-        writeln!(f, "match kind: {:?}", self.match_kind)?;
-        writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?;
-        writeln!(f, "state length: {:?}", self.states.len())?;
-        writeln!(f, "pattern length: {:?}", self.patterns_len())?;
-        writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?;
-        writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?;
-        writeln!(f, "memory usage: {:?}", self.memory_usage())?;
-        writeln!(f, ")")?;
-        Ok(())
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/api.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/api.rs
deleted file mode 100644
index 44f0bc9b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/api.rs
+++ /dev/null
@@ -1,687 +0,0 @@
-use alloc::sync::Arc;
-
-use crate::{
-    packed::{pattern::Patterns, rabinkarp::RabinKarp, teddy},
-    util::search::{Match, Span},
-};
-
-/// This is a limit placed on the total number of patterns we're willing to try
-/// and match at once. As more sophisticated algorithms are added, this number
-/// may be increased.
-const PATTERN_LIMIT: usize = 128;
-
-/// A knob for controlling the match semantics of a packed multiple string
-/// searcher.
-///
-/// This differs from the [`MatchKind`](crate::MatchKind) type in the top-level
-/// crate module in that it doesn't support "standard" match semantics,
-/// and instead only supports leftmost-first or leftmost-longest. Namely,
-/// "standard" semantics cannot be easily supported by packed searchers.
-///
-/// For more information on the distinction between leftmost-first and
-/// leftmost-longest, see the docs on the top-level `MatchKind` type.
-///
-/// Unlike the top-level `MatchKind` type, the default match semantics for this
-/// type are leftmost-first.
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-#[non_exhaustive]
-pub enum MatchKind {
-    /// Use leftmost-first match semantics, which reports leftmost matches.
-    /// When there are multiple possible leftmost matches, the match
-    /// corresponding to the pattern that appeared earlier when constructing
-    /// the automaton is reported.
-    ///
-    /// This is the default.
-    LeftmostFirst,
-    /// Use leftmost-longest match semantics, which reports leftmost matches.
-    /// When there are multiple possible leftmost matches, the longest match
-    /// is chosen.
-    LeftmostLongest,
-}
-
-impl Default for MatchKind {
-    fn default() -> MatchKind {
-        MatchKind::LeftmostFirst
-    }
-}
-
-/// The configuration for a packed multiple pattern searcher.
-///
-/// The configuration is currently limited only to being able to select the
-/// match semantics (leftmost-first or leftmost-longest) of a searcher. In the
-/// future, more knobs may be made available.
-///
-/// A configuration produces a [`packed::Builder`](Builder), which in turn can
-/// be used to construct a [`packed::Searcher`](Searcher) for searching.
-///
-/// # Example
-///
-/// This example shows how to use leftmost-longest semantics instead of the
-/// default (leftmost-first).
-///
-/// ```
-/// use aho_corasick::{packed::{Config, MatchKind}, PatternID};
-///
-/// # fn example() -> Option<()> {
-/// let searcher = Config::new()
-///     .match_kind(MatchKind::LeftmostLongest)
-///     .builder()
-///     .add("foo")
-///     .add("foobar")
-///     .build()?;
-/// let matches: Vec<PatternID> = searcher
-///     .find_iter("foobar")
-///     .map(|mat| mat.pattern())
-///     .collect();
-/// assert_eq!(vec![PatternID::must(1)], matches);
-/// # Some(()) }
-/// # if cfg!(all(feature = "std", any(
-/// #     target_arch = "x86_64", target_arch = "aarch64",
-/// # ))) {
-/// #     example().unwrap()
-/// # } else {
-/// #     assert!(example().is_none());
-/// # }
-/// ```
-#[derive(Clone, Debug)]
-pub struct Config {
-    kind: MatchKind,
-    force: Option<ForceAlgorithm>,
-    only_teddy_fat: Option<bool>,
-    only_teddy_256bit: Option<bool>,
-    heuristic_pattern_limits: bool,
-}
-
-/// An internal option for forcing the use of a particular packed algorithm.
-///
-/// When an algorithm is forced, if a searcher could not be constructed for it,
-/// then no searcher will be returned even if an alternative algorithm would
-/// work.
-#[derive(Clone, Debug)]
-enum ForceAlgorithm {
-    Teddy,
-    RabinKarp,
-}
-
-impl Default for Config {
-    fn default() -> Config {
-        Config::new()
-    }
-}
-
-impl Config {
-    /// Create a new default configuration. A default configuration uses
-    /// leftmost-first match semantics.
-    pub fn new() -> Config {
-        Config {
-            kind: MatchKind::LeftmostFirst,
-            force: None,
-            only_teddy_fat: None,
-            only_teddy_256bit: None,
-            heuristic_pattern_limits: true,
-        }
-    }
-
-    /// Create a packed builder from this configuration. The builder can be
-    /// used to accumulate patterns and create a [`Searcher`] from them.
-    pub fn builder(&self) -> Builder {
-        Builder::from_config(self.clone())
-    }
-
-    /// Set the match semantics for this configuration.
-    pub fn match_kind(&mut self, kind: MatchKind) -> &mut Config {
-        self.kind = kind;
-        self
-    }
-
-    /// An undocumented method for forcing the use of the Teddy algorithm.
-    ///
-    /// This is only exposed for more precise testing and benchmarks. Callers
-    /// should not use it as it is not part of the API stability guarantees of
-    /// this crate.
-    #[doc(hidden)]
-    pub fn only_teddy(&mut self, yes: bool) -> &mut Config {
-        if yes {
-            self.force = Some(ForceAlgorithm::Teddy);
-        } else {
-            self.force = None;
-        }
-        self
-    }
-
-    /// An undocumented method for forcing the use of the Fat Teddy algorithm.
-    ///
-    /// This is only exposed for more precise testing and benchmarks. Callers
-    /// should not use it as it is not part of the API stability guarantees of
-    /// this crate.
-    #[doc(hidden)]
-    pub fn only_teddy_fat(&mut self, yes: Option<bool>) -> &mut Config {
-        self.only_teddy_fat = yes;
-        self
-    }
-
-    /// An undocumented method for forcing the use of SSE (`Some(false)`) or
-    /// AVX (`Some(true)`) algorithms.
-    ///
-    /// This is only exposed for more precise testing and benchmarks. Callers
-    /// should not use it as it is not part of the API stability guarantees of
-    /// this crate.
-    #[doc(hidden)]
-    pub fn only_teddy_256bit(&mut self, yes: Option<bool>) -> &mut Config {
-        self.only_teddy_256bit = yes;
-        self
-    }
-
-    /// An undocumented method for forcing the use of the Rabin-Karp algorithm.
-    ///
-    /// This is only exposed for more precise testing and benchmarks. Callers
-    /// should not use it as it is not part of the API stability guarantees of
-    /// this crate.
-    #[doc(hidden)]
-    pub fn only_rabin_karp(&mut self, yes: bool) -> &mut Config {
-        if yes {
-            self.force = Some(ForceAlgorithm::RabinKarp);
-        } else {
-            self.force = None;
-        }
-        self
-    }
-
-    /// Request that heuristic limitations on the number of patterns be
-    /// employed. This useful to disable for benchmarking where one wants to
-    /// explore how Teddy performs on large number of patterns even if the
-    /// heuristics would otherwise refuse construction.
-    ///
-    /// This is enabled by default.
-    pub fn heuristic_pattern_limits(&mut self, yes: bool) -> &mut Config {
-        self.heuristic_pattern_limits = yes;
-        self
-    }
-}
-
-/// A builder for constructing a packed searcher from a collection of patterns.
-///
-/// # Example
-///
-/// This example shows how to use a builder to construct a searcher. By
-/// default, leftmost-first match semantics are used.
-///
-/// ```
-/// use aho_corasick::{packed::{Builder, MatchKind}, PatternID};
-///
-/// # fn example() -> Option<()> {
-/// let searcher = Builder::new()
-///     .add("foobar")
-///     .add("foo")
-///     .build()?;
-/// let matches: Vec<PatternID> = searcher
-///     .find_iter("foobar")
-///     .map(|mat| mat.pattern())
-///     .collect();
-/// assert_eq!(vec![PatternID::ZERO], matches);
-/// # Some(()) }
-/// # if cfg!(all(feature = "std", any(
-/// #     target_arch = "x86_64", target_arch = "aarch64",
-/// # ))) {
-/// #     example().unwrap()
-/// # } else {
-/// #     assert!(example().is_none());
-/// # }
-/// ```
-#[derive(Clone, Debug)]
-pub struct Builder {
-    /// The configuration of this builder and subsequent matcher.
-    config: Config,
-    /// Set to true if the builder detects that a matcher cannot be built.
-    inert: bool,
-    /// The patterns provided by the caller.
-    patterns: Patterns,
-}
-
-impl Builder {
-    /// Create a new builder for constructing a multi-pattern searcher. This
-    /// constructor uses the default configuration.
-    pub fn new() -> Builder {
-        Builder::from_config(Config::new())
-    }
-
-    fn from_config(config: Config) -> Builder {
-        Builder { config, inert: false, patterns: Patterns::new() }
-    }
-
-    /// Build a searcher from the patterns added to this builder so far.
-    pub fn build(&self) -> Option<Searcher> {
-        if self.inert || self.patterns.is_empty() {
-            return None;
-        }
-        let mut patterns = self.patterns.clone();
-        patterns.set_match_kind(self.config.kind);
-        let patterns = Arc::new(patterns);
-        let rabinkarp = RabinKarp::new(&patterns);
-        // Effectively, we only want to return a searcher if we can use Teddy,
-        // since Teddy is our only fast packed searcher at the moment.
-        // Rabin-Karp is only used when searching haystacks smaller than what
-        // Teddy can support. Thus, the only way to get a Rabin-Karp searcher
-        // is to force it using undocumented APIs (for tests/benchmarks).
-        let (search_kind, minimum_len) = match self.config.force {
-            None | Some(ForceAlgorithm::Teddy) => {
-                debug!("trying to build Teddy packed matcher");
-                let teddy = match self.build_teddy(Arc::clone(&patterns)) {
-                    None => return None,
-                    Some(teddy) => teddy,
-                };
-                let minimum_len = teddy.minimum_len();
-                (SearchKind::Teddy(teddy), minimum_len)
-            }
-            Some(ForceAlgorithm::RabinKarp) => {
-                debug!("using Rabin-Karp packed matcher");
-                (SearchKind::RabinKarp, 0)
-            }
-        };
-        Some(Searcher { patterns, rabinkarp, search_kind, minimum_len })
-    }
-
-    fn build_teddy(&self, patterns: Arc<Patterns>) -> Option<teddy::Searcher> {
-        teddy::Builder::new()
-            .only_256bit(self.config.only_teddy_256bit)
-            .only_fat(self.config.only_teddy_fat)
-            .heuristic_pattern_limits(self.config.heuristic_pattern_limits)
-            .build(patterns)
-    }
-
-    /// Add the given pattern to this set to match.
-    ///
-    /// The order in which patterns are added is significant. Namely, when
-    /// using leftmost-first match semantics, then when multiple patterns can
-    /// match at a particular location, the pattern that was added first is
-    /// used as the match.
-    ///
-    /// If the number of patterns added exceeds the amount supported by packed
-    /// searchers, then the builder will stop accumulating patterns and render
-    /// itself inert. At this point, constructing a searcher will always return
-    /// `None`.
-    pub fn add<P: AsRef<[u8]>>(&mut self, pattern: P) -> &mut Builder {
-        if self.inert {
-            return self;
-        } else if self.patterns.len() >= PATTERN_LIMIT {
-            self.inert = true;
-            self.patterns.reset();
-            return self;
-        }
-        // Just in case PATTERN_LIMIT increases beyond u16::MAX.
-        assert!(self.patterns.len() <= core::u16::MAX as usize);
-
-        let pattern = pattern.as_ref();
-        if pattern.is_empty() {
-            self.inert = true;
-            self.patterns.reset();
-            return self;
-        }
-        self.patterns.add(pattern);
-        self
-    }
-
-    /// Add the given iterator of patterns to this set to match.
-    ///
-    /// The iterator must yield elements that can be converted into a `&[u8]`.
-    ///
-    /// The order in which patterns are added is significant. Namely, when
-    /// using leftmost-first match semantics, then when multiple patterns can
-    /// match at a particular location, the pattern that was added first is
-    /// used as the match.
-    ///
-    /// If the number of patterns added exceeds the amount supported by packed
-    /// searchers, then the builder will stop accumulating patterns and render
-    /// itself inert. At this point, constructing a searcher will always return
-    /// `None`.
-    pub fn extend<I, P>(&mut self, patterns: I) -> &mut Builder
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        for p in patterns {
-            self.add(p);
-        }
-        self
-    }
-
-    /// Returns the number of patterns added to this builder.
-    pub fn len(&self) -> usize {
-        self.patterns.len()
-    }
-
-    /// Returns the length, in bytes, of the shortest pattern added.
-    pub fn minimum_len(&self) -> usize {
-        self.patterns.minimum_len()
-    }
-}
-
-impl Default for Builder {
-    fn default() -> Builder {
-        Builder::new()
-    }
-}
-
-/// A packed searcher for quickly finding occurrences of multiple patterns.
-///
-/// If callers need more flexible construction, or if one wants to change the
-/// match semantics (either leftmost-first or leftmost-longest), then one can
-/// use the [`Config`] and/or [`Builder`] types for more fine grained control.
-///
-/// # Example
-///
-/// This example shows how to create a searcher from an iterator of patterns.
-/// By default, leftmost-first match semantics are used.
-///
-/// ```
-/// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID};
-///
-/// # fn example() -> Option<()> {
-/// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?;
-/// let matches: Vec<PatternID> = searcher
-///     .find_iter("foobar")
-///     .map(|mat| mat.pattern())
-///     .collect();
-/// assert_eq!(vec![PatternID::ZERO], matches);
-/// # Some(()) }
-/// # if cfg!(all(feature = "std", any(
-/// #     target_arch = "x86_64", target_arch = "aarch64",
-/// # ))) {
-/// #     example().unwrap()
-/// # } else {
-/// #     assert!(example().is_none());
-/// # }
-/// ```
-#[derive(Clone, Debug)]
-pub struct Searcher {
-    patterns: Arc<Patterns>,
-    rabinkarp: RabinKarp,
-    search_kind: SearchKind,
-    minimum_len: usize,
-}
-
-#[derive(Clone, Debug)]
-enum SearchKind {
-    Teddy(teddy::Searcher),
-    RabinKarp,
-}
-
-impl Searcher {
-    /// A convenience function for constructing a searcher from an iterator
-    /// of things that can be converted to a `&[u8]`.
-    ///
-    /// If a searcher could not be constructed (either because of an
-    /// unsupported CPU or because there are too many patterns), then `None`
-    /// is returned.
-    ///
-    /// # Example
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID};
-    ///
-    /// # fn example() -> Option<()> {
-    /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?;
-    /// let matches: Vec<PatternID> = searcher
-    ///     .find_iter("foobar")
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![PatternID::ZERO], matches);
-    /// # Some(()) }
-    /// # if cfg!(all(feature = "std", any(
-    /// #     target_arch = "x86_64", target_arch = "aarch64",
-    /// # ))) {
-    /// #     example().unwrap()
-    /// # } else {
-    /// #     assert!(example().is_none());
-    /// # }
-    /// ```
-    pub fn new<I, P>(patterns: I) -> Option<Searcher>
-    where
-        I: IntoIterator<Item = P>,
-        P: AsRef<[u8]>,
-    {
-        Builder::new().extend(patterns).build()
-    }
-
-    /// A convenience function for calling `Config::new()`.
-    ///
-    /// This is useful for avoiding an additional import.
-    pub fn config() -> Config {
-        Config::new()
-    }
-
-    /// A convenience function for calling `Builder::new()`.
-    ///
-    /// This is useful for avoiding an additional import.
-    pub fn builder() -> Builder {
-        Builder::new()
-    }
-
-    /// Return the first occurrence of any of the patterns in this searcher,
-    /// according to its match semantics, in the given haystack. The `Match`
-    /// returned will include the identifier of the pattern that matched, which
-    /// corresponds to the index of the pattern (starting from `0`) in which it
-    /// was added.
-    ///
-    /// # Example
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID};
-    ///
-    /// # fn example() -> Option<()> {
-    /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?;
-    /// let mat = searcher.find("foobar")?;
-    /// assert_eq!(PatternID::ZERO, mat.pattern());
-    /// assert_eq!(0, mat.start());
-    /// assert_eq!(6, mat.end());
-    /// # Some(()) }
-    /// # if cfg!(all(feature = "std", any(
-    /// #     target_arch = "x86_64", target_arch = "aarch64",
-    /// # ))) {
-    /// #     example().unwrap()
-    /// # } else {
-    /// #     assert!(example().is_none());
-    /// # }
-    /// ```
-    #[inline]
-    pub fn find<B: AsRef<[u8]>>(&self, haystack: B) -> Option<Match> {
-        let haystack = haystack.as_ref();
-        self.find_in(haystack, Span::from(0..haystack.len()))
-    }
-
-    /// Return the first occurrence of any of the patterns in this searcher,
-    /// according to its match semantics, in the given haystack starting from
-    /// the given position.
-    ///
-    /// The `Match` returned will include the identifier of the pattern that
-    /// matched, which corresponds to the index of the pattern (starting from
-    /// `0`) in which it was added. The offsets in the `Match` will be relative
-    /// to the start of `haystack` (and not `at`).
-    ///
-    /// # Example
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID, Span};
-    ///
-    /// # fn example() -> Option<()> {
-    /// let haystack = "foofoobar";
-    /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?;
-    /// let mat = searcher.find_in(haystack, Span::from(3..haystack.len()))?;
-    /// assert_eq!(PatternID::ZERO, mat.pattern());
-    /// assert_eq!(3, mat.start());
-    /// assert_eq!(9, mat.end());
-    /// # Some(()) }
-    /// # if cfg!(all(feature = "std", any(
-    /// #     target_arch = "x86_64", target_arch = "aarch64",
-    /// # ))) {
-    /// #     example().unwrap()
-    /// # } else {
-    /// #     assert!(example().is_none());
-    /// # }
-    /// ```
-    #[inline]
-    pub fn find_in<B: AsRef<[u8]>>(
-        &self,
-        haystack: B,
-        span: Span,
-    ) -> Option<Match> {
-        let haystack = haystack.as_ref();
-        match self.search_kind {
-            SearchKind::Teddy(ref teddy) => {
-                if haystack[span].len() < teddy.minimum_len() {
-                    return self.find_in_slow(haystack, span);
-                }
-                teddy.find(&haystack[..span.end], span.start)
-            }
-            SearchKind::RabinKarp => {
-                self.rabinkarp.find_at(&haystack[..span.end], span.start)
-            }
-        }
-    }
-
-    /// Return an iterator of non-overlapping occurrences of the patterns in
-    /// this searcher, according to its match semantics, in the given haystack.
-    ///
-    /// # Example
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID};
-    ///
-    /// # fn example() -> Option<()> {
-    /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?;
-    /// let matches: Vec<PatternID> = searcher
-    ///     .find_iter("foobar fooba foofoo")
-    ///     .map(|mat| mat.pattern())
-    ///     .collect();
-    /// assert_eq!(vec![
-    ///     PatternID::must(0),
-    ///     PatternID::must(1),
-    ///     PatternID::must(1),
-    ///     PatternID::must(1),
-    /// ], matches);
-    /// # Some(()) }
-    /// # if cfg!(all(feature = "std", any(
-    /// #     target_arch = "x86_64", target_arch = "aarch64",
-    /// # ))) {
-    /// #     example().unwrap()
-    /// # } else {
-    /// #     assert!(example().is_none());
-    /// # }
-    /// ```
-    #[inline]
-    pub fn find_iter<'a, 'b, B: ?Sized + AsRef<[u8]>>(
-        &'a self,
-        haystack: &'b B,
-    ) -> FindIter<'a, 'b> {
-        let haystack = haystack.as_ref();
-        let span = Span::from(0..haystack.len());
-        FindIter { searcher: self, haystack, span }
-    }
-
-    /// Returns the match kind used by this packed searcher.
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::packed::{MatchKind, Searcher};
-    ///
-    /// # fn example() -> Option<()> {
-    /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?;
-    /// // leftmost-first is the default.
-    /// assert_eq!(&MatchKind::LeftmostFirst, searcher.match_kind());
-    /// # Some(()) }
-    /// # if cfg!(all(feature = "std", any(
-    /// #     target_arch = "x86_64", target_arch = "aarch64",
-    /// # ))) {
-    /// #     example().unwrap()
-    /// # } else {
-    /// #     assert!(example().is_none());
-    /// # }
-    /// ```
-    #[inline]
-    pub fn match_kind(&self) -> &MatchKind {
-        self.patterns.match_kind()
-    }
-
-    /// Returns the minimum length of a haystack that is required in order for
-    /// packed searching to be effective.
-    ///
-    /// In some cases, the underlying packed searcher may not be able to search
-    /// very short haystacks. When that occurs, the implementation will defer
-    /// to a slower non-packed searcher (which is still generally faster than
-    /// Aho-Corasick for a small number of patterns). However, callers may
-    /// want to avoid ever using the slower variant, which one can do by
-    /// never passing a haystack shorter than the minimum length returned by
-    /// this method.
-    #[inline]
-    pub fn minimum_len(&self) -> usize {
-        self.minimum_len
-    }
-
-    /// Returns the approximate total amount of heap used by this searcher, in
-    /// units of bytes.
-    #[inline]
-    pub fn memory_usage(&self) -> usize {
-        self.patterns.memory_usage()
-            + self.rabinkarp.memory_usage()
-            + self.search_kind.memory_usage()
-    }
-
-    /// Use a slow (non-packed) searcher.
-    ///
-    /// This is useful when a packed searcher could be constructed, but could
-    /// not be used to search a specific haystack. For example, if Teddy was
-    /// built but the haystack is smaller than ~34 bytes, then Teddy might not
-    /// be able to run.
-    fn find_in_slow(&self, haystack: &[u8], span: Span) -> Option<Match> {
-        self.rabinkarp.find_at(&haystack[..span.end], span.start)
-    }
-}
-
-impl SearchKind {
-    fn memory_usage(&self) -> usize {
-        match *self {
-            SearchKind::Teddy(ref ted) => ted.memory_usage(),
-            SearchKind::RabinKarp => 0,
-        }
-    }
-}
-
-/// An iterator over non-overlapping matches from a packed searcher.
-///
-/// The lifetime `'s` refers to the lifetime of the underlying [`Searcher`],
-/// while the lifetime `'h` refers to the lifetime of the haystack being
-/// searched.
-#[derive(Debug)]
-pub struct FindIter<'s, 'h> {
-    searcher: &'s Searcher,
-    haystack: &'h [u8],
-    span: Span,
-}
-
-impl<'s, 'h> Iterator for FindIter<'s, 'h> {
-    type Item = Match;
-
-    fn next(&mut self) -> Option<Match> {
-        if self.span.start > self.span.end {
-            return None;
-        }
-        match self.searcher.find_in(&self.haystack, self.span) {
-            None => None,
-            Some(m) => {
-                self.span.start = m.end();
-                Some(m)
-            }
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/ext.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/ext.rs
deleted file mode 100644
index b689642..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/ext.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-/// A trait for adding some helper routines to pointers.
-pub(crate) trait Pointer {
-    /// Returns the distance, in units of `T`, between `self` and `origin`.
-    ///
-    /// # Safety
-    ///
-    /// Same as `ptr::offset_from` in addition to `self >= origin`.
-    unsafe fn distance(self, origin: Self) -> usize;
-
-    /// Casts this pointer to `usize`.
-    ///
-    /// Callers should not convert the `usize` back to a pointer if at all
-    /// possible. (And if you believe it's necessary, open an issue to discuss
-    /// why. Otherwise, it has the potential to violate pointer provenance.)
-    /// The purpose of this function is just to be able to do arithmetic, i.e.,
-    /// computing offsets or alignments.
-    fn as_usize(self) -> usize;
-}
-
-impl<T> Pointer for *const T {
-    unsafe fn distance(self, origin: *const T) -> usize {
-        // TODO: Replace with `ptr::sub_ptr` once stabilized.
-        usize::try_from(self.offset_from(origin)).unwrap_unchecked()
-    }
-
-    fn as_usize(self) -> usize {
-        self as usize
-    }
-}
-
-impl<T> Pointer for *mut T {
-    unsafe fn distance(self, origin: *mut T) -> usize {
-        (self as *const T).distance(origin as *const T)
-    }
-
-    fn as_usize(self) -> usize {
-        (self as *const T).as_usize()
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/mod.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/mod.rs
deleted file mode 100644
index 3990bc93..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/mod.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-/*!
-Provides packed multiple substring search, principally for a small number of
-patterns.
-
-This sub-module provides vectorized routines for quickly finding
-matches of a small number of patterns. In general, users of this crate
-shouldn't need to interface with this module directly, as the primary
-[`AhoCorasick`](crate::AhoCorasick) searcher will use these routines
-automatically as a prefilter when applicable. However, in some cases, callers
-may want to bypass the Aho-Corasick machinery entirely and use this vectorized
-searcher directly.
-
-# Overview
-
-The primary types in this sub-module are:
-
-* [`Searcher`] executes the actual search algorithm to report matches in a
-haystack.
-* [`Builder`] accumulates patterns incrementally and can construct a
-`Searcher`.
-* [`Config`] permits tuning the searcher, and itself will produce a `Builder`
-(which can then be used to build a `Searcher`). Currently, the only tuneable
-knob are the match semantics, but this may be expanded in the future.
-
-# Examples
-
-This example shows how to create a searcher from an iterator of patterns.
-By default, leftmost-first match semantics are used. (See the top-level
-[`MatchKind`] type for more details about match semantics, which apply
-similarly to packed substring search.)
-
-```
-use aho_corasick::{packed::{MatchKind, Searcher}, PatternID};
-
-# fn example() -> Option<()> {
-let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?;
-let matches: Vec<PatternID> = searcher
-    .find_iter("foobar")
-    .map(|mat| mat.pattern())
-    .collect();
-assert_eq!(vec![PatternID::ZERO], matches);
-# Some(()) }
-# if cfg!(all(feature = "std", any(
-#     target_arch = "x86_64", target_arch = "aarch64",
-# ))) {
-#     example().unwrap()
-# } else {
-#     assert!(example().is_none());
-# }
-```
-
-This example shows how to use [`Config`] to change the match semantics to
-leftmost-longest:
-
-```
-use aho_corasick::{packed::{Config, MatchKind}, PatternID};
-
-# fn example() -> Option<()> {
-let searcher = Config::new()
-    .match_kind(MatchKind::LeftmostLongest)
-    .builder()
-    .add("foo")
-    .add("foobar")
-    .build()?;
-let matches: Vec<PatternID> = searcher
-    .find_iter("foobar")
-    .map(|mat| mat.pattern())
-    .collect();
-assert_eq!(vec![PatternID::must(1)], matches);
-# Some(()) }
-# if cfg!(all(feature = "std", any(
-#     target_arch = "x86_64", target_arch = "aarch64",
-# ))) {
-#     example().unwrap()
-# } else {
-#     assert!(example().is_none());
-# }
-```
-
-# Packed substring searching
-
-Packed substring searching refers to the use of SIMD (Single Instruction,
-Multiple Data) to accelerate the detection of matches in a haystack. Unlike
-conventional algorithms, such as Aho-Corasick, SIMD algorithms for substring
-search tend to do better with a small number of patterns, where as Aho-Corasick
-generally maintains reasonably consistent performance regardless of the number
-of patterns you give it. Because of this, the vectorized searcher in this
-sub-module cannot be used as a general purpose searcher, since building the
-searcher may fail even when given a small number of patterns. However, in
-exchange, when searching for a small number of patterns, searching can be quite
-a bit faster than Aho-Corasick (sometimes by an order of magnitude).
-
-The key take away here is that constructing a searcher from a list of patterns
-is a fallible operation with no clear rules for when it will fail. While the
-precise conditions under which building a searcher can fail is specifically an
-implementation detail, here are some common reasons:
-
-* Too many patterns were given. Typically, the limit is on the order of 100 or
-  so, but this limit may fluctuate based on available CPU features.
-* The available packed algorithms require CPU features that aren't available.
-  For example, currently, this crate only provides packed algorithms for
-  `x86_64` and `aarch64`. Therefore, constructing a packed searcher on any
-  other target will always fail.
-* Zero patterns were given, or one of the patterns given was empty. Packed
-  searchers require at least one pattern and that all patterns are non-empty.
-* Something else about the nature of the patterns (typically based on
-  heuristics) suggests that a packed searcher would perform very poorly, so
-  no searcher is built.
-*/
-
-pub use crate::packed::api::{Builder, Config, FindIter, MatchKind, Searcher};
-
-mod api;
-mod ext;
-mod pattern;
-mod rabinkarp;
-mod teddy;
-#[cfg(all(feature = "std", test))]
-mod tests;
-mod vector;
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/pattern.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/pattern.rs
deleted file mode 100644
index 95aca4d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/pattern.rs
+++ /dev/null
@@ -1,480 +0,0 @@
-use core::{cmp, fmt, mem, u16, usize};
-
-use alloc::{boxed::Box, string::String, vec, vec::Vec};
-
-use crate::{
-    packed::{api::MatchKind, ext::Pointer},
-    PatternID,
-};
-
-/// A non-empty collection of non-empty patterns to search for.
-///
-/// This collection of patterns is what is passed around to both execute
-/// searches and to construct the searchers themselves. Namely, this permits
-/// searches to avoid copying all of the patterns, and allows us to keep only
-/// one copy throughout all packed searchers.
-///
-/// Note that this collection is not a set. The same pattern can appear more
-/// than once.
-#[derive(Clone, Debug)]
-pub(crate) struct Patterns {
-    /// The match semantics supported by this collection of patterns.
-    ///
-    /// The match semantics determines the order of the iterator over patterns.
-    /// For leftmost-first, patterns are provided in the same order as were
-    /// provided by the caller. For leftmost-longest, patterns are provided in
-    /// descending order of length, with ties broken by the order in which they
-    /// were provided by the caller.
-    kind: MatchKind,
-    /// The collection of patterns, indexed by their identifier.
-    by_id: Vec<Vec<u8>>,
-    /// The order of patterns defined for iteration, given by pattern
-    /// identifiers. The order of `by_id` and `order` is always the same for
-    /// leftmost-first semantics, but may be different for leftmost-longest
-    /// semantics.
-    order: Vec<PatternID>,
-    /// The length of the smallest pattern, in bytes.
-    minimum_len: usize,
-    /// The total number of pattern bytes across the entire collection. This
-    /// is used for reporting total heap usage in constant time.
-    total_pattern_bytes: usize,
-}
-
-// BREADCRUMBS: I think we want to experiment with a different bucket
-// representation. Basically, each bucket is just a Range<usize> to a single
-// contiguous allocation? Maybe length-prefixed patterns or something? The
-// idea is to try to get rid of the pointer chasing in verification. I don't
-// know that that is the issue, but I suspect it is.
-
-impl Patterns {
-    /// Create a new collection of patterns for the given match semantics. The
-    /// ID of each pattern is the index of the pattern at which it occurs in
-    /// the `by_id` slice.
-    ///
-    /// If any of the patterns in the slice given are empty, then this panics.
-    /// Similarly, if the number of patterns given is zero, then this also
-    /// panics.
-    pub(crate) fn new() -> Patterns {
-        Patterns {
-            kind: MatchKind::default(),
-            by_id: vec![],
-            order: vec![],
-            minimum_len: usize::MAX,
-            total_pattern_bytes: 0,
-        }
-    }
-
-    /// Add a pattern to this collection.
-    ///
-    /// This panics if the pattern given is empty.
-    pub(crate) fn add(&mut self, bytes: &[u8]) {
-        assert!(!bytes.is_empty());
-        assert!(self.by_id.len() <= u16::MAX as usize);
-
-        let id = PatternID::new(self.by_id.len()).unwrap();
-        self.order.push(id);
-        self.by_id.push(bytes.to_vec());
-        self.minimum_len = cmp::min(self.minimum_len, bytes.len());
-        self.total_pattern_bytes += bytes.len();
-    }
-
-    /// Set the match kind semantics for this collection of patterns.
-    ///
-    /// If the kind is not set, then the default is leftmost-first.
-    pub(crate) fn set_match_kind(&mut self, kind: MatchKind) {
-        self.kind = kind;
-        match self.kind {
-            MatchKind::LeftmostFirst => {
-                self.order.sort();
-            }
-            MatchKind::LeftmostLongest => {
-                let (order, by_id) = (&mut self.order, &mut self.by_id);
-                order.sort_by(|&id1, &id2| {
-                    by_id[id1].len().cmp(&by_id[id2].len()).reverse()
-                });
-            }
-        }
-    }
-
-    /// Return the number of patterns in this collection.
-    ///
-    /// This is guaranteed to be greater than zero.
-    pub(crate) fn len(&self) -> usize {
-        self.by_id.len()
-    }
-
-    /// Returns true if and only if this collection of patterns is empty.
-    pub(crate) fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Returns the approximate total amount of heap used by these patterns, in
-    /// units of bytes.
-    pub(crate) fn memory_usage(&self) -> usize {
-        self.order.len() * mem::size_of::<PatternID>()
-            + self.by_id.len() * mem::size_of::<Vec<u8>>()
-            + self.total_pattern_bytes
-    }
-
-    /// Clears all heap memory associated with this collection of patterns and
-    /// resets all state such that it is a valid empty collection.
-    pub(crate) fn reset(&mut self) {
-        self.kind = MatchKind::default();
-        self.by_id.clear();
-        self.order.clear();
-        self.minimum_len = usize::MAX;
-    }
-
-    /// Returns the length, in bytes, of the smallest pattern.
-    ///
-    /// This is guaranteed to be at least one.
-    pub(crate) fn minimum_len(&self) -> usize {
-        self.minimum_len
-    }
-
-    /// Returns the match semantics used by these patterns.
-    pub(crate) fn match_kind(&self) -> &MatchKind {
-        &self.kind
-    }
-
-    /// Return the pattern with the given identifier. If such a pattern does
-    /// not exist, then this panics.
-    pub(crate) fn get(&self, id: PatternID) -> Pattern<'_> {
-        Pattern(&self.by_id[id])
-    }
-
-    /// Return the pattern with the given identifier without performing bounds
-    /// checks.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that a pattern with the given identifier exists
-    /// before using this method.
-    pub(crate) unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
-        Pattern(self.by_id.get_unchecked(id.as_usize()))
-    }
-
-    /// Return an iterator over all the patterns in this collection, in the
-    /// order in which they should be matched.
-    ///
-    /// Specifically, in a naive multi-pattern matcher, the following is
-    /// guaranteed to satisfy the match semantics of this collection of
-    /// patterns:
-    ///
-    /// ```ignore
-    /// for i in 0..haystack.len():
-    ///   for p in patterns.iter():
-    ///     if haystack[i..].starts_with(p.bytes()):
-    ///       return Match(p.id(), i, i + p.bytes().len())
-    /// ```
-    ///
-    /// Namely, among the patterns in a collection, if they are matched in
-    /// the order provided by this iterator, then the result is guaranteed
-    /// to satisfy the correct match semantics. (Either leftmost-first or
-    /// leftmost-longest.)
-    pub(crate) fn iter(&self) -> PatternIter<'_> {
-        PatternIter { patterns: self, i: 0 }
-    }
-}
-
-/// An iterator over the patterns in the `Patterns` collection.
-///
-/// The order of the patterns provided by this iterator is consistent with the
-/// match semantics of the originating collection of patterns.
-///
-/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
-/// this is iterating over.
-#[derive(Debug)]
-pub(crate) struct PatternIter<'p> {
-    patterns: &'p Patterns,
-    i: usize,
-}
-
-impl<'p> Iterator for PatternIter<'p> {
-    type Item = (PatternID, Pattern<'p>);
-
-    fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> {
-        if self.i >= self.patterns.len() {
-            return None;
-        }
-        let id = self.patterns.order[self.i];
-        let p = self.patterns.get(id);
-        self.i += 1;
-        Some((id, p))
-    }
-}
-
-/// A pattern that is used in packed searching.
-#[derive(Clone)]
-pub(crate) struct Pattern<'a>(&'a [u8]);
-
-impl<'a> fmt::Debug for Pattern<'a> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Pattern")
-            .field("lit", &String::from_utf8_lossy(&self.0))
-            .finish()
-    }
-}
-
-impl<'p> Pattern<'p> {
-    /// Returns the length of this pattern, in bytes.
-    pub(crate) fn len(&self) -> usize {
-        self.0.len()
-    }
-
-    /// Returns the bytes of this pattern.
-    pub(crate) fn bytes(&self) -> &[u8] {
-        &self.0
-    }
-
-    /// Returns the first `len` low nybbles from this pattern. If this pattern
-    /// is shorter than `len`, then this panics.
-    pub(crate) fn low_nybbles(&self, len: usize) -> Box<[u8]> {
-        let mut nybs = vec![0; len].into_boxed_slice();
-        for (i, byte) in self.bytes().iter().take(len).enumerate() {
-            nybs[i] = byte & 0xF;
-        }
-        nybs
-    }
-
-    /// Returns true if this pattern is a prefix of the given bytes.
-    #[inline(always)]
-    pub(crate) fn is_prefix(&self, bytes: &[u8]) -> bool {
-        is_prefix(bytes, self.bytes())
-    }
-
-    /// Returns true if this pattern is a prefix of the haystack given by the
-    /// raw `start` and `end` pointers.
-    ///
-    /// # Safety
-    ///
-    /// * It must be the case that `start < end` and that the distance between
-    /// them is at least equal to `V::BYTES`. That is, it must always be valid
-    /// to do at least an unaligned load of `V` at `start`.
-    /// * Both `start` and `end` must be valid for reads.
-    /// * Both `start` and `end` must point to an initialized value.
-    /// * Both `start` and `end` must point to the same allocated object and
-    /// must either be in bounds or at most one byte past the end of the
-    /// allocated object.
-    /// * Both `start` and `end` must be _derived from_ a pointer to the same
-    /// object.
-    /// * The distance between `start` and `end` must not overflow `isize`.
-    /// * The distance being in bounds must not rely on "wrapping around" the
-    /// address space.
-    #[inline(always)]
-    pub(crate) unsafe fn is_prefix_raw(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> bool {
-        let patlen = self.bytes().len();
-        let haylen = end.distance(start);
-        if patlen > haylen {
-            return false;
-        }
-        // SAFETY: We've checked that the haystack has length at least equal
-        // to this pattern. All other safety concerns are the responsibility
-        // of the caller.
-        is_equal_raw(start, self.bytes().as_ptr(), patlen)
-    }
-}
-
-/// Returns true if and only if `needle` is a prefix of `haystack`.
-///
-/// This uses a latency optimized variant of `memcmp` internally which *might*
-/// make this faster for very short strings.
-///
-/// # Inlining
-///
-/// This routine is marked `inline(always)`. If you want to call this function
-/// in a way that is not always inlined, you'll need to wrap a call to it in
-/// another function that is marked as `inline(never)` or just `inline`.
-#[inline(always)]
-fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool {
-    if needle.len() > haystack.len() {
-        return false;
-    }
-    // SAFETY: Our pointers are derived directly from borrowed slices which
-    // uphold all of our safety guarantees except for length. We account for
-    // length with the check above.
-    unsafe { is_equal_raw(haystack.as_ptr(), needle.as_ptr(), needle.len()) }
-}
-
-/// Compare corresponding bytes in `x` and `y` for equality.
-///
-/// That is, this returns true if and only if `x.len() == y.len()` and
-/// `x[i] == y[i]` for all `0 <= i < x.len()`.
-///
-/// Note that this isn't used. We only use it in tests as a convenient way
-/// of testing `is_equal_raw`.
-///
-/// # Inlining
-///
-/// This routine is marked `inline(always)`. If you want to call this function
-/// in a way that is not always inlined, you'll need to wrap a call to it in
-/// another function that is marked as `inline(never)` or just `inline`.
-///
-/// # Motivation
-///
-/// Why not use slice equality instead? Well, slice equality usually results in
-/// a call out to the current platform's `libc` which might not be inlineable
-/// or have other overhead. This routine isn't guaranteed to be a win, but it
-/// might be in some cases.
-#[cfg(test)]
-#[inline(always)]
-fn is_equal(x: &[u8], y: &[u8]) -> bool {
-    if x.len() != y.len() {
-        return false;
-    }
-    // SAFETY: Our pointers are derived directly from borrowed slices which
-    // uphold all of our safety guarantees except for length. We account for
-    // length with the check above.
-    unsafe { is_equal_raw(x.as_ptr(), y.as_ptr(), x.len()) }
-}
-
-/// Compare `n` bytes at the given pointers for equality.
-///
-/// This returns true if and only if `*x.add(i) == *y.add(i)` for all
-/// `0 <= i < n`.
-///
-/// # Inlining
-///
-/// This routine is marked `inline(always)`. If you want to call this function
-/// in a way that is not always inlined, you'll need to wrap a call to it in
-/// another function that is marked as `inline(never)` or just `inline`.
-///
-/// # Motivation
-///
-/// Why not use slice equality instead? Well, slice equality usually results in
-/// a call out to the current platform's `libc` which might not be inlineable
-/// or have other overhead. This routine isn't guaranteed to be a win, but it
-/// might be in some cases.
-///
-/// # Safety
-///
-/// * Both `x` and `y` must be valid for reads of up to `n` bytes.
-/// * Both `x` and `y` must point to an initialized value.
-/// * Both `x` and `y` must each point to an allocated object and
-/// must either be in bounds or at most one byte past the end of the
-/// allocated object. `x` and `y` do not need to point to the same allocated
-/// object, but they may.
-/// * Both `x` and `y` must be _derived from_ a pointer to their respective
-/// allocated objects.
-/// * The distance between `x` and `x+n` must not overflow `isize`. Similarly
-/// for `y` and `y+n`.
-/// * The distance being in bounds must not rely on "wrapping around" the
-/// address space.
-#[inline(always)]
-unsafe fn is_equal_raw(mut x: *const u8, mut y: *const u8, n: usize) -> bool {
-    // If we don't have enough bytes to do 4-byte at a time loads, then
-    // handle each possible length specially. Note that I used to have a
-    // byte-at-a-time loop here and that turned out to be quite a bit slower
-    // for the memmem/pathological/defeat-simple-vector-alphabet benchmark.
-    if n < 4 {
-        return match n {
-            0 => true,
-            1 => x.read() == y.read(),
-            2 => {
-                x.cast::<u16>().read_unaligned()
-                    == y.cast::<u16>().read_unaligned()
-            }
-            // I also tried copy_nonoverlapping here and it looks like the
-            // codegen is the same.
-            3 => x.cast::<[u8; 3]>().read() == y.cast::<[u8; 3]>().read(),
-            _ => unreachable!(),
-        };
-    }
-    // When we have 4 or more bytes to compare, then proceed in chunks of 4 at
-    // a time using unaligned loads.
-    //
-    // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is
-    // that this particular version of memcmp is likely to be called with tiny
-    // needles. That means that if we do 8 byte loads, then a higher proportion
-    // of memcmp calls will use the slower variant above. With that said, this
-    // is a hypothesis and is only loosely supported by benchmarks. There's
-    // likely some improvement that could be made here. The main thing here
-    // though is to optimize for latency, not throughput.
-
-    // SAFETY: The caller is responsible for ensuring the pointers we get are
-    // valid and readable for at least `n` bytes. We also do unaligned loads,
-    // so there's no need to ensure we're aligned. (This is justified by this
-    // routine being specifically for short strings.)
-    let xend = x.add(n.wrapping_sub(4));
-    let yend = y.add(n.wrapping_sub(4));
-    while x < xend {
-        let vx = x.cast::<u32>().read_unaligned();
-        let vy = y.cast::<u32>().read_unaligned();
-        if vx != vy {
-            return false;
-        }
-        x = x.add(4);
-        y = y.add(4);
-    }
-    let vx = xend.cast::<u32>().read_unaligned();
-    let vy = yend.cast::<u32>().read_unaligned();
-    vx == vy
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn equals_different_lengths() {
-        assert!(!is_equal(b"", b"a"));
-        assert!(!is_equal(b"a", b""));
-        assert!(!is_equal(b"ab", b"a"));
-        assert!(!is_equal(b"a", b"ab"));
-    }
-
-    #[test]
-    fn equals_mismatch() {
-        let one_mismatch = [
-            (&b"a"[..], &b"x"[..]),
-            (&b"ab"[..], &b"ax"[..]),
-            (&b"abc"[..], &b"abx"[..]),
-            (&b"abcd"[..], &b"abcx"[..]),
-            (&b"abcde"[..], &b"abcdx"[..]),
-            (&b"abcdef"[..], &b"abcdex"[..]),
-            (&b"abcdefg"[..], &b"abcdefx"[..]),
-            (&b"abcdefgh"[..], &b"abcdefgx"[..]),
-            (&b"abcdefghi"[..], &b"abcdefghx"[..]),
-            (&b"abcdefghij"[..], &b"abcdefghix"[..]),
-            (&b"abcdefghijk"[..], &b"abcdefghijx"[..]),
-            (&b"abcdefghijkl"[..], &b"abcdefghijkx"[..]),
-            (&b"abcdefghijklm"[..], &b"abcdefghijklx"[..]),
-            (&b"abcdefghijklmn"[..], &b"abcdefghijklmx"[..]),
-        ];
-        for (x, y) in one_mismatch {
-            assert_eq!(x.len(), y.len(), "lengths should match");
-            assert!(!is_equal(x, y));
-            assert!(!is_equal(y, x));
-        }
-    }
-
-    #[test]
-    fn equals_yes() {
-        assert!(is_equal(b"", b""));
-        assert!(is_equal(b"a", b"a"));
-        assert!(is_equal(b"ab", b"ab"));
-        assert!(is_equal(b"abc", b"abc"));
-        assert!(is_equal(b"abcd", b"abcd"));
-        assert!(is_equal(b"abcde", b"abcde"));
-        assert!(is_equal(b"abcdef", b"abcdef"));
-        assert!(is_equal(b"abcdefg", b"abcdefg"));
-        assert!(is_equal(b"abcdefgh", b"abcdefgh"));
-        assert!(is_equal(b"abcdefghi", b"abcdefghi"));
-    }
-
-    #[test]
-    fn prefix() {
-        assert!(is_prefix(b"", b""));
-        assert!(is_prefix(b"a", b""));
-        assert!(is_prefix(b"ab", b""));
-        assert!(is_prefix(b"foo", b"foo"));
-        assert!(is_prefix(b"foobar", b"foo"));
-
-        assert!(!is_prefix(b"foo", b"fob"));
-        assert!(!is_prefix(b"foobar", b"fob"));
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/rabinkarp.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/rabinkarp.rs
deleted file mode 100644
index fdd8a6f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/rabinkarp.rs
+++ /dev/null
@@ -1,168 +0,0 @@
-use alloc::{sync::Arc, vec, vec::Vec};
-
-use crate::{packed::pattern::Patterns, util::search::Match, PatternID};
-
-/// The type of the rolling hash used in the Rabin-Karp algorithm.
-type Hash = usize;
-
-/// The number of buckets to store our patterns in. We don't want this to be
-/// too big in order to avoid wasting memory, but we don't want it to be too
-/// small either to avoid spending too much time confirming literals.
-///
-/// The number of buckets MUST be a power of two. Otherwise, determining the
-/// bucket from a hash will slow down the code considerably. Using a power
-/// of two means `hash % NUM_BUCKETS` can compile down to a simple `and`
-/// instruction.
-const NUM_BUCKETS: usize = 64;
-
-/// An implementation of the Rabin-Karp algorithm. The main idea of this
-/// algorithm is to maintain a rolling hash as it moves through the input, and
-/// then check whether that hash corresponds to the same hash for any of the
-/// patterns we're looking for.
-///
-/// A draw back of naively scaling Rabin-Karp to multiple patterns is that
-/// it requires all of the patterns to be the same length, which in turn
-/// corresponds to the number of bytes to hash. We adapt this to work for
-/// multiple patterns of varying size by fixing the number of bytes to hash
-/// to be the length of the smallest pattern. We also split the patterns into
-/// several buckets to hopefully make the confirmation step faster.
-///
-/// Wikipedia has a decent explanation, if a bit heavy on the theory:
-/// https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm
-///
-/// But ESMAJ provides something a bit more concrete:
-/// https://www-igm.univ-mlv.fr/~lecroq/string/node5.html
-#[derive(Clone, Debug)]
-pub(crate) struct RabinKarp {
-    /// The patterns we're searching for.
-    patterns: Arc<Patterns>,
-    /// The order of patterns in each bucket is significant. Namely, they are
-    /// arranged such that the first one to match is the correct match. This
-    /// may not necessarily correspond to the order provided by the caller.
-    /// For example, if leftmost-longest semantics are used, then the patterns
-    /// are sorted by their length in descending order. If leftmost-first
-    /// semantics are used, then the patterns are sorted by their pattern ID
-    /// in ascending order (which corresponds to the caller's order).
-    buckets: Vec<Vec<(Hash, PatternID)>>,
-    /// The length of the hashing window. Generally, this corresponds to the
-    /// length of the smallest pattern.
-    hash_len: usize,
-    /// The factor to subtract out of a hash before updating it with a new
-    /// byte.
-    hash_2pow: usize,
-}
-
-impl RabinKarp {
-    /// Compile a new Rabin-Karp matcher from the patterns given.
-    ///
-    /// This panics if any of the patterns in the collection are empty, or if
-    /// the collection is itself empty.
-    pub(crate) fn new(patterns: &Arc<Patterns>) -> RabinKarp {
-        assert!(patterns.len() >= 1);
-        let hash_len = patterns.minimum_len();
-        assert!(hash_len >= 1);
-
-        let mut hash_2pow = 1usize;
-        for _ in 1..hash_len {
-            hash_2pow = hash_2pow.wrapping_shl(1);
-        }
-
-        let mut rk = RabinKarp {
-            patterns: Arc::clone(patterns),
-            buckets: vec![vec![]; NUM_BUCKETS],
-            hash_len,
-            hash_2pow,
-        };
-        for (id, pat) in patterns.iter() {
-            let hash = rk.hash(&pat.bytes()[..rk.hash_len]);
-            let bucket = hash % NUM_BUCKETS;
-            rk.buckets[bucket].push((hash, id));
-        }
-        rk
-    }
-
-    /// Return the first matching pattern in the given haystack, begining the
-    /// search at `at`.
-    pub(crate) fn find_at(
-        &self,
-        haystack: &[u8],
-        mut at: usize,
-    ) -> Option<Match> {
-        assert_eq!(NUM_BUCKETS, self.buckets.len());
-
-        if at + self.hash_len > haystack.len() {
-            return None;
-        }
-        let mut hash = self.hash(&haystack[at..at + self.hash_len]);
-        loop {
-            let bucket = &self.buckets[hash % NUM_BUCKETS];
-            for &(phash, pid) in bucket {
-                if phash == hash {
-                    if let Some(c) = self.verify(pid, haystack, at) {
-                        return Some(c);
-                    }
-                }
-            }
-            if at + self.hash_len >= haystack.len() {
-                return None;
-            }
-            hash = self.update_hash(
-                hash,
-                haystack[at],
-                haystack[at + self.hash_len],
-            );
-            at += 1;
-        }
-    }
-
-    /// Returns the approximate total amount of heap used by this searcher, in
-    /// units of bytes.
-    pub(crate) fn memory_usage(&self) -> usize {
-        self.buckets.len() * core::mem::size_of::<Vec<(Hash, PatternID)>>()
-            + self.patterns.len() * core::mem::size_of::<(Hash, PatternID)>()
-    }
-
-    /// Verify whether the pattern with the given id matches at
-    /// `haystack[at..]`.
-    ///
-    /// We tag this function as `cold` because it helps improve codegen.
-    /// Intuitively, it would seem like inlining it would be better. However,
-    /// the only time this is called and a match is not found is when there
-    /// there is a hash collision, or when a prefix of a pattern matches but
-    /// the entire pattern doesn't match. This is hopefully fairly rare, and
-    /// if it does occur a lot, it's going to be slow no matter what we do.
-    #[cold]
-    fn verify(
-        &self,
-        id: PatternID,
-        haystack: &[u8],
-        at: usize,
-    ) -> Option<Match> {
-        let pat = self.patterns.get(id);
-        if pat.is_prefix(&haystack[at..]) {
-            Some(Match::new(id, at..at + pat.len()))
-        } else {
-            None
-        }
-    }
-
-    /// Hash the given bytes.
-    fn hash(&self, bytes: &[u8]) -> Hash {
-        assert_eq!(self.hash_len, bytes.len());
-
-        let mut hash = 0usize;
-        for &b in bytes {
-            hash = hash.wrapping_shl(1).wrapping_add(b as usize);
-        }
-        hash
-    }
-
-    /// Update the hash given based on removing `old_byte` at the beginning
-    /// of some byte string, and appending `new_byte` to the end of that same
-    /// byte string.
-    fn update_hash(&self, prev: Hash, old_byte: u8, new_byte: u8) -> Hash {
-        prev.wrapping_sub((old_byte as usize).wrapping_mul(self.hash_2pow))
-            .wrapping_shl(1)
-            .wrapping_add(new_byte as usize)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/README.md b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/README.md
deleted file mode 100644
index f0928cbe..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/README.md
+++ /dev/null
@@ -1,386 +0,0 @@
-Teddy is a SIMD accelerated multiple substring matching algorithm. The name
-and the core ideas in the algorithm were learned from the [Hyperscan][1_u]
-project. The implementation in this repository was mostly motivated for use in
-accelerating regex searches by searching for small sets of required literals
-extracted from the regex.
-
-
-# Background
-
-The key idea of Teddy is to do *packed* substring matching. In the literature,
-packed substring matching is the idea of examining multiple bytes in a haystack
-at a time to detect matches. Implementations of, for example, memchr (which
-detects matches of a single byte) have been doing this for years. Only
-recently, with the introduction of various SIMD instructions, has this been
-extended to substring matching. The PCMPESTRI instruction (and its relatives),
-for example, implements substring matching in hardware. It is, however, limited
-to substrings of length 16 bytes or fewer, but this restriction is fine in a
-regex engine, since we rarely care about the performance difference between
-searching for a 16 byte literal and a 16 + N literal; 16 is already long
-enough. The key downside of the PCMPESTRI instruction, on current (2016) CPUs
-at least, is its latency and throughput. As a result, it is often faster to
-do substring search with a Boyer-Moore (or Two-Way) variant and a well placed
-memchr to quickly skip through the haystack.
-
-There are fewer results from the literature on packed substring matching,
-and even fewer for packed multiple substring matching. Ben-Kiki et al. [2]
-describes use of PCMPESTRI for substring matching, but is mostly theoretical
-and hand-waves performance. There is other theoretical work done by Bille [3]
-as well.
-
-The rest of the work in the field, as far as I'm aware, is by Faro and Kulekci
-and is generally focused on multiple pattern search. Their first paper [4a]
-introduces the concept of a fingerprint, which is computed for every block of
-N bytes in every pattern. The haystack is then scanned N bytes at a time and
-a fingerprint is computed in the same way it was computed for blocks in the
-patterns. If the fingerprint corresponds to one that was found in a pattern,
-then a verification step follows to confirm that one of the substrings with the
-corresponding fingerprint actually matches at the current location. Various
-implementation tricks are employed to make sure the fingerprint lookup is fast;
-typically by truncating the fingerprint. (This may, of course, provoke more
-steps in the verification process, so a balance must be struck.)
-
-The main downside of [4a] is that the minimum substring length is 32 bytes,
-presumably because of how the algorithm uses certain SIMD instructions. This
-essentially makes it useless for general purpose regex matching, where a small
-number of short patterns is far more likely.
-
-Faro and Kulekci published another paper [4b] that is conceptually very similar
-to [4a]. The key difference is that it uses the CRC32 instruction (introduced
-as part of SSE 4.2) to compute fingerprint values. This also enables the
-algorithm to work effectively on substrings as short as 7 bytes with 4 byte
-windows. 7 bytes is unfortunately still too long. The window could be
-technically shrunk to 2 bytes, thereby reducing minimum length to 3, but the
-small window size ends up negating most performance benefits—and it's likely
-the common case in a general purpose regex engine.
-
-Faro and Kulekci also published [4c] that appears to be intended as a
-replacement to using PCMPESTRI. In particular, it is specifically motivated by
-the high throughput/latency time of PCMPESTRI and therefore chooses other SIMD
-instructions that are faster. While this approach works for short substrings,
-I personally couldn't see a way to generalize it to multiple substring search.
-
-Faro and Kulekci have another paper [4d] that I haven't been able to read
-because it is behind a paywall.
-
-
-# Teddy
-
-Finally, we get to Teddy. If the above literature review is complete, then it
-appears that Teddy is a novel algorithm. More than that, in my experience, it
-completely blows away the competition for short substrings, which is exactly
-what we want in a general purpose regex engine. Again, the algorithm appears
-to be developed by the authors of [Hyperscan][1_u]. Hyperscan was open sourced
-late 2015, and no earlier history could be found. Therefore, tracking the exact
-provenance of the algorithm with respect to the published literature seems
-difficult.
-
-At a high level, Teddy works somewhat similarly to the fingerprint algorithms
-published by Faro and Kulekci, but Teddy does it in a way that scales a bit
-better. Namely:
-
-1. Teddy's core algorithm scans the haystack in 16 (for SSE, or 32 for AVX)
-   byte chunks. 16 (or 32) is significant because it corresponds to the number
-   of bytes in a SIMD vector.
-2. Bitwise operations are performed on each chunk to discover if any region of
-   it matches a set of precomputed fingerprints from the patterns. If there are
-   matches, then a verification step is performed. In this implementation, our
-   verification step is naive. This can be improved upon.
-
-The details to make this work are quite clever. First, we must choose how to
-pick our fingerprints. In Hyperscan's implementation, I *believe* they use the
-last N bytes of each substring, where N must be at least the minimum length of
-any substring in the set being searched. In this implementation, we use the
-first N bytes of each substring. (The tradeoffs between these choices aren't
-yet clear to me.) We then must figure out how to quickly test whether an
-occurrence of any fingerprint from the set of patterns appears in a 16 byte
-block from the haystack. To keep things simple, let's assume N = 1 and examine
-some examples to motivate the approach. Here are our patterns:
-
-```ignore
-foo
-bar
-baz
-```
-
-The corresponding fingerprints, for N = 1, are `f`, `b` and `b`. Now let's set
-our 16 byte block to:
-
-```ignore
-bat cat foo bump
-xxxxxxxxxxxxxxxx
-```
-
-To cut to the chase, Teddy works by using bitsets. In particular, Teddy creates
-a mask that allows us to quickly compute membership of a fingerprint in a 16
-byte block that also tells which pattern the fingerprint corresponds to. In
-this case, our fingerprint is a single byte, so an appropriate abstraction is
-a map from a single byte to a list of patterns that contain that fingerprint:
-
-```ignore
-f |--> foo
-b |--> bar, baz
-```
-
-Now, all we need to do is figure out how to represent this map in vector space
-and use normal SIMD operations to perform a lookup. The first simplification
-we can make is to represent our patterns as bit fields occupying a single
-byte. This is important, because a single SIMD vector can store 16 bytes.
-
-```ignore
-f |--> 00000001
-b |--> 00000010, 00000100
-```
-
-How do we perform lookup though? It turns out that SSSE3 introduced a very cool
-instruction called PSHUFB. The instruction takes two SIMD vectors, `A` and `B`,
-and returns a third vector `C`. All vectors are treated as 16 8-bit integers.
-`C` is formed by `C[i] = A[B[i]]`. (This is a bit of a simplification, but true
-for the purposes of this algorithm. For full details, see [Intel's Intrinsics
-Guide][5_u].) This essentially lets us use the values in `B` to lookup values
-in `A`.
-
-If we could somehow cause `B` to contain our 16 byte block from the haystack,
-and if `A` could contain our bitmasks, then we'd end up with something like
-this for `A`:
-
-```ignore
-    0x00 0x01 ... 0x62      ... 0x66      ... 0xFF
-A = 0    0        00000110      00000001      0
-```
-
-And if `B` contains our window from our haystack, we could use shuffle to take
-the values from `B` and use them to look up our bitsets in `A`. But of course,
-we can't do this because `A` in the above example contains 256 bytes, which
-is much larger than the size of a SIMD vector.
-
-Nybbles to the rescue! A nybble is 4 bits. Instead of one mask to hold all of
-our bitsets, we can use two masks, where one mask corresponds to the lower four
-bits of our fingerprint and the other mask corresponds to the upper four bits.
-So our map now looks like:
-
-```ignore
-'f' & 0xF = 0x6 |--> 00000001
-'f' >> 4  = 0x6 |--> 00000111
-'b' & 0xF = 0x2 |--> 00000110
-'b' >> 4  = 0x6 |--> 00000111
-```
-
-Notice that the bitsets for each nybble correspond to the union of all
-fingerprints that contain that nybble. For example, both `f` and `b` have the
-same upper 4 bits but differ on the lower 4 bits. Putting this together, we
-have `A0`, `A1` and `B`, where `A0` is our mask for the lower nybble, `A1` is
-our mask for the upper nybble and `B` is our 16 byte block from the haystack:
-
-```ignore
-      0x00 0x01 0x02      0x03 ... 0x06      ... 0xF
-A0 =  0    0    00000110  0        00000001      0
-A1 =  0    0    0         0        00000111      0
-B  =  b    a    t         _        t             p
-B  =  0x62 0x61 0x74      0x20     0x74          0x70
-```
-
-But of course, we can't use `B` with `PSHUFB` yet, since its values are 8 bits,
-and we need indexes that are at most 4 bits (corresponding to one of 16
-values). We can apply the same transformation to split `B` into lower and upper
-nybbles as we did `A`. As before, `B0` corresponds to the lower nybbles and
-`B1` corresponds to the upper nybbles:
-
-```ignore
-     b   a   t   _   c   a   t   _   f   o   o   _   b   u   m   p
-B0 = 0x2 0x1 0x4 0x0 0x3 0x1 0x4 0x0 0x6 0xF 0xF 0x0 0x2 0x5 0xD 0x0
-B1 = 0x6 0x6 0x7 0x2 0x6 0x6 0x7 0x2 0x6 0x6 0x6 0x2 0x6 0x7 0x6 0x7
-```
-
-And now we have a nice correspondence. `B0` can index `A0` and `B1` can index
-`A1`. Here's what we get when we apply `C0 = PSHUFB(A0, B0)`:
-
-```ignore
-     b         a        ... f         o         ... p
-     A0[0x2]   A0[0x1]      A0[0x6]   A0[0xF]       A0[0x0]
-C0 = 00000110  0            00000001  0             0
-```
-
-And `C1 = PSHUFB(A1, B1)`:
-
-```ignore
-     b         a        ... f         o        ... p
-     A1[0x6]   A1[0x6]      A1[0x6]   A1[0x6]      A1[0x7]
-C1 = 00000111  00000111     00000111  00000111     0
-```
-
-Notice how neither one of `C0` or `C1` is guaranteed to report fully correct
-results all on its own. For example, `C1` claims that `b` is a fingerprint for
-the pattern `foo` (since `A1[0x6] = 00000111`), and that `o` is a fingerprint
-for all of our patterns. But if we combined `C0` and `C1` with an `AND`
-operation:
-
-```ignore
-     b         a        ... f         o        ... p
-C  = 00000110  0            00000001  0            0
-```
-
-Then we now have that `C[i]` contains a bitset corresponding to the matching
-fingerprints in a haystack's 16 byte block, where `i` is the `ith` byte in that
-block.
-
-Once we have that, we can look for the position of the least significant bit
-in `C`. (Least significant because we only target little endian here. Thus,
-the least significant bytes correspond to bytes in our haystack at a lower
-address.) That position, modulo `8`, gives us the pattern that the fingerprint
-matches. That position, integer divided by `8`, also gives us the byte offset
-that the fingerprint occurs in inside the 16 byte haystack block. Using those
-two pieces of information, we can run a verification procedure that tries
-to match all substrings containing that fingerprint at that position in the
-haystack.
-
-
-# Implementation notes
-
-The problem with the algorithm as described above is that it uses a single byte
-for a fingerprint. This will work well if the fingerprints are rare in the
-haystack (e.g., capital letters or special characters in normal English text),
-but if the fingerprints are common, you'll wind up spending too much time in
-the verification step, which effectively negates the performance benefits of
-scanning 16 bytes at a time. Remember, the key to the performance of this
-algorithm is to do as little work as possible per 16 (or 32) bytes.
-
-This algorithm can be extrapolated in a relatively straight-forward way to use
-larger fingerprints. That is, instead of a single byte prefix, we might use a
-two or three byte prefix. The implementation here implements N = {1, 2, 3}
-and always picks the largest N possible. The rationale is that the bigger the
-fingerprint, the fewer verification steps we'll do. Of course, if N is too
-large, then we'll end up doing too much on each step.
-
-The way to extend it is:
-
-1. Add a mask for each byte in the fingerprint. (Remember that each mask is
-   composed of two SIMD vectors.) This results in a value of `C` for each byte
-   in the fingerprint while searching.
-2. When testing each 16 (or 32) byte block, each value of `C` must be shifted
-   so that they are aligned. Once aligned, they should all be `AND`'d together.
-   This will give you only the bitsets corresponding to the full match of the
-   fingerprint. To do this, one needs to save the last byte (for N=2) or last
-   two bytes (for N=3) from the previous iteration, and then line them up with
-   the first one or two bytes of the next iteration.
-
-## Verification
-
-Verification generally follows the procedure outlined above. The tricky parts
-are in the right formulation of operations to get our bits out of our vectors.
-We have a limited set of operations available to us on SIMD vectors as 128-bit
-or 256-bit numbers, so we wind up needing to rip out 2 (or 4) 64-bit integers
-from our vectors, and then run our verification step on each of those. The
-verification step looks at the least significant bit set, and from its
-position, we can derive the byte offset and bucket. (Again, as described
-above.) Once we know the bucket, we do a fairly naive exhaustive search for
-every literal in that bucket. (Hyperscan is a bit smarter here and uses a hash
-table, but I haven't had time to thoroughly explore that. A few initial
-half-hearted attempts resulted in worse performance.)
-
-## AVX
-
-The AVX version of Teddy extrapolates almost perfectly from the SSE version.
-The only hickup is that PALIGNR is used to align chunks in the 16-bit version,
-and there is no equivalent instruction in AVX. AVX does have VPALIGNR, but it
-only works within 128-bit lanes. So there's a bit of tomfoolery to get around
-this by shuffling the vectors before calling VPALIGNR.
-
-The only other aspect to AVX is that since our masks are still fundamentally
-16-bytes (0x0-0xF), they are duplicated to 32-bytes, so that they can apply to
-32-byte chunks.
-
-## Fat Teddy
-
-In the version of Teddy described above, 8 buckets are used to group patterns
-that we want to search for. However, when AVX is available, we can extend the
-number of buckets to 16 by permitting each byte in our masks to use 16-bits
-instead of 8-bits to represent the buckets it belongs to. (This variant is also
-in Hyperscan.) However, what we give up is the ability to scan 32 bytes at a
-time, even though we're using AVX. Instead, we have to scan 16 bytes at a time.
-What we gain, though, is (hopefully) less work in our verification routine.
-It patterns are more spread out across more buckets, then there should overall
-be fewer false positives. In general, Fat Teddy permits us to grow our capacity
-a bit and search for more literals before Teddy gets overwhelmed.
-
-The tricky part of Fat Teddy is in how we adjust our masks and our verification
-procedure. For the masks, we simply represent the first 8 buckets in each of
-the low 16 bytes, and then the second 8 buckets in each of the high 16 bytes.
-Then, in the search loop, instead of loading 32 bytes from the haystack, we
-load the same 16 bytes from the haystack into both the low and high 16 byte
-portions of our 256-bit vector. So for example, a mask might look like this:
-
-    bits:   00100001 00000000 ... 11000000 00000000 00000001 ... 00000000
-    byte:      31       30           16       15       14            0
-    offset:    15       14           0        15       14            0
-    buckets:  8-15     8-15         8-15      0-7      0-7           0-7
-
-Where `byte` is the position in the vector (higher numbers corresponding to
-more significant bits), `offset` is the corresponding position in the haystack
-chunk, and `buckets` corresponds to the bucket assignments for that particular
-byte.
-
-In particular, notice that the bucket assignments for offset `0` are spread
-out between bytes `0` and `16`. This works well for the chunk-by-chunk search
-procedure, but verification really wants to process all bucket assignments for
-each offset at once. Otherwise, we might wind up finding a match at offset
-`1` in one the first 8 buckets, when we really should have reported a match
-at offset `0` in one of the second 8 buckets. (Because we want the leftmost
-match.)
-
-Thus, for verification, we rearrange the above vector such that it is a
-sequence of 16-bit integers, where the least significant 16-bit integer
-corresponds to all of the bucket assignments for offset `0`. So with the
-above vector, the least significant 16-bit integer would be
-
-    11000000 000000
-
-which was taken from bytes `16` and `0`. Then the verification step pretty much
-runs as described, except with 16 buckets instead of 8.
-
-
-# References
-
-- **[1]** [Hyperscan on GitHub](https://github.com/intel/hyperscan),
-    [webpage](https://www.hyperscan.io/)
-- **[2a]** Ben-Kiki, O., Bille, P., Breslauer, D., Gasieniec, L., Grossi, R.,
-    & Weimann, O. (2011).
-    _Optimal packed string matching_.
-    In LIPIcs-Leibniz International Proceedings in Informatics (Vol. 13).
-    Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik.
-    DOI: 10.4230/LIPIcs.FSTTCS.2011.423.
-    [PDF](https://drops.dagstuhl.de/opus/volltexte/2011/3355/pdf/37.pdf).
-- **[2b]** Ben-Kiki, O., Bille, P., Breslauer, D., Ga̧sieniec, L., Grossi, R.,
-    & Weimann, O. (2014).
-    _Towards optimal packed string matching_.
-    Theoretical Computer Science, 525, 111-129.
-    DOI: 10.1016/j.tcs.2013.06.013.
-    [PDF](https://www.cs.haifa.ac.il/~oren/Publications/bpsm.pdf).
-- **[3]** Bille, P. (2011).
-    _Fast searching in packed strings_.
-    Journal of Discrete Algorithms, 9(1), 49-56.
-    DOI: 10.1016/j.jda.2010.09.003.
-    [PDF](https://www.sciencedirect.com/science/article/pii/S1570866710000353).
-- **[4a]** Faro, S., & Külekci, M. O. (2012, October).
-    _Fast multiple string matching using streaming SIMD extensions technology_.
-    In String Processing and Information Retrieval (pp. 217-228).
-    Springer Berlin Heidelberg.
-    DOI: 10.1007/978-3-642-34109-0_23.
-    [PDF](https://www.dmi.unict.it/faro/papers/conference/faro32.pdf).
-- **[4b]** Faro, S., & Külekci, M. O. (2013, September).
-    _Towards a Very Fast Multiple String Matching Algorithm for Short Patterns_.
-    In Stringology (pp. 78-91).
-    [PDF](https://www.dmi.unict.it/faro/papers/conference/faro36.pdf).
-- **[4c]** Faro, S., & Külekci, M. O. (2013, January).
-    _Fast packed string matching for short patterns_.
-    In Proceedings of the Meeting on Algorithm Engineering & Expermiments
-    (pp. 113-121).
-    Society for Industrial and Applied Mathematics.
-    [PDF](https://arxiv.org/pdf/1209.6449.pdf).
-- **[4d]** Faro, S., & Külekci, M. O. (2014).
-    _Fast and flexible packed string matching_.
-    Journal of Discrete Algorithms, 28, 61-72.
-    DOI: 10.1016/j.jda.2014.07.003.
-
-[1_u]: https://github.com/intel/hyperscan
-[5_u]: https://software.intel.com/sites/landingpage/IntrinsicsGuide
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/builder.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/builder.rs
deleted file mode 100644
index e9bb68b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/builder.rs
+++ /dev/null
@@ -1,792 +0,0 @@
-use core::{
-    fmt::Debug,
-    panic::{RefUnwindSafe, UnwindSafe},
-};
-
-use alloc::sync::Arc;
-
-use crate::packed::{ext::Pointer, pattern::Patterns, teddy::generic::Match};
-
-/// A builder for constructing a Teddy matcher.
-///
-/// The builder primarily permits fine grained configuration of the Teddy
-/// matcher. Most options are made only available for testing/benchmarking
-/// purposes. In reality, options are automatically determined by the nature
-/// and number of patterns given to the builder.
-#[derive(Clone, Debug)]
-pub(crate) struct Builder {
-    /// When none, this is automatically determined. Otherwise, `false` means
-    /// slim Teddy is used (8 buckets) and `true` means fat Teddy is used
-    /// (16 buckets). Fat Teddy requires AVX2, so if that CPU feature isn't
-    /// available and Fat Teddy was requested, no matcher will be built.
-    only_fat: Option<bool>,
-    /// When none, this is automatically determined. Otherwise, `false` means
-    /// that 128-bit vectors will be used (up to SSSE3 instructions) where as
-    /// `true` means that 256-bit vectors will be used. As with `fat`, if
-    /// 256-bit vectors are requested and they aren't available, then a
-    /// searcher will not be built.
-    only_256bit: Option<bool>,
-    /// When true (the default), the number of patterns will be used as a
-    /// heuristic for refusing construction of a Teddy searcher. The point here
-    /// is that too many patterns can overwhelm Teddy. But this can be disabled
-    /// in cases where the caller knows better.
-    heuristic_pattern_limits: bool,
-}
-
-impl Default for Builder {
-    fn default() -> Builder {
-        Builder::new()
-    }
-}
-
-impl Builder {
-    /// Create a new builder for configuring a Teddy matcher.
-    pub(crate) fn new() -> Builder {
-        Builder {
-            only_fat: None,
-            only_256bit: None,
-            heuristic_pattern_limits: true,
-        }
-    }
-
-    /// Build a matcher for the set of patterns given. If a matcher could not
-    /// be built, then `None` is returned.
-    ///
-    /// Generally, a matcher isn't built if the necessary CPU features aren't
-    /// available, an unsupported target or if the searcher is believed to be
-    /// slower than standard techniques (i.e., if there are too many literals).
-    pub(crate) fn build(&self, patterns: Arc<Patterns>) -> Option<Searcher> {
-        self.build_imp(patterns)
-    }
-
-    /// Require the use of Fat (true) or Slim (false) Teddy. Fat Teddy uses
-    /// 16 buckets where as Slim Teddy uses 8 buckets. More buckets are useful
-    /// for a larger set of literals.
-    ///
-    /// `None` is the default, which results in an automatic selection based
-    /// on the number of literals and available CPU features.
-    pub(crate) fn only_fat(&mut self, yes: Option<bool>) -> &mut Builder {
-        self.only_fat = yes;
-        self
-    }
-
-    /// Request the use of 256-bit vectors (true) or 128-bit vectors (false).
-    /// Generally, a larger vector size is better since it either permits
-    /// matching more patterns or matching more bytes in the haystack at once.
-    ///
-    /// `None` is the default, which results in an automatic selection based on
-    /// the number of literals and available CPU features.
-    pub(crate) fn only_256bit(&mut self, yes: Option<bool>) -> &mut Builder {
-        self.only_256bit = yes;
-        self
-    }
-
-    /// Request that heuristic limitations on the number of patterns be
-    /// employed. This useful to disable for benchmarking where one wants to
-    /// explore how Teddy performs on large number of patterns even if the
-    /// heuristics would otherwise refuse construction.
-    ///
-    /// This is enabled by default.
-    pub(crate) fn heuristic_pattern_limits(
-        &mut self,
-        yes: bool,
-    ) -> &mut Builder {
-        self.heuristic_pattern_limits = yes;
-        self
-    }
-
-    fn build_imp(&self, patterns: Arc<Patterns>) -> Option<Searcher> {
-        let patlimit = self.heuristic_pattern_limits;
-        // There's no particular reason why we limit ourselves to little endian
-        // here, but it seems likely that some parts of Teddy as they are
-        // currently written (e.g., the uses of `trailing_zeros`) are likely
-        // wrong on non-little-endian targets. Such things are likely easy to
-        // fix, but at the time of writing (2023/09/18), I actually do not know
-        // how to test this code on a big-endian target. So for now, we're
-        // conservative and just bail out.
-        if !cfg!(target_endian = "little") {
-            debug!("skipping Teddy because target isn't little endian");
-            return None;
-        }
-        // Too many patterns will overwhelm Teddy and likely lead to slow
-        // downs, typically in the verification step.
-        if patlimit && patterns.len() > 64 {
-            debug!("skipping Teddy because of too many patterns");
-            return None;
-        }
-
-        #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-        {
-            use self::x86_64::{FatAVX2, SlimAVX2, SlimSSSE3};
-
-            let mask_len = core::cmp::min(4, patterns.minimum_len());
-            let beefy = patterns.len() > 32;
-            let has_avx2 = self::x86_64::is_available_avx2();
-            let has_ssse3 = has_avx2 || self::x86_64::is_available_ssse3();
-            let use_avx2 = if self.only_256bit == Some(true) {
-                if !has_avx2 {
-                    debug!(
-                    "skipping Teddy because avx2 was demanded but unavailable"
-                );
-                    return None;
-                }
-                true
-            } else if self.only_256bit == Some(false) {
-                if !has_ssse3 {
-                    debug!(
-                    "skipping Teddy because ssse3 was demanded but unavailable"
-                );
-                    return None;
-                }
-                false
-            } else if !has_ssse3 && !has_avx2 {
-                debug!(
-                    "skipping Teddy because ssse3 and avx2 are unavailable"
-                );
-                return None;
-            } else {
-                has_avx2
-            };
-            let fat = match self.only_fat {
-                None => use_avx2 && beefy,
-                Some(false) => false,
-                Some(true) if !use_avx2 => {
-                    debug!(
-                        "skipping Teddy because fat was demanded, but fat \
-                         Teddy requires avx2 which is unavailable"
-                    );
-                    return None;
-                }
-                Some(true) => true,
-            };
-            // Just like for aarch64, it's possible that too many patterns will
-            // overhwelm Teddy. Unlike aarch64 though, we have Fat teddy which
-            // helps things scale a bit more by spreading patterns over more
-            // buckets.
-            //
-            // These thresholds were determined by looking at the measurements
-            // for the rust/aho-corasick/packed/leftmost-first and
-            // rust/aho-corasick/dfa/leftmost-first engines on the `teddy/`
-            // benchmarks.
-            if patlimit && mask_len == 1 && patterns.len() > 16 {
-                debug!(
-                    "skipping Teddy (mask len: 1) because there are \
-                             too many patterns",
-                );
-                return None;
-            }
-            match (mask_len, use_avx2, fat) {
-                (1, false, _) => {
-                    debug!("Teddy choice: 128-bit slim, 1 byte");
-                    SlimSSSE3::<1>::new(&patterns)
-                }
-                (1, true, false) => {
-                    debug!("Teddy choice: 256-bit slim, 1 byte");
-                    SlimAVX2::<1>::new(&patterns)
-                }
-                (1, true, true) => {
-                    debug!("Teddy choice: 256-bit fat, 1 byte");
-                    FatAVX2::<1>::new(&patterns)
-                }
-                (2, false, _) => {
-                    debug!("Teddy choice: 128-bit slim, 2 bytes");
-                    SlimSSSE3::<2>::new(&patterns)
-                }
-                (2, true, false) => {
-                    debug!("Teddy choice: 256-bit slim, 2 bytes");
-                    SlimAVX2::<2>::new(&patterns)
-                }
-                (2, true, true) => {
-                    debug!("Teddy choice: 256-bit fat, 2 bytes");
-                    FatAVX2::<2>::new(&patterns)
-                }
-                (3, false, _) => {
-                    debug!("Teddy choice: 128-bit slim, 3 bytes");
-                    SlimSSSE3::<3>::new(&patterns)
-                }
-                (3, true, false) => {
-                    debug!("Teddy choice: 256-bit slim, 3 bytes");
-                    SlimAVX2::<3>::new(&patterns)
-                }
-                (3, true, true) => {
-                    debug!("Teddy choice: 256-bit fat, 3 bytes");
-                    FatAVX2::<3>::new(&patterns)
-                }
-                (4, false, _) => {
-                    debug!("Teddy choice: 128-bit slim, 4 bytes");
-                    SlimSSSE3::<4>::new(&patterns)
-                }
-                (4, true, false) => {
-                    debug!("Teddy choice: 256-bit slim, 4 bytes");
-                    SlimAVX2::<4>::new(&patterns)
-                }
-                (4, true, true) => {
-                    debug!("Teddy choice: 256-bit fat, 4 bytes");
-                    FatAVX2::<4>::new(&patterns)
-                }
-                _ => {
-                    debug!("no supported Teddy configuration found");
-                    None
-                }
-            }
-        }
-        #[cfg(all(
-            target_arch = "aarch64",
-            target_feature = "neon",
-            target_endian = "little"
-        ))]
-        {
-            use self::aarch64::SlimNeon;
-
-            let mask_len = core::cmp::min(4, patterns.minimum_len());
-            if self.only_256bit == Some(true) {
-                debug!(
-                    "skipping Teddy because 256-bits were demanded \
-                     but unavailable"
-                );
-                return None;
-            }
-            if self.only_fat == Some(true) {
-                debug!(
-                    "skipping Teddy because fat was demanded but unavailable"
-                );
-            }
-            // Since we don't have Fat teddy in aarch64 (I think we'd want at
-            // least 256-bit vectors for that), we need to be careful not to
-            // allow too many patterns as it might overwhelm Teddy. Generally
-            // speaking, as the mask length goes up, the more patterns we can
-            // handle because the mask length results in fewer candidates
-            // generated.
-            //
-            // These thresholds were determined by looking at the measurements
-            // for the rust/aho-corasick/packed/leftmost-first and
-            // rust/aho-corasick/dfa/leftmost-first engines on the `teddy/`
-            // benchmarks.
-            match mask_len {
-                1 => {
-                    if patlimit && patterns.len() > 16 {
-                        debug!(
-                            "skipping Teddy (mask len: 1) because there are \
-                             too many patterns",
-                        );
-                    }
-                    debug!("Teddy choice: 128-bit slim, 1 byte");
-                    SlimNeon::<1>::new(&patterns)
-                }
-                2 => {
-                    if patlimit && patterns.len() > 32 {
-                        debug!(
-                            "skipping Teddy (mask len: 2) because there are \
-                             too many patterns",
-                        );
-                    }
-                    debug!("Teddy choice: 128-bit slim, 2 bytes");
-                    SlimNeon::<2>::new(&patterns)
-                }
-                3 => {
-                    if patlimit && patterns.len() > 48 {
-                        debug!(
-                            "skipping Teddy (mask len: 3) because there are \
-                             too many patterns",
-                        );
-                    }
-                    debug!("Teddy choice: 128-bit slim, 3 bytes");
-                    SlimNeon::<3>::new(&patterns)
-                }
-                4 => {
-                    debug!("Teddy choice: 128-bit slim, 4 bytes");
-                    SlimNeon::<4>::new(&patterns)
-                }
-                _ => {
-                    debug!("no supported Teddy configuration found");
-                    None
-                }
-            }
-        }
-        #[cfg(not(any(
-            all(target_arch = "x86_64", target_feature = "sse2"),
-            all(
-                target_arch = "aarch64",
-                target_feature = "neon",
-                target_endian = "little"
-            )
-        )))]
-        {
-            None
-        }
-    }
-}
-
-/// A searcher that dispatches to one of several possible Teddy variants.
-#[derive(Clone, Debug)]
-pub(crate) struct Searcher {
-    /// The Teddy variant we use. We use dynamic dispatch under the theory that
-    /// it results in better codegen then a enum, although this is a specious
-    /// claim.
-    ///
-    /// This `Searcher` is essentially a wrapper for a `SearcherT` trait
-    /// object. We just make `memory_usage` and `minimum_len` available without
-    /// going through dynamic dispatch.
-    imp: Arc<dyn SearcherT>,
-    /// Total heap memory used by the Teddy variant.
-    memory_usage: usize,
-    /// The minimum haystack length this searcher can handle. It is intended
-    /// for callers to use some other search routine (such as Rabin-Karp) in
-    /// cases where the haystack (or remainer of the haystack) is too short.
-    minimum_len: usize,
-}
-
-impl Searcher {
-    /// Look for the leftmost occurrence of any pattern in this search in the
-    /// given haystack starting at the given position.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `haystack[at..].len()` is less than the minimum length
-    /// for this haystack.
-    #[inline(always)]
-    pub(crate) fn find(
-        &self,
-        haystack: &[u8],
-        at: usize,
-    ) -> Option<crate::Match> {
-        // SAFETY: The Teddy implementations all require a minimum haystack
-        // length, and this is required for safety. Therefore, we assert it
-        // here in order to make this method sound.
-        assert!(haystack[at..].len() >= self.minimum_len);
-        let hayptr = haystack.as_ptr();
-        // SAFETY: Construction of the searcher guarantees that we are able
-        // to run it in the current environment (i.e., we won't get an AVX2
-        // searcher on a x86-64 CPU without AVX2 support). Also, the pointers
-        // are valid as they are derived directly from a borrowed slice.
-        let teddym = unsafe {
-            self.imp.find(hayptr.add(at), hayptr.add(haystack.len()))?
-        };
-        let start = teddym.start().as_usize().wrapping_sub(hayptr.as_usize());
-        let end = teddym.end().as_usize().wrapping_sub(hayptr.as_usize());
-        let span = crate::Span { start, end };
-        // OK because we won't permit the construction of a searcher that
-        // could report a pattern ID bigger than what can fit in the crate-wide
-        // PatternID type.
-        let pid = crate::PatternID::new_unchecked(teddym.pattern().as_usize());
-        let m = crate::Match::new(pid, span);
-        Some(m)
-    }
-
-    /// Returns the approximate total amount of heap used by this type, in
-    /// units of bytes.
-    #[inline(always)]
-    pub(crate) fn memory_usage(&self) -> usize {
-        self.memory_usage
-    }
-
-    /// Returns the minimum length, in bytes, that a haystack must be in order
-    /// to use it with this searcher.
-    #[inline(always)]
-    pub(crate) fn minimum_len(&self) -> usize {
-        self.minimum_len
-    }
-}
-
-/// A trait that provides dynamic dispatch over the different possible Teddy
-/// variants on the same algorithm.
-///
-/// On `x86_64` for example, it isn't known until runtime which of 12 possible
-/// variants will be used. One might use one of the four slim 128-bit vector
-/// variants, or one of the four 256-bit vector variants or even one of the
-/// four fat 256-bit vector variants.
-///
-/// Since this choice is generally made when the Teddy searcher is constructed
-/// and this choice is based on the patterns given and what the current CPU
-/// supports, it follows that there must be some kind of indirection at search
-/// time that "selects" the variant chosen at build time.
-///
-/// There are a few different ways to go about this. One approach is to use an
-/// enum. It works fine, but in my experiments, this generally results in worse
-/// codegen. Another approach, which is what we use here, is dynamic dispatch
-/// via a trait object. We basically implement this trait for each possible
-/// variant, select the variant we want at build time and convert it to a
-/// trait object for use at search time.
-///
-/// Another approach is to use function pointers and stick each of the possible
-/// variants into a union. This is essentially isomorphic to the dynamic
-/// dispatch approach, but doesn't require any allocations. Since this crate
-/// requires `alloc`, there's no real reason (AFAIK) to go down this path. (The
-/// `memchr` crate does this.)
-trait SearcherT:
-    Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static
-{
-    /// Execute a search on the given haystack (identified by `start` and `end`
-    /// raw pointers).
-    ///
-    /// # Safety
-    ///
-    /// Essentially, the `start` and `end` pointers must be valid and point
-    /// to a haystack one can read. As long as you derive them from, for
-    /// example, a `&[u8]`, they should automatically satisfy all of the safety
-    /// obligations:
-    ///
-    /// * Both `start` and `end` must be valid for reads.
-    /// * Both `start` and `end` must point to an initialized value.
-    /// * Both `start` and `end` must point to the same allocated object and
-    /// must either be in bounds or at most one byte past the end of the
-    /// allocated object.
-    /// * Both `start` and `end` must be _derived from_ a pointer to the same
-    /// object.
-    /// * The distance between `start` and `end` must not overflow `isize`.
-    /// * The distance being in bounds must not rely on "wrapping around" the
-    /// address space.
-    /// * It must be the case that `start <= end`.
-    /// * `end - start` must be greater than the minimum length for this
-    /// searcher.
-    ///
-    /// Also, it is expected that implementations of this trait will tag this
-    /// method with a `target_feature` attribute. Callers must ensure that
-    /// they are executing this method in an environment where that attribute
-    /// is valid.
-    unsafe fn find(&self, start: *const u8, end: *const u8) -> Option<Match>;
-}
-
-#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-mod x86_64 {
-    use core::arch::x86_64::{__m128i, __m256i};
-
-    use alloc::sync::Arc;
-
-    use crate::packed::{
-        ext::Pointer,
-        pattern::Patterns,
-        teddy::generic::{self, Match},
-    };
-
-    use super::{Searcher, SearcherT};
-
-    #[derive(Clone, Debug)]
-    pub(super) struct SlimSSSE3<const BYTES: usize> {
-        slim128: generic::Slim<__m128i, BYTES>,
-    }
-
-    // Defines SlimSSSE3 wrapper functions for 1, 2, 3 and 4 bytes.
-    macro_rules! slim_ssse3 {
-        ($len:expr) => {
-            impl SlimSSSE3<$len> {
-                /// Creates a new searcher using "slim" Teddy with 128-bit
-                /// vectors. If SSSE3 is not available in the current
-                /// environment, then this returns `None`.
-                pub(super) fn new(
-                    patterns: &Arc<Patterns>,
-                ) -> Option<Searcher> {
-                    if !is_available_ssse3() {
-                        return None;
-                    }
-                    Some(unsafe { SlimSSSE3::<$len>::new_unchecked(patterns) })
-                }
-
-                /// Creates a new searcher using "slim" Teddy with 256-bit
-                /// vectors without checking whether SSSE3 is available or not.
-                ///
-                /// # Safety
-                ///
-                /// Callers must ensure that SSSE3 is available in the current
-                /// environment.
-                #[target_feature(enable = "ssse3")]
-                unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher {
-                    let slim128 = generic::Slim::<__m128i, $len>::new(
-                        Arc::clone(patterns),
-                    );
-                    let memory_usage = slim128.memory_usage();
-                    let minimum_len = slim128.minimum_len();
-                    let imp = Arc::new(SlimSSSE3 { slim128 });
-                    Searcher { imp, memory_usage, minimum_len }
-                }
-            }
-
-            impl SearcherT for SlimSSSE3<$len> {
-                #[target_feature(enable = "ssse3")]
-                #[inline]
-                unsafe fn find(
-                    &self,
-                    start: *const u8,
-                    end: *const u8,
-                ) -> Option<Match> {
-                    // SAFETY: All obligations except for `target_feature` are
-                    // passed to the caller. Our use of `target_feature` is
-                    // safe because construction of this type requires that the
-                    // requisite target features are available.
-                    self.slim128.find(start, end)
-                }
-            }
-        };
-    }
-
-    slim_ssse3!(1);
-    slim_ssse3!(2);
-    slim_ssse3!(3);
-    slim_ssse3!(4);
-
-    #[derive(Clone, Debug)]
-    pub(super) struct SlimAVX2<const BYTES: usize> {
-        slim128: generic::Slim<__m128i, BYTES>,
-        slim256: generic::Slim<__m256i, BYTES>,
-    }
-
-    // Defines SlimAVX2 wrapper functions for 1, 2, 3 and 4 bytes.
-    macro_rules! slim_avx2 {
-        ($len:expr) => {
-            impl SlimAVX2<$len> {
-                /// Creates a new searcher using "slim" Teddy with 256-bit
-                /// vectors. If AVX2 is not available in the current
-                /// environment, then this returns `None`.
-                pub(super) fn new(
-                    patterns: &Arc<Patterns>,
-                ) -> Option<Searcher> {
-                    if !is_available_avx2() {
-                        return None;
-                    }
-                    Some(unsafe { SlimAVX2::<$len>::new_unchecked(patterns) })
-                }
-
-                /// Creates a new searcher using "slim" Teddy with 256-bit
-                /// vectors without checking whether AVX2 is available or not.
-                ///
-                /// # Safety
-                ///
-                /// Callers must ensure that AVX2 is available in the current
-                /// environment.
-                #[target_feature(enable = "avx2")]
-                unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher {
-                    let slim128 = generic::Slim::<__m128i, $len>::new(
-                        Arc::clone(&patterns),
-                    );
-                    let slim256 = generic::Slim::<__m256i, $len>::new(
-                        Arc::clone(&patterns),
-                    );
-                    let memory_usage =
-                        slim128.memory_usage() + slim256.memory_usage();
-                    let minimum_len = slim128.minimum_len();
-                    let imp = Arc::new(SlimAVX2 { slim128, slim256 });
-                    Searcher { imp, memory_usage, minimum_len }
-                }
-            }
-
-            impl SearcherT for SlimAVX2<$len> {
-                #[target_feature(enable = "avx2")]
-                #[inline]
-                unsafe fn find(
-                    &self,
-                    start: *const u8,
-                    end: *const u8,
-                ) -> Option<Match> {
-                    // SAFETY: All obligations except for `target_feature` are
-                    // passed to the caller. Our use of `target_feature` is
-                    // safe because construction of this type requires that the
-                    // requisite target features are available.
-                    let len = end.distance(start);
-                    if len < self.slim256.minimum_len() {
-                        self.slim128.find(start, end)
-                    } else {
-                        self.slim256.find(start, end)
-                    }
-                }
-            }
-        };
-    }
-
-    slim_avx2!(1);
-    slim_avx2!(2);
-    slim_avx2!(3);
-    slim_avx2!(4);
-
-    #[derive(Clone, Debug)]
-    pub(super) struct FatAVX2<const BYTES: usize> {
-        fat256: generic::Fat<__m256i, BYTES>,
-    }
-
-    // Defines SlimAVX2 wrapper functions for 1, 2, 3 and 4 bytes.
-    macro_rules! fat_avx2 {
-        ($len:expr) => {
-            impl FatAVX2<$len> {
-                /// Creates a new searcher using "slim" Teddy with 256-bit
-                /// vectors. If AVX2 is not available in the current
-                /// environment, then this returns `None`.
-                pub(super) fn new(
-                    patterns: &Arc<Patterns>,
-                ) -> Option<Searcher> {
-                    if !is_available_avx2() {
-                        return None;
-                    }
-                    Some(unsafe { FatAVX2::<$len>::new_unchecked(patterns) })
-                }
-
-                /// Creates a new searcher using "slim" Teddy with 256-bit
-                /// vectors without checking whether AVX2 is available or not.
-                ///
-                /// # Safety
-                ///
-                /// Callers must ensure that AVX2 is available in the current
-                /// environment.
-                #[target_feature(enable = "avx2")]
-                unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher {
-                    let fat256 = generic::Fat::<__m256i, $len>::new(
-                        Arc::clone(&patterns),
-                    );
-                    let memory_usage = fat256.memory_usage();
-                    let minimum_len = fat256.minimum_len();
-                    let imp = Arc::new(FatAVX2 { fat256 });
-                    Searcher { imp, memory_usage, minimum_len }
-                }
-            }
-
-            impl SearcherT for FatAVX2<$len> {
-                #[target_feature(enable = "avx2")]
-                #[inline]
-                unsafe fn find(
-                    &self,
-                    start: *const u8,
-                    end: *const u8,
-                ) -> Option<Match> {
-                    // SAFETY: All obligations except for `target_feature` are
-                    // passed to the caller. Our use of `target_feature` is
-                    // safe because construction of this type requires that the
-                    // requisite target features are available.
-                    self.fat256.find(start, end)
-                }
-            }
-        };
-    }
-
-    fat_avx2!(1);
-    fat_avx2!(2);
-    fat_avx2!(3);
-    fat_avx2!(4);
-
-    #[inline]
-    pub(super) fn is_available_ssse3() -> bool {
-        #[cfg(not(target_feature = "sse2"))]
-        {
-            false
-        }
-        #[cfg(target_feature = "sse2")]
-        {
-            #[cfg(target_feature = "ssse3")]
-            {
-                true
-            }
-            #[cfg(not(target_feature = "ssse3"))]
-            {
-                #[cfg(feature = "std")]
-                {
-                    std::is_x86_feature_detected!("ssse3")
-                }
-                #[cfg(not(feature = "std"))]
-                {
-                    false
-                }
-            }
-        }
-    }
-
-    #[inline]
-    pub(super) fn is_available_avx2() -> bool {
-        #[cfg(not(target_feature = "sse2"))]
-        {
-            false
-        }
-        #[cfg(target_feature = "sse2")]
-        {
-            #[cfg(target_feature = "avx2")]
-            {
-                true
-            }
-            #[cfg(not(target_feature = "avx2"))]
-            {
-                #[cfg(feature = "std")]
-                {
-                    std::is_x86_feature_detected!("avx2")
-                }
-                #[cfg(not(feature = "std"))]
-                {
-                    false
-                }
-            }
-        }
-    }
-}
-
-#[cfg(all(
-    target_arch = "aarch64",
-    target_feature = "neon",
-    target_endian = "little"
-))]
-mod aarch64 {
-    use core::arch::aarch64::uint8x16_t;
-
-    use alloc::sync::Arc;
-
-    use crate::packed::{
-        pattern::Patterns,
-        teddy::generic::{self, Match},
-    };
-
-    use super::{Searcher, SearcherT};
-
-    #[derive(Clone, Debug)]
-    pub(super) struct SlimNeon<const BYTES: usize> {
-        slim128: generic::Slim<uint8x16_t, BYTES>,
-    }
-
-    // Defines SlimSSSE3 wrapper functions for 1, 2, 3 and 4 bytes.
-    macro_rules! slim_neon {
-        ($len:expr) => {
-            impl SlimNeon<$len> {
-                /// Creates a new searcher using "slim" Teddy with 128-bit
-                /// vectors. If SSSE3 is not available in the current
-                /// environment, then this returns `None`.
-                pub(super) fn new(
-                    patterns: &Arc<Patterns>,
-                ) -> Option<Searcher> {
-                    Some(unsafe { SlimNeon::<$len>::new_unchecked(patterns) })
-                }
-
-                /// Creates a new searcher using "slim" Teddy with 256-bit
-                /// vectors without checking whether SSSE3 is available or not.
-                ///
-                /// # Safety
-                ///
-                /// Callers must ensure that SSSE3 is available in the current
-                /// environment.
-                #[target_feature(enable = "neon")]
-                unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher {
-                    let slim128 = generic::Slim::<uint8x16_t, $len>::new(
-                        Arc::clone(patterns),
-                    );
-                    let memory_usage = slim128.memory_usage();
-                    let minimum_len = slim128.minimum_len();
-                    let imp = Arc::new(SlimNeon { slim128 });
-                    Searcher { imp, memory_usage, minimum_len }
-                }
-            }
-
-            impl SearcherT for SlimNeon<$len> {
-                #[target_feature(enable = "neon")]
-                #[inline]
-                unsafe fn find(
-                    &self,
-                    start: *const u8,
-                    end: *const u8,
-                ) -> Option<Match> {
-                    // SAFETY: All obligations except for `target_feature` are
-                    // passed to the caller. Our use of `target_feature` is
-                    // safe because construction of this type requires that the
-                    // requisite target features are available.
-                    self.slim128.find(start, end)
-                }
-            }
-        };
-    }
-
-    slim_neon!(1);
-    slim_neon!(2);
-    slim_neon!(3);
-    slim_neon!(4);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/generic.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/generic.rs
deleted file mode 100644
index 2aacd00..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/generic.rs
+++ /dev/null
@@ -1,1382 +0,0 @@
-use core::fmt::Debug;
-
-use alloc::{
-    boxed::Box, collections::BTreeMap, format, sync::Arc, vec, vec::Vec,
-};
-
-use crate::{
-    packed::{
-        ext::Pointer,
-        pattern::Patterns,
-        vector::{FatVector, Vector},
-    },
-    util::int::U32,
-    PatternID,
-};
-
-/// A match type specialized to the Teddy implementations below.
-///
-/// Essentially, instead of representing a match at byte offsets, we use
-/// raw pointers. This is because the implementations below operate on raw
-/// pointers, and so this is a more natural return type based on how the
-/// implementation works.
-///
-/// Also, the `PatternID` used here is a `u16`.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct Match {
-    pid: PatternID,
-    start: *const u8,
-    end: *const u8,
-}
-
-impl Match {
-    /// Returns the ID of the pattern that matched.
-    pub(crate) fn pattern(&self) -> PatternID {
-        self.pid
-    }
-
-    /// Returns a pointer into the haystack at which the match starts.
-    pub(crate) fn start(&self) -> *const u8 {
-        self.start
-    }
-
-    /// Returns a pointer into the haystack at which the match ends.
-    pub(crate) fn end(&self) -> *const u8 {
-        self.end
-    }
-}
-
-/// A "slim" Teddy implementation that is generic over both the vector type
-/// and the minimum length of the patterns being searched for.
-///
-/// Only 1, 2, 3 and 4 bytes are supported as minimum lengths.
-#[derive(Clone, Debug)]
-pub(crate) struct Slim<V, const BYTES: usize> {
-    /// A generic data structure for doing "slim" Teddy verification.
-    teddy: Teddy<8>,
-    /// The masks used as inputs to the shuffle operation to generate
-    /// candidates (which are fed into the verification routines).
-    masks: [Mask<V>; BYTES],
-}
-
-impl<V: Vector, const BYTES: usize> Slim<V, BYTES> {
-    /// Create a new "slim" Teddy searcher for the given patterns.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `BYTES` is any value other than 1, 2, 3 or 4.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    pub(crate) unsafe fn new(patterns: Arc<Patterns>) -> Slim<V, BYTES> {
-        assert!(
-            1 <= BYTES && BYTES <= 4,
-            "only 1, 2, 3 or 4 bytes are supported"
-        );
-        let teddy = Teddy::new(patterns);
-        let masks = SlimMaskBuilder::from_teddy(&teddy);
-        Slim { teddy, masks }
-    }
-
-    /// Returns the approximate total amount of heap used by this type, in
-    /// units of bytes.
-    #[inline(always)]
-    pub(crate) fn memory_usage(&self) -> usize {
-        self.teddy.memory_usage()
-    }
-
-    /// Returns the minimum length, in bytes, that a haystack must be in order
-    /// to use it with this searcher.
-    #[inline(always)]
-    pub(crate) fn minimum_len(&self) -> usize {
-        V::BYTES + (BYTES - 1)
-    }
-}
-
-impl<V: Vector> Slim<V, 1> {
-    /// Look for an occurrences of the patterns in this finder in the haystack
-    /// given by the `start` and `end` pointers.
-    ///
-    /// If no match could be found, then `None` is returned.
-    ///
-    /// # Safety
-    ///
-    /// The given pointers representing the haystack must be valid to read
-    /// from. They must also point to a region of memory that is at least the
-    /// minimum length required by this searcher.
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start;
-        while cur <= end.sub(V::BYTES) {
-            if let Some(m) = self.find_one(cur, end) {
-                return Some(m);
-            }
-            cur = cur.add(V::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::BYTES);
-            if let Some(m) = self.find_one(cur, end) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// Look for a match starting at the `V::BYTES` at and after `cur`. If
-    /// there isn't one, then `None` is returned.
-    ///
-    /// # Safety
-    ///
-    /// The given pointers representing the haystack must be valid to read
-    /// from. They must also point to a region of memory that is at least the
-    /// minimum length required by this searcher.
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let c = self.candidate(cur);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur, end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// Look for a candidate match (represented as a vector) starting at the
-    /// `V::BYTES` at and after `cur`. If there isn't one, then a vector with
-    /// all bits set to zero is returned.
-    ///
-    /// # Safety
-    ///
-    /// The given pointer representing the haystack must be valid to read
-    /// from.
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn candidate(&self, cur: *const u8) -> V {
-        let chunk = V::load_unaligned(cur);
-        Mask::members1(chunk, self.masks)
-    }
-}
-
-impl<V: Vector> Slim<V, 2> {
-    /// See Slim<V, 1>::find.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start.add(1);
-        let mut prev0 = V::splat(0xFF);
-        while cur <= end.sub(V::BYTES) {
-            if let Some(m) = self.find_one(cur, end, &mut prev0) {
-                return Some(m);
-            }
-            cur = cur.add(V::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::BYTES);
-            prev0 = V::splat(0xFF);
-            if let Some(m) = self.find_one(cur, end, &mut prev0) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See Slim<V, 1>::find_one.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        prev0: &mut V,
-    ) -> Option<Match> {
-        let c = self.candidate(cur, prev0);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur.sub(1), end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See Slim<V, 1>::candidate.
-    #[inline(always)]
-    unsafe fn candidate(&self, cur: *const u8, prev0: &mut V) -> V {
-        let chunk = V::load_unaligned(cur);
-        let (res0, res1) = Mask::members2(chunk, self.masks);
-        let res0prev0 = res0.shift_in_one_byte(*prev0);
-        let res = res0prev0.and(res1);
-        *prev0 = res0;
-        res
-    }
-}
-
-impl<V: Vector> Slim<V, 3> {
-    /// See Slim<V, 1>::find.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start.add(2);
-        let mut prev0 = V::splat(0xFF);
-        let mut prev1 = V::splat(0xFF);
-        while cur <= end.sub(V::BYTES) {
-            if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) {
-                return Some(m);
-            }
-            cur = cur.add(V::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::BYTES);
-            prev0 = V::splat(0xFF);
-            prev1 = V::splat(0xFF);
-            if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See Slim<V, 1>::find_one.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-    ) -> Option<Match> {
-        let c = self.candidate(cur, prev0, prev1);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur.sub(2), end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See Slim<V, 1>::candidate.
-    #[inline(always)]
-    unsafe fn candidate(
-        &self,
-        cur: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-    ) -> V {
-        let chunk = V::load_unaligned(cur);
-        let (res0, res1, res2) = Mask::members3(chunk, self.masks);
-        let res0prev0 = res0.shift_in_two_bytes(*prev0);
-        let res1prev1 = res1.shift_in_one_byte(*prev1);
-        let res = res0prev0.and(res1prev1).and(res2);
-        *prev0 = res0;
-        *prev1 = res1;
-        res
-    }
-}
-
-impl<V: Vector> Slim<V, 4> {
-    /// See Slim<V, 1>::find.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start.add(3);
-        let mut prev0 = V::splat(0xFF);
-        let mut prev1 = V::splat(0xFF);
-        let mut prev2 = V::splat(0xFF);
-        while cur <= end.sub(V::BYTES) {
-            if let Some(m) =
-                self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2)
-            {
-                return Some(m);
-            }
-            cur = cur.add(V::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::BYTES);
-            prev0 = V::splat(0xFF);
-            prev1 = V::splat(0xFF);
-            prev2 = V::splat(0xFF);
-            if let Some(m) =
-                self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2)
-            {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See Slim<V, 1>::find_one.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-        prev2: &mut V,
-    ) -> Option<Match> {
-        let c = self.candidate(cur, prev0, prev1, prev2);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur.sub(3), end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See Slim<V, 1>::candidate.
-    #[inline(always)]
-    unsafe fn candidate(
-        &self,
-        cur: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-        prev2: &mut V,
-    ) -> V {
-        let chunk = V::load_unaligned(cur);
-        let (res0, res1, res2, res3) = Mask::members4(chunk, self.masks);
-        let res0prev0 = res0.shift_in_three_bytes(*prev0);
-        let res1prev1 = res1.shift_in_two_bytes(*prev1);
-        let res2prev2 = res2.shift_in_one_byte(*prev2);
-        let res = res0prev0.and(res1prev1).and(res2prev2).and(res3);
-        *prev0 = res0;
-        *prev1 = res1;
-        *prev2 = res2;
-        res
-    }
-}
-
-/// A "fat" Teddy implementation that is generic over both the vector type
-/// and the minimum length of the patterns being searched for.
-///
-/// Only 1, 2, 3 and 4 bytes are supported as minimum lengths.
-#[derive(Clone, Debug)]
-pub(crate) struct Fat<V, const BYTES: usize> {
-    /// A generic data structure for doing "fat" Teddy verification.
-    teddy: Teddy<16>,
-    /// The masks used as inputs to the shuffle operation to generate
-    /// candidates (which are fed into the verification routines).
-    masks: [Mask<V>; BYTES],
-}
-
-impl<V: FatVector, const BYTES: usize> Fat<V, BYTES> {
-    /// Create a new "fat" Teddy searcher for the given patterns.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `BYTES` is any value other than 1, 2, 3 or 4.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    pub(crate) unsafe fn new(patterns: Arc<Patterns>) -> Fat<V, BYTES> {
-        assert!(
-            1 <= BYTES && BYTES <= 4,
-            "only 1, 2, 3 or 4 bytes are supported"
-        );
-        let teddy = Teddy::new(patterns);
-        let masks = FatMaskBuilder::from_teddy(&teddy);
-        Fat { teddy, masks }
-    }
-
-    /// Returns the approximate total amount of heap used by this type, in
-    /// units of bytes.
-    #[inline(always)]
-    pub(crate) fn memory_usage(&self) -> usize {
-        self.teddy.memory_usage()
-    }
-
-    /// Returns the minimum length, in bytes, that a haystack must be in order
-    /// to use it with this searcher.
-    #[inline(always)]
-    pub(crate) fn minimum_len(&self) -> usize {
-        V::Half::BYTES + (BYTES - 1)
-    }
-}
-
-impl<V: FatVector> Fat<V, 1> {
-    /// Look for an occurrences of the patterns in this finder in the haystack
-    /// given by the `start` and `end` pointers.
-    ///
-    /// If no match could be found, then `None` is returned.
-    ///
-    /// # Safety
-    ///
-    /// The given pointers representing the haystack must be valid to read
-    /// from. They must also point to a region of memory that is at least the
-    /// minimum length required by this searcher.
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start;
-        while cur <= end.sub(V::Half::BYTES) {
-            if let Some(m) = self.find_one(cur, end) {
-                return Some(m);
-            }
-            cur = cur.add(V::Half::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::Half::BYTES);
-            if let Some(m) = self.find_one(cur, end) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// Look for a match starting at the `V::BYTES` at and after `cur`. If
-    /// there isn't one, then `None` is returned.
-    ///
-    /// # Safety
-    ///
-    /// The given pointers representing the haystack must be valid to read
-    /// from. They must also point to a region of memory that is at least the
-    /// minimum length required by this searcher.
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let c = self.candidate(cur);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur, end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// Look for a candidate match (represented as a vector) starting at the
-    /// `V::BYTES` at and after `cur`. If there isn't one, then a vector with
-    /// all bits set to zero is returned.
-    ///
-    /// # Safety
-    ///
-    /// The given pointer representing the haystack must be valid to read
-    /// from.
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn candidate(&self, cur: *const u8) -> V {
-        let chunk = V::load_half_unaligned(cur);
-        Mask::members1(chunk, self.masks)
-    }
-}
-
-impl<V: FatVector> Fat<V, 2> {
-    /// See `Fat<V, 1>::find`.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start.add(1);
-        let mut prev0 = V::splat(0xFF);
-        while cur <= end.sub(V::Half::BYTES) {
-            if let Some(m) = self.find_one(cur, end, &mut prev0) {
-                return Some(m);
-            }
-            cur = cur.add(V::Half::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::Half::BYTES);
-            prev0 = V::splat(0xFF);
-            if let Some(m) = self.find_one(cur, end, &mut prev0) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See `Fat<V, 1>::find_one`.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        prev0: &mut V,
-    ) -> Option<Match> {
-        let c = self.candidate(cur, prev0);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur.sub(1), end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See `Fat<V, 1>::candidate`.
-    #[inline(always)]
-    unsafe fn candidate(&self, cur: *const u8, prev0: &mut V) -> V {
-        let chunk = V::load_half_unaligned(cur);
-        let (res0, res1) = Mask::members2(chunk, self.masks);
-        let res0prev0 = res0.half_shift_in_one_byte(*prev0);
-        let res = res0prev0.and(res1);
-        *prev0 = res0;
-        res
-    }
-}
-
-impl<V: FatVector> Fat<V, 3> {
-    /// See `Fat<V, 1>::find`.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start.add(2);
-        let mut prev0 = V::splat(0xFF);
-        let mut prev1 = V::splat(0xFF);
-        while cur <= end.sub(V::Half::BYTES) {
-            if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) {
-                return Some(m);
-            }
-            cur = cur.add(V::Half::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::Half::BYTES);
-            prev0 = V::splat(0xFF);
-            prev1 = V::splat(0xFF);
-            if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See `Fat<V, 1>::find_one`.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-    ) -> Option<Match> {
-        let c = self.candidate(cur, prev0, prev1);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur.sub(2), end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See `Fat<V, 1>::candidate`.
-    #[inline(always)]
-    unsafe fn candidate(
-        &self,
-        cur: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-    ) -> V {
-        let chunk = V::load_half_unaligned(cur);
-        let (res0, res1, res2) = Mask::members3(chunk, self.masks);
-        let res0prev0 = res0.half_shift_in_two_bytes(*prev0);
-        let res1prev1 = res1.half_shift_in_one_byte(*prev1);
-        let res = res0prev0.and(res1prev1).and(res2);
-        *prev0 = res0;
-        *prev1 = res1;
-        res
-    }
-}
-
-impl<V: FatVector> Fat<V, 4> {
-    /// See `Fat<V, 1>::find`.
-    #[inline(always)]
-    pub(crate) unsafe fn find(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<Match> {
-        let len = end.distance(start);
-        debug_assert!(len >= self.minimum_len());
-        let mut cur = start.add(3);
-        let mut prev0 = V::splat(0xFF);
-        let mut prev1 = V::splat(0xFF);
-        let mut prev2 = V::splat(0xFF);
-        while cur <= end.sub(V::Half::BYTES) {
-            if let Some(m) =
-                self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2)
-            {
-                return Some(m);
-            }
-            cur = cur.add(V::Half::BYTES);
-        }
-        if cur < end {
-            cur = end.sub(V::Half::BYTES);
-            prev0 = V::splat(0xFF);
-            prev1 = V::splat(0xFF);
-            prev2 = V::splat(0xFF);
-            if let Some(m) =
-                self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2)
-            {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See `Fat<V, 1>::find_one`.
-    #[inline(always)]
-    unsafe fn find_one(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-        prev2: &mut V,
-    ) -> Option<Match> {
-        let c = self.candidate(cur, prev0, prev1, prev2);
-        if !c.is_zero() {
-            if let Some(m) = self.teddy.verify(cur.sub(3), end, c) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// See `Fat<V, 1>::candidate`.
-    #[inline(always)]
-    unsafe fn candidate(
-        &self,
-        cur: *const u8,
-        prev0: &mut V,
-        prev1: &mut V,
-        prev2: &mut V,
-    ) -> V {
-        let chunk = V::load_half_unaligned(cur);
-        let (res0, res1, res2, res3) = Mask::members4(chunk, self.masks);
-        let res0prev0 = res0.half_shift_in_three_bytes(*prev0);
-        let res1prev1 = res1.half_shift_in_two_bytes(*prev1);
-        let res2prev2 = res2.half_shift_in_one_byte(*prev2);
-        let res = res0prev0.and(res1prev1).and(res2prev2).and(res3);
-        *prev0 = res0;
-        *prev1 = res1;
-        *prev2 = res2;
-        res
-    }
-}
-
-/// The common elements of all "slim" and "fat" Teddy search implementations.
-///
-/// Essentially, this contains the patterns and the buckets. Namely, it
-/// contains enough to implement the verification step after candidates are
-/// identified via the shuffle masks.
-///
-/// It is generic over the number of buckets used. In general, the number of
-/// buckets is either 8 (for "slim" Teddy) or 16 (for "fat" Teddy). The generic
-/// parameter isn't really meant to be instantiated for any value other than
-/// 8 or 16, although it is technically possible. The main hiccup is that there
-/// is some bit-shifting done in the critical part of verification that could
-/// be quite expensive if `N` is not a multiple of 2.
-#[derive(Clone, Debug)]
-struct Teddy<const BUCKETS: usize> {
-    /// The patterns we are searching for.
-    ///
-    /// A pattern string can be found by its `PatternID`.
-    patterns: Arc<Patterns>,
-    /// The allocation of patterns in buckets. This only contains the IDs of
-    /// patterns. In order to do full verification, callers must provide the
-    /// actual patterns when using Teddy.
-    buckets: [Vec<PatternID>; BUCKETS],
-    // N.B. The above representation is very simple, but it definitely results
-    // in ping-ponging between different allocations during verification. I've
-    // tried experimenting with other representations that flatten the pattern
-    // strings into a single allocation, but it doesn't seem to help much.
-    // Probably everything is small enough to fit into cache anyway, and so the
-    // pointer chasing isn't a big deal?
-    //
-    // One other avenue I haven't explored is some kind of hashing trick
-    // that let's us do another high-confidence check before launching into
-    // `memcmp`.
-}
-
-impl<const BUCKETS: usize> Teddy<BUCKETS> {
-    /// Create a new generic data structure for Teddy verification.
-    fn new(patterns: Arc<Patterns>) -> Teddy<BUCKETS> {
-        assert_ne!(0, patterns.len(), "Teddy requires at least one pattern");
-        assert_ne!(
-            0,
-            patterns.minimum_len(),
-            "Teddy does not support zero-length patterns"
-        );
-        assert!(
-            BUCKETS == 8 || BUCKETS == 16,
-            "Teddy only supports 8 or 16 buckets"
-        );
-        // MSRV(1.63): Use core::array::from_fn below instead of allocating a
-        // superfluous outer Vec. Not a big deal (especially given the BTreeMap
-        // allocation below), but nice to not do it.
-        let buckets =
-            <[Vec<PatternID>; BUCKETS]>::try_from(vec![vec![]; BUCKETS])
-                .unwrap();
-        let mut t = Teddy { patterns, buckets };
-
-        let mut map: BTreeMap<Box<[u8]>, usize> = BTreeMap::new();
-        for (id, pattern) in t.patterns.iter() {
-            // We try to be slightly clever in how we assign patterns into
-            // buckets. Generally speaking, we want patterns with the same
-            // prefix to be in the same bucket, since it minimizes the amount
-            // of time we spend churning through buckets in the verification
-            // step.
-            //
-            // So we could assign patterns with the same N-prefix (where N is
-            // the size of the mask, which is one of {1, 2, 3}) to the same
-            // bucket. However, case insensitive searches are fairly common, so
-            // we'd for example, ideally want to treat `abc` and `ABC` as if
-            // they shared the same prefix. ASCII has the nice property that
-            // the lower 4 bits of A and a are the same, so we therefore group
-            // patterns with the same low-nybble-N-prefix into the same bucket.
-            //
-            // MOREOVER, this is actually necessary for correctness! In
-            // particular, by grouping patterns with the same prefix into the
-            // same bucket, we ensure that we preserve correct leftmost-first
-            // and leftmost-longest match semantics. In addition to the fact
-            // that `patterns.iter()` iterates in the correct order, this
-            // guarantees that all possible ambiguous matches will occur in
-            // the same bucket. The verification routine could be adjusted to
-            // support correct leftmost match semantics regardless of bucket
-            // allocation, but that results in a performance hit. It's much
-            // nicer to be able to just stop as soon as a match is found.
-            let lonybs = pattern.low_nybbles(t.mask_len());
-            if let Some(&bucket) = map.get(&lonybs) {
-                t.buckets[bucket].push(id);
-            } else {
-                // N.B. We assign buckets in reverse because it shouldn't have
-                // any influence on performance, but it does make it harder to
-                // get leftmost match semantics accidentally correct.
-                let bucket = (BUCKETS - 1) - (id.as_usize() % BUCKETS);
-                t.buckets[bucket].push(id);
-                map.insert(lonybs, bucket);
-            }
-        }
-        t
-    }
-
-    /// Verify whether there are any matches starting at or after `cur` in the
-    /// haystack. The candidate chunk given should correspond to 8-bit bitsets
-    /// for N buckets.
-    ///
-    /// # Safety
-    ///
-    /// The given pointers representing the haystack must be valid to read
-    /// from.
-    #[inline(always)]
-    unsafe fn verify64(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        mut candidate_chunk: u64,
-    ) -> Option<Match> {
-        while candidate_chunk != 0 {
-            let bit = candidate_chunk.trailing_zeros().as_usize();
-            candidate_chunk &= !(1 << bit);
-
-            let cur = cur.add(bit / BUCKETS);
-            let bucket = bit % BUCKETS;
-            if let Some(m) = self.verify_bucket(cur, end, bucket) {
-                return Some(m);
-            }
-        }
-        None
-    }
-
-    /// Verify whether there are any matches starting at `at` in the given
-    /// `haystack` corresponding only to patterns in the given bucket.
-    ///
-    /// # Safety
-    ///
-    /// The given pointers representing the haystack must be valid to read
-    /// from.
-    ///
-    /// The bucket index must be less than or equal to `self.buckets.len()`.
-    #[inline(always)]
-    unsafe fn verify_bucket(
-        &self,
-        cur: *const u8,
-        end: *const u8,
-        bucket: usize,
-    ) -> Option<Match> {
-        debug_assert!(bucket < self.buckets.len());
-        // SAFETY: The caller must ensure that the bucket index is correct.
-        for pid in self.buckets.get_unchecked(bucket).iter().copied() {
-            // SAFETY: This is safe because we are guaranteed that every
-            // index in a Teddy bucket is a valid index into `pats`, by
-            // construction.
-            debug_assert!(pid.as_usize() < self.patterns.len());
-            let pat = self.patterns.get_unchecked(pid);
-            if pat.is_prefix_raw(cur, end) {
-                let start = cur;
-                let end = start.add(pat.len());
-                return Some(Match { pid, start, end });
-            }
-        }
-        None
-    }
-
-    /// Returns the total number of masks required by the patterns in this
-    /// Teddy searcher.
-    ///
-    /// Basically, the mask length corresponds to the type of Teddy searcher
-    /// to use: a 1-byte, 2-byte, 3-byte or 4-byte searcher. The bigger the
-    /// better, typically, since searching for longer substrings usually
-    /// decreases the rate of false positives. Therefore, the number of masks
-    /// needed is the length of the shortest pattern in this searcher. If the
-    /// length of the shortest pattern (in bytes) is bigger than 4, then the
-    /// mask length is 4 since there are no Teddy searchers for more than 4
-    /// bytes.
-    fn mask_len(&self) -> usize {
-        core::cmp::min(4, self.patterns.minimum_len())
-    }
-
-    /// Returns the approximate total amount of heap used by this type, in
-    /// units of bytes.
-    fn memory_usage(&self) -> usize {
-        // This is an upper bound rather than a precise accounting. No
-        // particular reason, other than it's probably very close to actual
-        // memory usage in practice.
-        self.patterns.len() * core::mem::size_of::<PatternID>()
-    }
-}
-
-impl Teddy<8> {
-    /// Runs the verification routine for "slim" Teddy.
-    ///
-    /// The candidate given should be a collection of 8-bit bitsets (one bitset
-    /// per lane), where the ith bit is set in the jth lane if and only if the
-    /// byte occurring at `at + j` in `cur` is in the bucket `i`.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    ///
-    /// The given pointers must be valid to read from.
-    #[inline(always)]
-    unsafe fn verify<V: Vector>(
-        &self,
-        mut cur: *const u8,
-        end: *const u8,
-        candidate: V,
-    ) -> Option<Match> {
-        debug_assert!(!candidate.is_zero());
-        // Convert the candidate into 64-bit chunks, and then verify each of
-        // those chunks.
-        candidate.for_each_64bit_lane(
-            #[inline(always)]
-            |_, chunk| {
-                let result = self.verify64(cur, end, chunk);
-                cur = cur.add(8);
-                result
-            },
-        )
-    }
-}
-
-impl Teddy<16> {
-    /// Runs the verification routine for "fat" Teddy.
-    ///
-    /// The candidate given should be a collection of 8-bit bitsets (one bitset
-    /// per lane), where the ith bit is set in the jth lane if and only if the
-    /// byte occurring at `at + (j < 16 ? j : j - 16)` in `cur` is in the
-    /// bucket `j < 16 ? i : i + 8`.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    ///
-    /// The given pointers must be valid to read from.
-    #[inline(always)]
-    unsafe fn verify<V: FatVector>(
-        &self,
-        mut cur: *const u8,
-        end: *const u8,
-        candidate: V,
-    ) -> Option<Match> {
-        // This is a bit tricky, but we basically want to convert our
-        // candidate, which looks like this (assuming a 256-bit vector):
-        //
-        //     a31 a30 ... a17 a16 a15 a14 ... a01 a00
-        //
-        // where each a(i) is an 8-bit bitset corresponding to the activated
-        // buckets, to this
-        //
-        //     a31 a15 a30 a14 a29 a13 ... a18 a02 a17 a01 a16 a00
-        //
-        // Namely, for Fat Teddy, the high 128-bits of the candidate correspond
-        // to the same bytes in the haystack in the low 128-bits (so we only
-        // scan 16 bytes at a time), but are for buckets 8-15 instead of 0-7.
-        //
-        // The verification routine wants to look at all potentially matching
-        // buckets before moving on to the next lane. So for example, both
-        // a16 and a00 both correspond to the first byte in our window; a00
-        // contains buckets 0-7 and a16 contains buckets 8-15. Specifically,
-        // a16 should be checked before a01. So the transformation shown above
-        // allows us to use our normal verification procedure with one small
-        // change: we treat each bitset as 16 bits instead of 8 bits.
-        debug_assert!(!candidate.is_zero());
-
-        // Swap the 128-bit lanes in the candidate vector.
-        let swapped = candidate.swap_halves();
-        // Interleave the bytes from the low 128-bit lanes, starting with
-        // cand first.
-        let r1 = candidate.interleave_low_8bit_lanes(swapped);
-        // Interleave the bytes from the high 128-bit lanes, starting with
-        // cand first.
-        let r2 = candidate.interleave_high_8bit_lanes(swapped);
-        // Now just take the 2 low 64-bit integers from both r1 and r2. We
-        // can drop the high 64-bit integers because they are a mirror image
-        // of the low 64-bit integers. All we care about are the low 128-bit
-        // lanes of r1 and r2. Combined, they contain all our 16-bit bitsets
-        // laid out in the desired order, as described above.
-        r1.for_each_low_64bit_lane(
-            r2,
-            #[inline(always)]
-            |_, chunk| {
-                let result = self.verify64(cur, end, chunk);
-                cur = cur.add(4);
-                result
-            },
-        )
-    }
-}
-
-/// A vector generic mask for the low and high nybbles in a set of patterns.
-/// Each 8-bit lane `j` in a vector corresponds to a bitset where the `i`th bit
-/// is set if and only if the nybble `j` is in the bucket `i` at a particular
-/// position.
-///
-/// This is slightly tweaked dependending on whether Slim or Fat Teddy is being
-/// used. For Slim Teddy, the bitsets in the lower half are the same as the
-/// bitsets in the higher half, so that we can search `V::BYTES` bytes at a
-/// time. (Remember, the nybbles in the haystack are used as indices into these
-/// masks, and 256-bit shuffles only operate on 128-bit lanes.)
-///
-/// For Fat Teddy, the bitsets are not repeated, but instead, the high half
-/// bits correspond to an addition 8 buckets. So that a bitset `00100010` has
-/// buckets 1 and 5 set if it's in the lower half, but has buckets 9 and 13 set
-/// if it's in the higher half.
-#[derive(Clone, Copy, Debug)]
-struct Mask<V> {
-    lo: V,
-    hi: V,
-}
-
-impl<V: Vector> Mask<V> {
-    /// Return a candidate for Teddy (fat or slim) that is searching for 1-byte
-    /// candidates.
-    ///
-    /// If a candidate is returned, it will be a collection of 8-bit bitsets
-    /// (one bitset per lane), where the ith bit is set in the jth lane if and
-    /// only if the byte occurring at the jth lane in `chunk` is in the bucket
-    /// `i`. If no candidate is found, then the vector returned will have all
-    /// lanes set to zero.
-    ///
-    /// `chunk` should correspond to a `V::BYTES` window of the haystack (where
-    /// the least significant byte corresponds to the start of the window). For
-    /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with
-    /// the window repeated in each half of the vector.
-    ///
-    /// `mask1` should correspond to a low/high mask for the first byte of all
-    /// patterns that are being searched.
-    #[inline(always)]
-    unsafe fn members1(chunk: V, masks: [Mask<V>; 1]) -> V {
-        let lomask = V::splat(0xF);
-        let hlo = chunk.and(lomask);
-        let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask);
-        let locand = masks[0].lo.shuffle_bytes(hlo);
-        let hicand = masks[0].hi.shuffle_bytes(hhi);
-        locand.and(hicand)
-    }
-
-    /// Return a candidate for Teddy (fat or slim) that is searching for 2-byte
-    /// candidates.
-    ///
-    /// If candidates are returned, each will be a collection of 8-bit bitsets
-    /// (one bitset per lane), where the ith bit is set in the jth lane if and
-    /// only if the byte occurring at the jth lane in `chunk` is in the bucket
-    /// `i`. Each candidate returned corresponds to the first and second bytes
-    /// of the patterns being searched. If no candidate is found, then all of
-    /// the lanes will be set to zero in at least one of the vectors returned.
-    ///
-    /// `chunk` should correspond to a `V::BYTES` window of the haystack (where
-    /// the least significant byte corresponds to the start of the window). For
-    /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with
-    /// the window repeated in each half of the vector.
-    ///
-    /// The masks should correspond to the masks computed for the first and
-    /// second bytes of all patterns that are being searched.
-    #[inline(always)]
-    unsafe fn members2(chunk: V, masks: [Mask<V>; 2]) -> (V, V) {
-        let lomask = V::splat(0xF);
-        let hlo = chunk.and(lomask);
-        let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask);
-
-        let locand1 = masks[0].lo.shuffle_bytes(hlo);
-        let hicand1 = masks[0].hi.shuffle_bytes(hhi);
-        let cand1 = locand1.and(hicand1);
-
-        let locand2 = masks[1].lo.shuffle_bytes(hlo);
-        let hicand2 = masks[1].hi.shuffle_bytes(hhi);
-        let cand2 = locand2.and(hicand2);
-
-        (cand1, cand2)
-    }
-
-    /// Return a candidate for Teddy (fat or slim) that is searching for 3-byte
-    /// candidates.
-    ///
-    /// If candidates are returned, each will be a collection of 8-bit bitsets
-    /// (one bitset per lane), where the ith bit is set in the jth lane if and
-    /// only if the byte occurring at the jth lane in `chunk` is in the bucket
-    /// `i`. Each candidate returned corresponds to the first, second and third
-    /// bytes of the patterns being searched. If no candidate is found, then
-    /// all of the lanes will be set to zero in at least one of the vectors
-    /// returned.
-    ///
-    /// `chunk` should correspond to a `V::BYTES` window of the haystack (where
-    /// the least significant byte corresponds to the start of the window). For
-    /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with
-    /// the window repeated in each half of the vector.
-    ///
-    /// The masks should correspond to the masks computed for the first, second
-    /// and third bytes of all patterns that are being searched.
-    #[inline(always)]
-    unsafe fn members3(chunk: V, masks: [Mask<V>; 3]) -> (V, V, V) {
-        let lomask = V::splat(0xF);
-        let hlo = chunk.and(lomask);
-        let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask);
-
-        let locand1 = masks[0].lo.shuffle_bytes(hlo);
-        let hicand1 = masks[0].hi.shuffle_bytes(hhi);
-        let cand1 = locand1.and(hicand1);
-
-        let locand2 = masks[1].lo.shuffle_bytes(hlo);
-        let hicand2 = masks[1].hi.shuffle_bytes(hhi);
-        let cand2 = locand2.and(hicand2);
-
-        let locand3 = masks[2].lo.shuffle_bytes(hlo);
-        let hicand3 = masks[2].hi.shuffle_bytes(hhi);
-        let cand3 = locand3.and(hicand3);
-
-        (cand1, cand2, cand3)
-    }
-
-    /// Return a candidate for Teddy (fat or slim) that is searching for 4-byte
-    /// candidates.
-    ///
-    /// If candidates are returned, each will be a collection of 8-bit bitsets
-    /// (one bitset per lane), where the ith bit is set in the jth lane if and
-    /// only if the byte occurring at the jth lane in `chunk` is in the bucket
-    /// `i`. Each candidate returned corresponds to the first, second, third
-    /// and fourth bytes of the patterns being searched. If no candidate is
-    /// found, then all of the lanes will be set to zero in at least one of the
-    /// vectors returned.
-    ///
-    /// `chunk` should correspond to a `V::BYTES` window of the haystack (where
-    /// the least significant byte corresponds to the start of the window). For
-    /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with
-    /// the window repeated in each half of the vector.
-    ///
-    /// The masks should correspond to the masks computed for the first,
-    /// second, third and fourth bytes of all patterns that are being searched.
-    #[inline(always)]
-    unsafe fn members4(chunk: V, masks: [Mask<V>; 4]) -> (V, V, V, V) {
-        let lomask = V::splat(0xF);
-        let hlo = chunk.and(lomask);
-        let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask);
-
-        let locand1 = masks[0].lo.shuffle_bytes(hlo);
-        let hicand1 = masks[0].hi.shuffle_bytes(hhi);
-        let cand1 = locand1.and(hicand1);
-
-        let locand2 = masks[1].lo.shuffle_bytes(hlo);
-        let hicand2 = masks[1].hi.shuffle_bytes(hhi);
-        let cand2 = locand2.and(hicand2);
-
-        let locand3 = masks[2].lo.shuffle_bytes(hlo);
-        let hicand3 = masks[2].hi.shuffle_bytes(hhi);
-        let cand3 = locand3.and(hicand3);
-
-        let locand4 = masks[3].lo.shuffle_bytes(hlo);
-        let hicand4 = masks[3].hi.shuffle_bytes(hhi);
-        let cand4 = locand4.and(hicand4);
-
-        (cand1, cand2, cand3, cand4)
-    }
-}
-
-/// Represents the low and high nybble masks that will be used during
-/// search. Each mask is 32 bytes wide, although only the first 16 bytes are
-/// used for 128-bit vectors.
-///
-/// Each byte in the mask corresponds to a 8-bit bitset, where bit `i` is set
-/// if and only if the corresponding nybble is in the ith bucket. The index of
-/// the byte (0-15, inclusive) corresponds to the nybble.
-///
-/// Each mask is used as the target of a shuffle, where the indices for the
-/// shuffle are taken from the haystack. AND'ing the shuffles for both the
-/// low and high masks together also results in 8-bit bitsets, but where bit
-/// `i` is set if and only if the correspond *byte* is in the ith bucket.
-#[derive(Clone, Default)]
-struct SlimMaskBuilder {
-    lo: [u8; 32],
-    hi: [u8; 32],
-}
-
-impl SlimMaskBuilder {
-    /// Update this mask by adding the given byte to the given bucket. The
-    /// given bucket must be in the range 0-7.
-    ///
-    /// # Panics
-    ///
-    /// When `bucket >= 8`.
-    fn add(&mut self, bucket: usize, byte: u8) {
-        assert!(bucket < 8);
-
-        let bucket = u8::try_from(bucket).unwrap();
-        let byte_lo = usize::from(byte & 0xF);
-        let byte_hi = usize::from((byte >> 4) & 0xF);
-        // When using 256-bit vectors, we need to set this bucket assignment in
-        // the low and high 128-bit portions of the mask. This allows us to
-        // process 32 bytes at a time. Namely, AVX2 shuffles operate on each
-        // of the 128-bit lanes, rather than the full 256-bit vector at once.
-        self.lo[byte_lo] |= 1 << bucket;
-        self.lo[byte_lo + 16] |= 1 << bucket;
-        self.hi[byte_hi] |= 1 << bucket;
-        self.hi[byte_hi + 16] |= 1 << bucket;
-    }
-
-    /// Turn this builder into a vector mask.
-    ///
-    /// # Panics
-    ///
-    /// When `V` represents a vector bigger than what `MaskBytes` can contain.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn build<V: Vector>(&self) -> Mask<V> {
-        assert!(V::BYTES <= self.lo.len());
-        assert!(V::BYTES <= self.hi.len());
-        Mask {
-            lo: V::load_unaligned(self.lo[..].as_ptr()),
-            hi: V::load_unaligned(self.hi[..].as_ptr()),
-        }
-    }
-
-    /// A convenience function for building `N` vector masks from a slim
-    /// `Teddy` value.
-    ///
-    /// # Panics
-    ///
-    /// When `V` represents a vector bigger than what `MaskBytes` can contain.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn from_teddy<const BYTES: usize, V: Vector>(
-        teddy: &Teddy<8>,
-    ) -> [Mask<V>; BYTES] {
-        // MSRV(1.63): Use core::array::from_fn to just build the array here
-        // instead of creating a vector and turning it into an array.
-        let mut mask_builders = vec![SlimMaskBuilder::default(); BYTES];
-        for (bucket_index, bucket) in teddy.buckets.iter().enumerate() {
-            for pid in bucket.iter().copied() {
-                let pat = teddy.patterns.get(pid);
-                for (i, builder) in mask_builders.iter_mut().enumerate() {
-                    builder.add(bucket_index, pat.bytes()[i]);
-                }
-            }
-        }
-        let array =
-            <[SlimMaskBuilder; BYTES]>::try_from(mask_builders).unwrap();
-        array.map(|builder| builder.build())
-    }
-}
-
-impl Debug for SlimMaskBuilder {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        let (mut parts_lo, mut parts_hi) = (vec![], vec![]);
-        for i in 0..32 {
-            parts_lo.push(format!("{:02}: {:08b}", i, self.lo[i]));
-            parts_hi.push(format!("{:02}: {:08b}", i, self.hi[i]));
-        }
-        f.debug_struct("SlimMaskBuilder")
-            .field("lo", &parts_lo)
-            .field("hi", &parts_hi)
-            .finish()
-    }
-}
-
-/// Represents the low and high nybble masks that will be used during "fat"
-/// Teddy search.
-///
-/// Each mask is 32 bytes wide, and at the time of writing, only 256-bit vectors
-/// support fat Teddy.
-///
-/// A fat Teddy mask is like a slim Teddy mask, except that instead of
-/// repeating the bitsets in the high and low 128-bits in 256-bit vectors, the
-/// high and low 128-bit halves each represent distinct buckets. (Bringing the
-/// total to 16 instead of 8.) This permits spreading the patterns out a bit
-/// more and thus putting less pressure on verification to be fast.
-///
-/// Each byte in the mask corresponds to a 8-bit bitset, where bit `i` is set
-/// if and only if the corresponding nybble is in the ith bucket. The index of
-/// the byte (0-15, inclusive) corresponds to the nybble.
-#[derive(Clone, Copy, Default)]
-struct FatMaskBuilder {
-    lo: [u8; 32],
-    hi: [u8; 32],
-}
-
-impl FatMaskBuilder {
-    /// Update this mask by adding the given byte to the given bucket. The
-    /// given bucket must be in the range 0-15.
-    ///
-    /// # Panics
-    ///
-    /// When `bucket >= 16`.
-    fn add(&mut self, bucket: usize, byte: u8) {
-        assert!(bucket < 16);
-
-        let bucket = u8::try_from(bucket).unwrap();
-        let byte_lo = usize::from(byte & 0xF);
-        let byte_hi = usize::from((byte >> 4) & 0xF);
-        // Unlike slim teddy, fat teddy only works with AVX2. For fat teddy,
-        // the high 128 bits of our mask correspond to buckets 8-15, while the
-        // low 128 bits correspond to buckets 0-7.
-        if bucket < 8 {
-            self.lo[byte_lo] |= 1 << bucket;
-            self.hi[byte_hi] |= 1 << bucket;
-        } else {
-            self.lo[byte_lo + 16] |= 1 << (bucket % 8);
-            self.hi[byte_hi + 16] |= 1 << (bucket % 8);
-        }
-    }
-
-    /// Turn this builder into a vector mask.
-    ///
-    /// # Panics
-    ///
-    /// When `V` represents a vector bigger than what `MaskBytes` can contain.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn build<V: Vector>(&self) -> Mask<V> {
-        assert!(V::BYTES <= self.lo.len());
-        assert!(V::BYTES <= self.hi.len());
-        Mask {
-            lo: V::load_unaligned(self.lo[..].as_ptr()),
-            hi: V::load_unaligned(self.hi[..].as_ptr()),
-        }
-    }
-
-    /// A convenience function for building `N` vector masks from a fat
-    /// `Teddy` value.
-    ///
-    /// # Panics
-    ///
-    /// When `V` represents a vector bigger than what `MaskBytes` can contain.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[inline(always)]
-    unsafe fn from_teddy<const BYTES: usize, V: Vector>(
-        teddy: &Teddy<16>,
-    ) -> [Mask<V>; BYTES] {
-        // MSRV(1.63): Use core::array::from_fn to just build the array here
-        // instead of creating a vector and turning it into an array.
-        let mut mask_builders = vec![FatMaskBuilder::default(); BYTES];
-        for (bucket_index, bucket) in teddy.buckets.iter().enumerate() {
-            for pid in bucket.iter().copied() {
-                let pat = teddy.patterns.get(pid);
-                for (i, builder) in mask_builders.iter_mut().enumerate() {
-                    builder.add(bucket_index, pat.bytes()[i]);
-                }
-            }
-        }
-        let array =
-            <[FatMaskBuilder; BYTES]>::try_from(mask_builders).unwrap();
-        array.map(|builder| builder.build())
-    }
-}
-
-impl Debug for FatMaskBuilder {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        let (mut parts_lo, mut parts_hi) = (vec![], vec![]);
-        for i in 0..32 {
-            parts_lo.push(format!("{:02}: {:08b}", i, self.lo[i]));
-            parts_hi.push(format!("{:02}: {:08b}", i, self.hi[i]));
-        }
-        f.debug_struct("FatMaskBuilder")
-            .field("lo", &parts_lo)
-            .field("hi", &parts_hi)
-            .finish()
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/mod.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/mod.rs
deleted file mode 100644
index 26cfcdc..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/teddy/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-// Regrettable, but Teddy stuff just isn't used on all targets. And for some
-// targets, like aarch64, only "slim" Teddy is used and so "fat" Teddy gets a
-// bunch of dead-code warnings. Just not worth trying to squash them. Blech.
-#![allow(dead_code)]
-
-pub(crate) use self::builder::{Builder, Searcher};
-
-mod builder;
-mod generic;
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/tests.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/tests.rs
deleted file mode 100644
index 2b0d44e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/tests.rs
+++ /dev/null
@@ -1,583 +0,0 @@
-use std::collections::HashMap;
-
-use alloc::{
-    format,
-    string::{String, ToString},
-    vec,
-    vec::Vec,
-};
-
-use crate::{
-    packed::{Config, MatchKind},
-    util::search::Match,
-};
-
-/// A description of a single test against a multi-pattern searcher.
-///
-/// A single test may not necessarily pass on every configuration of a
-/// searcher. The tests are categorized and grouped appropriately below.
-#[derive(Clone, Debug, Eq, PartialEq)]
-struct SearchTest {
-    /// The name of this test, for debugging.
-    name: &'static str,
-    /// The patterns to search for.
-    patterns: &'static [&'static str],
-    /// The text to search.
-    haystack: &'static str,
-    /// Each match is a triple of (pattern_index, start, end), where
-    /// pattern_index is an index into `patterns` and `start`/`end` are indices
-    /// into `haystack`.
-    matches: &'static [(usize, usize, usize)],
-}
-
-struct SearchTestOwned {
-    offset: usize,
-    name: String,
-    patterns: Vec<String>,
-    haystack: String,
-    matches: Vec<(usize, usize, usize)>,
-}
-
-impl SearchTest {
-    fn variations(&self) -> Vec<SearchTestOwned> {
-        let count = if cfg!(miri) { 1 } else { 261 };
-        let mut tests = vec![];
-        for i in 0..count {
-            tests.push(self.offset_prefix(i));
-            tests.push(self.offset_suffix(i));
-            tests.push(self.offset_both(i));
-        }
-        tests
-    }
-
-    fn offset_both(&self, off: usize) -> SearchTestOwned {
-        SearchTestOwned {
-            offset: off,
-            name: self.name.to_string(),
-            patterns: self.patterns.iter().map(|s| s.to_string()).collect(),
-            haystack: format!(
-                "{}{}{}",
-                "Z".repeat(off),
-                self.haystack,
-                "Z".repeat(off)
-            ),
-            matches: self
-                .matches
-                .iter()
-                .map(|&(id, s, e)| (id, s + off, e + off))
-                .collect(),
-        }
-    }
-
-    fn offset_prefix(&self, off: usize) -> SearchTestOwned {
-        SearchTestOwned {
-            offset: off,
-            name: self.name.to_string(),
-            patterns: self.patterns.iter().map(|s| s.to_string()).collect(),
-            haystack: format!("{}{}", "Z".repeat(off), self.haystack),
-            matches: self
-                .matches
-                .iter()
-                .map(|&(id, s, e)| (id, s + off, e + off))
-                .collect(),
-        }
-    }
-
-    fn offset_suffix(&self, off: usize) -> SearchTestOwned {
-        SearchTestOwned {
-            offset: off,
-            name: self.name.to_string(),
-            patterns: self.patterns.iter().map(|s| s.to_string()).collect(),
-            haystack: format!("{}{}", self.haystack, "Z".repeat(off)),
-            matches: self.matches.to_vec(),
-        }
-    }
-}
-
-/// Short-hand constructor for SearchTest. We use it a lot below.
-macro_rules! t {
-    ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => {
-        SearchTest {
-            name: stringify!($name),
-            patterns: $patterns,
-            haystack: $haystack,
-            matches: $matches,
-        }
-    };
-}
-
-/// A collection of test groups.
-type TestCollection = &'static [&'static [SearchTest]];
-
-// Define several collections corresponding to the different type of match
-// semantics supported. These collections have some overlap, but each
-// collection should have some tests that no other collection has.
-
-/// Tests for leftmost-first match semantics.
-const PACKED_LEFTMOST_FIRST: TestCollection =
-    &[BASICS, LEFTMOST, LEFTMOST_FIRST, REGRESSION, TEDDY];
-
-/// Tests for leftmost-longest match semantics.
-const PACKED_LEFTMOST_LONGEST: TestCollection =
-    &[BASICS, LEFTMOST, LEFTMOST_LONGEST, REGRESSION, TEDDY];
-
-// Now define the individual tests that make up the collections above.
-
-/// A collection of tests for the that should always be true regardless of
-/// match semantics. That is, all combinations of leftmost-{first, longest}
-/// should produce the same answer.
-const BASICS: &'static [SearchTest] = &[
-    t!(basic001, &["a"], "", &[]),
-    t!(basic010, &["a"], "a", &[(0, 0, 1)]),
-    t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]),
-    t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]),
-    t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]),
-    t!(basic050, &["a"], "bba", &[(0, 2, 3)]),
-    t!(basic060, &["a"], "bbb", &[]),
-    t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]),
-    t!(basic100, &["aa"], "", &[]),
-    t!(basic110, &["aa"], "aa", &[(0, 0, 2)]),
-    t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]),
-    t!(basic130, &["aa"], "abbab", &[]),
-    t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]),
-    t!(basic150, &["aaa"], "aaa", &[(0, 0, 3)]),
-    t!(basic200, &["abc"], "abc", &[(0, 0, 3)]),
-    t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]),
-    t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]),
-    t!(basic230, &["abcd"], "abcd", &[(0, 0, 4)]),
-    t!(basic240, &["abcd"], "zazabzabcdz", &[(0, 6, 10)]),
-    t!(basic250, &["abcd"], "zazabcdzabcdz", &[(0, 3, 7), (0, 8, 12)]),
-    t!(basic300, &["a", "b"], "", &[]),
-    t!(basic310, &["a", "b"], "z", &[]),
-    t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]),
-    t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]),
-    t!(
-        basic340,
-        &["a", "b"],
-        "abba",
-        &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),]
-    ),
-    t!(
-        basic350,
-        &["b", "a"],
-        "abba",
-        &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),]
-    ),
-    t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]),
-    t!(basic400, &["foo", "bar"], "", &[]),
-    t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]),
-    t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]),
-    t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]),
-    t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]),
-    t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]),
-    t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]),
-    t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]),
-    t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]),
-    t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]),
-    t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", &[(1, 1, 10),]),
-    t!(
-        basic720,
-        &["yabcdef", "bcdeyabc", "abcdezghi"],
-        "yabcdezghi",
-        &[(2, 1, 10),]
-    ),
-    t!(basic810, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]),
-    t!(basic820, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]),
-    t!(basic830, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]),
-    t!(
-        basic840,
-        &["ab", "ba"],
-        "abababa",
-        &[(0, 0, 2), (0, 2, 4), (0, 4, 6),]
-    ),
-    t!(basic850, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]),
-];
-
-/// Tests for leftmost match semantics. These should pass for both
-/// leftmost-first and leftmost-longest match kinds. Stated differently, among
-/// ambiguous matches, the longest match and the match that appeared first when
-/// constructing the automaton should always be the same.
-const LEFTMOST: &'static [SearchTest] = &[
-    t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]),
-    t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]),
-    t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]),
-    t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]),
-    t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]),
-    t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]),
-    t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]),
-    t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]),
-    t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]),
-    t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]),
-    t!(
-        leftmost360,
-        &["abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(2, 0, 8),]
-    ),
-    t!(
-        leftmost370,
-        &["abcdefghi", "cde", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        leftmost380,
-        &["abcdefghi", "hz", "abcdefgh", "a"],
-        "abcdefghz",
-        &[(2, 0, 8),]
-    ),
-    t!(
-        leftmost390,
-        &["b", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        leftmost400,
-        &["h", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        leftmost410,
-        &["z", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8), (0, 8, 9),]
-    ),
-];
-
-/// Tests for non-overlapping leftmost-first match semantics. These tests
-/// should generally be specific to leftmost-first, which means they should
-/// generally fail under leftmost-longest semantics.
-const LEFTMOST_FIRST: &'static [SearchTest] = &[
-    t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]),
-    t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]),
-    t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]),
-    t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]),
-    t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]),
-    t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]),
-    t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]),
-    t!(
-        leftfirst310,
-        &["abcd", "b", "bce", "ce"],
-        "abce",
-        &[(1, 1, 2), (3, 2, 4),]
-    ),
-    t!(
-        leftfirst320,
-        &["a", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(0, 0, 1), (2, 7, 9),]
-    ),
-    t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]),
-    t!(
-        leftfirst340,
-        &["abcdef", "x", "x", "x", "x", "x", "x", "abcde"],
-        "abcdef",
-        &[(0, 0, 6)]
-    ),
-];
-
-/// Tests for non-overlapping leftmost-longest match semantics. These tests
-/// should generally be specific to leftmost-longest, which means they should
-/// generally fail under leftmost-first semantics.
-const LEFTMOST_LONGEST: &'static [SearchTest] = &[
-    t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]),
-    t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]),
-    t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]),
-    t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]),
-    t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]),
-    t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]),
-    t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]),
-    t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]),
-    t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]),
-    t!(
-        leftlong310,
-        &["a", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]),
-    t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]),
-    t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]),
-];
-
-/// Regression tests that are applied to all combinations.
-///
-/// If regression tests are needed for specific match semantics, then add them
-/// to the appropriate group above.
-const REGRESSION: &'static [SearchTest] = &[
-    t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]),
-    t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]),
-    t!(
-        regression030,
-        &["libcore/", "libstd/"],
-        "libcore/char/methods.rs",
-        &[(0, 0, 8),]
-    ),
-    t!(
-        regression040,
-        &["libstd/", "libcore/"],
-        "libcore/char/methods.rs",
-        &[(1, 0, 8),]
-    ),
-    t!(
-        regression050,
-        &["\x00\x00\x01", "\x00\x00\x00"],
-        "\x00\x00\x00",
-        &[(1, 0, 3),]
-    ),
-    t!(
-        regression060,
-        &["\x00\x00\x00", "\x00\x00\x01"],
-        "\x00\x00\x00",
-        &[(0, 0, 3),]
-    ),
-];
-
-const TEDDY: &'static [SearchTest] = &[
-    t!(
-        teddy010,
-        &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"],
-        "abcdefghijk",
-        &[
-            (0, 0, 1),
-            (1, 1, 2),
-            (2, 2, 3),
-            (3, 3, 4),
-            (4, 4, 5),
-            (5, 5, 6),
-            (6, 6, 7),
-            (7, 7, 8),
-            (8, 8, 9),
-            (9, 9, 10),
-            (10, 10, 11)
-        ]
-    ),
-    t!(
-        teddy020,
-        &["ab", "bc", "cd", "de", "ef", "fg", "gh", "hi", "ij", "jk", "kl"],
-        "abcdefghijk",
-        &[(0, 0, 2), (2, 2, 4), (4, 4, 6), (6, 6, 8), (8, 8, 10),]
-    ),
-    t!(
-        teddy030,
-        &["abc"],
-        "abcdefghijklmnopqrstuvwxyzabcdefghijk",
-        &[(0, 0, 3), (0, 26, 29)]
-    ),
-];
-
-// Now define a test for each combination of things above that we want to run.
-// Since there are a few different combinations for each collection of tests,
-// we define a couple of macros to avoid repetition drudgery. The testconfig
-// macro constructs the automaton from a given match kind, and runs the search
-// tests one-by-one over the given collection. The `with` parameter allows one
-// to configure the config with additional parameters. The testcombo macro
-// invokes testconfig in precisely this way: it sets up several tests where
-// each one turns a different knob on Config.
-
-macro_rules! testconfig {
-    ($name:ident, $collection:expr, $with:expr) => {
-        #[test]
-        fn $name() {
-            run_search_tests($collection, |test| {
-                let mut config = Config::new();
-                $with(&mut config);
-                let mut builder = config.builder();
-                builder.extend(test.patterns.iter().map(|p| p.as_bytes()));
-                let searcher = match builder.build() {
-                    Some(searcher) => searcher,
-                    None => {
-                        // For x86-64 and aarch64, not building a searcher is
-                        // probably a bug, so be loud.
-                        if cfg!(any(
-                            target_arch = "x86_64",
-                            target_arch = "aarch64"
-                        )) {
-                            panic!("failed to build packed searcher")
-                        }
-                        return None;
-                    }
-                };
-                Some(searcher.find_iter(&test.haystack).collect())
-            });
-        }
-    };
-}
-
-testconfig!(
-    search_default_leftmost_first,
-    PACKED_LEFTMOST_FIRST,
-    |_: &mut Config| {}
-);
-
-testconfig!(
-    search_default_leftmost_longest,
-    PACKED_LEFTMOST_LONGEST,
-    |c: &mut Config| {
-        c.match_kind(MatchKind::LeftmostLongest);
-    }
-);
-
-testconfig!(
-    search_teddy_leftmost_first,
-    PACKED_LEFTMOST_FIRST,
-    |c: &mut Config| {
-        c.only_teddy(true);
-    }
-);
-
-testconfig!(
-    search_teddy_leftmost_longest,
-    PACKED_LEFTMOST_LONGEST,
-    |c: &mut Config| {
-        c.only_teddy(true).match_kind(MatchKind::LeftmostLongest);
-    }
-);
-
-testconfig!(
-    search_teddy_ssse3_leftmost_first,
-    PACKED_LEFTMOST_FIRST,
-    |c: &mut Config| {
-        c.only_teddy(true);
-        #[cfg(target_arch = "x86_64")]
-        if std::is_x86_feature_detected!("ssse3") {
-            c.only_teddy_256bit(Some(false));
-        }
-    }
-);
-
-testconfig!(
-    search_teddy_ssse3_leftmost_longest,
-    PACKED_LEFTMOST_LONGEST,
-    |c: &mut Config| {
-        c.only_teddy(true).match_kind(MatchKind::LeftmostLongest);
-        #[cfg(target_arch = "x86_64")]
-        if std::is_x86_feature_detected!("ssse3") {
-            c.only_teddy_256bit(Some(false));
-        }
-    }
-);
-
-testconfig!(
-    search_teddy_avx2_leftmost_first,
-    PACKED_LEFTMOST_FIRST,
-    |c: &mut Config| {
-        c.only_teddy(true);
-        #[cfg(target_arch = "x86_64")]
-        if std::is_x86_feature_detected!("avx2") {
-            c.only_teddy_256bit(Some(true));
-        }
-    }
-);
-
-testconfig!(
-    search_teddy_avx2_leftmost_longest,
-    PACKED_LEFTMOST_LONGEST,
-    |c: &mut Config| {
-        c.only_teddy(true).match_kind(MatchKind::LeftmostLongest);
-        #[cfg(target_arch = "x86_64")]
-        if std::is_x86_feature_detected!("avx2") {
-            c.only_teddy_256bit(Some(true));
-        }
-    }
-);
-
-testconfig!(
-    search_teddy_fat_leftmost_first,
-    PACKED_LEFTMOST_FIRST,
-    |c: &mut Config| {
-        c.only_teddy(true);
-        #[cfg(target_arch = "x86_64")]
-        if std::is_x86_feature_detected!("avx2") {
-            c.only_teddy_fat(Some(true));
-        }
-    }
-);
-
-testconfig!(
-    search_teddy_fat_leftmost_longest,
-    PACKED_LEFTMOST_LONGEST,
-    |c: &mut Config| {
-        c.only_teddy(true).match_kind(MatchKind::LeftmostLongest);
-        #[cfg(target_arch = "x86_64")]
-        if std::is_x86_feature_detected!("avx2") {
-            c.only_teddy_fat(Some(true));
-        }
-    }
-);
-
-testconfig!(
-    search_rabinkarp_leftmost_first,
-    PACKED_LEFTMOST_FIRST,
-    |c: &mut Config| {
-        c.only_rabin_karp(true);
-    }
-);
-
-testconfig!(
-    search_rabinkarp_leftmost_longest,
-    PACKED_LEFTMOST_LONGEST,
-    |c: &mut Config| {
-        c.only_rabin_karp(true).match_kind(MatchKind::LeftmostLongest);
-    }
-);
-
-#[test]
-fn search_tests_have_unique_names() {
-    let assert = |constname, tests: &[SearchTest]| {
-        let mut seen = HashMap::new(); // map from test name to position
-        for (i, test) in tests.iter().enumerate() {
-            if !seen.contains_key(test.name) {
-                seen.insert(test.name, i);
-            } else {
-                let last = seen[test.name];
-                panic!(
-                    "{} tests have duplicate names at positions {} and {}",
-                    constname, last, i
-                );
-            }
-        }
-    };
-    assert("BASICS", BASICS);
-    assert("LEFTMOST", LEFTMOST);
-    assert("LEFTMOST_FIRST", LEFTMOST_FIRST);
-    assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST);
-    assert("REGRESSION", REGRESSION);
-    assert("TEDDY", TEDDY);
-}
-
-fn run_search_tests<F: FnMut(&SearchTestOwned) -> Option<Vec<Match>>>(
-    which: TestCollection,
-    mut f: F,
-) {
-    let get_match_triples =
-        |matches: Vec<Match>| -> Vec<(usize, usize, usize)> {
-            matches
-                .into_iter()
-                .map(|m| (m.pattern().as_usize(), m.start(), m.end()))
-                .collect()
-        };
-    for &tests in which {
-        for spec in tests {
-            for test in spec.variations() {
-                let results = match f(&test) {
-                    None => continue,
-                    Some(results) => results,
-                };
-                assert_eq!(
-                    test.matches,
-                    get_match_triples(results).as_slice(),
-                    "test: {}, patterns: {:?}, haystack(len={:?}): {:?}, \
-                     offset: {:?}",
-                    test.name,
-                    test.patterns,
-                    test.haystack.len(),
-                    test.haystack,
-                    test.offset,
-                );
-            }
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/vector.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/vector.rs
deleted file mode 100644
index 57c02ccf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/packed/vector.rs
+++ /dev/null
@@ -1,1757 +0,0 @@
-// NOTE: The descriptions for each of the vector methods on the traits below
-// are pretty inscrutable. For this reason, there are tests for every method
-// on for every trait impl below. If you're confused about what an op does,
-// consult its test. (They probably should be doc tests, but I couldn't figure
-// out how to write them in a non-annoying way.)
-
-use core::{
-    fmt::Debug,
-    panic::{RefUnwindSafe, UnwindSafe},
-};
-
-/// A trait for describing vector operations used by vectorized searchers.
-///
-/// The trait is highly constrained to low level vector operations needed for
-/// the specific algorithms used in this crate. In general, it was invented
-/// mostly to be generic over x86's __m128i and __m256i types. At time of
-/// writing, it also supports wasm and aarch64 128-bit vector types as well.
-///
-/// # Safety
-///
-/// All methods are not safe since they are intended to be implemented using
-/// vendor intrinsics, which are also not safe. Callers must ensure that
-/// the appropriate target features are enabled in the calling function,
-/// and that the current CPU supports them. All implementations should
-/// avoid marking the routines with `#[target_feature]` and instead mark
-/// them as `#[inline(always)]` to ensure they get appropriately inlined.
-/// (`inline(always)` cannot be used with target_feature.)
-pub(crate) trait Vector:
-    Copy + Debug + Send + Sync + UnwindSafe + RefUnwindSafe
-{
-    /// The number of bits in the vector.
-    const BITS: usize;
-    /// The number of bytes in the vector. That is, this is the size of the
-    /// vector in memory.
-    const BYTES: usize;
-
-    /// Create a vector with 8-bit lanes with the given byte repeated into each
-    /// lane.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn splat(byte: u8) -> Self;
-
-    /// Read a vector-size number of bytes from the given pointer. The pointer
-    /// does not need to be aligned.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    ///
-    /// Callers must guarantee that at least `BYTES` bytes are readable from
-    /// `data`.
-    unsafe fn load_unaligned(data: *const u8) -> Self;
-
-    /// Returns true if and only if this vector has zero in all of its lanes.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn is_zero(self) -> bool;
-
-    /// Do an 8-bit pairwise equality check. If lane `i` is equal in this
-    /// vector and the one given, then lane `i` in the resulting vector is set
-    /// to `0xFF`. Otherwise, it is set to `0x00`.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn cmpeq(self, vector2: Self) -> Self;
-
-    /// Perform a bitwise 'and' of this vector and the one given and return
-    /// the result.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn and(self, vector2: Self) -> Self;
-
-    /// Perform a bitwise 'or' of this vector and the one given and return
-    /// the result.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    #[allow(dead_code)] // unused, but useful enough to keep around?
-    unsafe fn or(self, vector2: Self) -> Self;
-
-    /// Shift each 8-bit lane in this vector to the right by the number of
-    /// bits indictated by the `BITS` type parameter.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self;
-
-    /// Shift this vector to the left by one byte and shift the most
-    /// significant byte of `vector2` into the least significant position of
-    /// this vector.
-    ///
-    /// Stated differently, this behaves as if `self` and `vector2` were
-    /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted
-    /// right by `Self::BYTES - 1` bytes.
-    ///
-    /// With respect to the Teddy algorithm, `vector2` is usually a previous
-    /// `Self::BYTES` chunk from the haystack and `self` is the chunk
-    /// immediately following it. This permits combining the last two bytes
-    /// from the previous chunk (`vector2`) with the first `Self::BYTES - 1`
-    /// bytes from the current chunk. This permits aligning the result of
-    /// various shuffles so that they can be and-ed together and a possible
-    /// candidate discovered.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn shift_in_one_byte(self, vector2: Self) -> Self;
-
-    /// Shift this vector to the left by two bytes and shift the two most
-    /// significant bytes of `vector2` into the least significant position of
-    /// this vector.
-    ///
-    /// Stated differently, this behaves as if `self` and `vector2` were
-    /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted
-    /// right by `Self::BYTES - 2` bytes.
-    ///
-    /// With respect to the Teddy algorithm, `vector2` is usually a previous
-    /// `Self::BYTES` chunk from the haystack and `self` is the chunk
-    /// immediately following it. This permits combining the last two bytes
-    /// from the previous chunk (`vector2`) with the first `Self::BYTES - 2`
-    /// bytes from the current chunk. This permits aligning the result of
-    /// various shuffles so that they can be and-ed together and a possible
-    /// candidate discovered.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self;
-
-    /// Shift this vector to the left by three bytes and shift the three most
-    /// significant bytes of `vector2` into the least significant position of
-    /// this vector.
-    ///
-    /// Stated differently, this behaves as if `self` and `vector2` were
-    /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted
-    /// right by `Self::BYTES - 3` bytes.
-    ///
-    /// With respect to the Teddy algorithm, `vector2` is usually a previous
-    /// `Self::BYTES` chunk from the haystack and `self` is the chunk
-    /// immediately following it. This permits combining the last three bytes
-    /// from the previous chunk (`vector2`) with the first `Self::BYTES - 3`
-    /// bytes from the current chunk. This permits aligning the result of
-    /// various shuffles so that they can be and-ed together and a possible
-    /// candidate discovered.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self;
-
-    /// Shuffles the bytes in this vector according to the indices in each of
-    /// the corresponding lanes in `indices`.
-    ///
-    /// If `i` is the index of corresponding lanes, `A` is this vector, `B` is
-    /// indices and `C` is the resulting vector, then `C = A[B[i]]`.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn shuffle_bytes(self, indices: Self) -> Self;
-
-    /// Call the provided function for each 64-bit lane in this vector. The
-    /// given function is provided the lane index and lane value as a `u64`.
-    ///
-    /// If `f` returns `Some`, then iteration over the lanes is stopped and the
-    /// value is returned. Otherwise, this returns `None`.
-    ///
-    /// # Notes
-    ///
-    /// Conceptually it would be nice if we could have a
-    /// `unpack64(self) -> [u64; BITS / 64]` method, but defining that is
-    /// tricky given Rust's [current support for const generics][support].
-    /// And even if we could, it would be tricky to write generic code over
-    /// it. (Not impossible. We could introduce another layer that requires
-    /// `AsRef<[u64]>` or something.)
-    ///
-    /// [support]: https://github.com/rust-lang/rust/issues/60551
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn for_each_64bit_lane<T>(
-        self,
-        f: impl FnMut(usize, u64) -> Option<T>,
-    ) -> Option<T>;
-}
-
-/// This trait extends the `Vector` trait with additional operations to support
-/// Fat Teddy.
-///
-/// Fat Teddy uses 16 buckets instead of 8, but reads half as many bytes (as
-/// the vector size) instead of the full size of a vector per iteration. For
-/// example, when using a 256-bit vector, Slim Teddy reads 32 bytes at a timr
-/// but Fat Teddy reads 16 bytes at a time.
-///
-/// Fat Teddy is useful when searching for a large number of literals.
-/// The extra number of buckets spreads the literals out more and reduces
-/// verification time.
-///
-/// Currently we only implement this for AVX on x86_64. It would be nice to
-/// implement this for SSE on x86_64 and NEON on aarch64, with the latter two
-/// only reading 8 bytes at a time. It's not clear how well it would work, but
-/// there are some tricky things to figure out in terms of implementation. The
-/// `half_shift_in_{one,two,three}_bytes` methods in particular are probably
-/// the trickiest of the bunch. For AVX2, these are implemented by taking
-/// advantage of the fact that `_mm256_alignr_epi8` operates on each 128-bit
-/// half instead of the full 256-bit vector. (Where as `_mm_alignr_epi8`
-/// operates on the full 128-bit vector and not on each 64-bit half.) I didn't
-/// do a careful survey of NEON to see if it could easily support these
-/// operations.
-pub(crate) trait FatVector: Vector {
-    type Half: Vector;
-
-    /// Read a half-vector-size number of bytes from the given pointer, and
-    /// broadcast it across both halfs of a full vector. The pointer does not
-    /// need to be aligned.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    ///
-    /// Callers must guarantee that at least `Self::HALF::BYTES` bytes are
-    /// readable from `data`.
-    unsafe fn load_half_unaligned(data: *const u8) -> Self;
-
-    /// Like `Vector::shift_in_one_byte`, except this is done for each half
-    /// of the vector instead.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn half_shift_in_one_byte(self, vector2: Self) -> Self;
-
-    /// Like `Vector::shift_in_two_bytes`, except this is done for each half
-    /// of the vector instead.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn half_shift_in_two_bytes(self, vector2: Self) -> Self;
-
-    /// Like `Vector::shift_in_two_bytes`, except this is done for each half
-    /// of the vector instead.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn half_shift_in_three_bytes(self, vector2: Self) -> Self;
-
-    /// Swap the 128-bit lanes in this vector.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn swap_halves(self) -> Self;
-
-    /// Unpack and interleave the 8-bit lanes from the low 128 bits of each
-    /// vector and return the result.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn interleave_low_8bit_lanes(self, vector2: Self) -> Self;
-
-    /// Unpack and interleave the 8-bit lanes from the high 128 bits of each
-    /// vector and return the result.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn interleave_high_8bit_lanes(self, vector2: Self) -> Self;
-
-    /// Call the provided function for each 64-bit lane in the lower half
-    /// of this vector and then in the other vector. The given function is
-    /// provided the lane index and lane value as a `u64`. (The high 128-bits
-    /// of each vector are ignored.)
-    ///
-    /// If `f` returns `Some`, then iteration over the lanes is stopped and the
-    /// value is returned. Otherwise, this returns `None`.
-    ///
-    /// # Safety
-    ///
-    /// Callers must ensure that this is okay to call in the current target for
-    /// the current CPU.
-    unsafe fn for_each_low_64bit_lane<T>(
-        self,
-        vector2: Self,
-        f: impl FnMut(usize, u64) -> Option<T>,
-    ) -> Option<T>;
-}
-
-#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-mod x86_64_ssse3 {
-    use core::arch::x86_64::*;
-
-    use crate::util::int::{I32, I8};
-
-    use super::Vector;
-
-    impl Vector for __m128i {
-        const BITS: usize = 128;
-        const BYTES: usize = 16;
-
-        #[inline(always)]
-        unsafe fn splat(byte: u8) -> __m128i {
-            _mm_set1_epi8(i8::from_bits(byte))
-        }
-
-        #[inline(always)]
-        unsafe fn load_unaligned(data: *const u8) -> __m128i {
-            _mm_loadu_si128(data.cast::<__m128i>())
-        }
-
-        #[inline(always)]
-        unsafe fn is_zero(self) -> bool {
-            let cmp = self.cmpeq(Self::splat(0));
-            _mm_movemask_epi8(cmp).to_bits() == 0xFFFF
-        }
-
-        #[inline(always)]
-        unsafe fn cmpeq(self, vector2: Self) -> __m128i {
-            _mm_cmpeq_epi8(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn and(self, vector2: Self) -> __m128i {
-            _mm_and_si128(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn or(self, vector2: Self) -> __m128i {
-            _mm_or_si128(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self {
-            // Apparently there is no _mm_srli_epi8, so we emulate it by
-            // shifting 16-bit integers and masking out the high nybble of each
-            // 8-bit lane (since that nybble will contain bits from the low
-            // nybble of the previous lane).
-            let lomask = Self::splat(0xF);
-            _mm_srli_epi16(self, BITS).and(lomask)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_one_byte(self, vector2: Self) -> Self {
-            _mm_alignr_epi8(self, vector2, 15)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self {
-            _mm_alignr_epi8(self, vector2, 14)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self {
-            _mm_alignr_epi8(self, vector2, 13)
-        }
-
-        #[inline(always)]
-        unsafe fn shuffle_bytes(self, indices: Self) -> Self {
-            _mm_shuffle_epi8(self, indices)
-        }
-
-        #[inline(always)]
-        unsafe fn for_each_64bit_lane<T>(
-            self,
-            mut f: impl FnMut(usize, u64) -> Option<T>,
-        ) -> Option<T> {
-            // We could just use _mm_extract_epi64 here, but that requires
-            // SSE 4.1. It isn't necessarily a problem to just require SSE 4.1,
-            // but everything else works with SSSE3 so we stick to that subset.
-            let lanes: [u64; 2] = core::mem::transmute(self);
-            if let Some(t) = f(0, lanes[0]) {
-                return Some(t);
-            }
-            if let Some(t) = f(1, lanes[1]) {
-                return Some(t);
-            }
-            None
-        }
-    }
-}
-
-#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-mod x86_64_avx2 {
-    use core::arch::x86_64::*;
-
-    use crate::util::int::{I32, I64, I8};
-
-    use super::{FatVector, Vector};
-
-    impl Vector for __m256i {
-        const BITS: usize = 256;
-        const BYTES: usize = 32;
-
-        #[inline(always)]
-        unsafe fn splat(byte: u8) -> __m256i {
-            _mm256_set1_epi8(i8::from_bits(byte))
-        }
-
-        #[inline(always)]
-        unsafe fn load_unaligned(data: *const u8) -> __m256i {
-            _mm256_loadu_si256(data.cast::<__m256i>())
-        }
-
-        #[inline(always)]
-        unsafe fn is_zero(self) -> bool {
-            let cmp = self.cmpeq(Self::splat(0));
-            _mm256_movemask_epi8(cmp).to_bits() == 0xFFFFFFFF
-        }
-
-        #[inline(always)]
-        unsafe fn cmpeq(self, vector2: Self) -> __m256i {
-            _mm256_cmpeq_epi8(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn and(self, vector2: Self) -> __m256i {
-            _mm256_and_si256(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn or(self, vector2: Self) -> __m256i {
-            _mm256_or_si256(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self {
-            let lomask = Self::splat(0xF);
-            _mm256_srli_epi16(self, BITS).and(lomask)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_one_byte(self, vector2: Self) -> Self {
-            // Credit goes to jneem for figuring this out:
-            // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184
-            //
-            // TL;DR avx2's PALIGNR instruction is actually just two 128-bit
-            // PALIGNR instructions, which is not what we want, so we need to
-            // do some extra shuffling.
-            let v = _mm256_permute2x128_si256(vector2, self, 0x21);
-            _mm256_alignr_epi8(self, v, 15)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self {
-            // Credit goes to jneem for figuring this out:
-            // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184
-            //
-            // TL;DR avx2's PALIGNR instruction is actually just two 128-bit
-            // PALIGNR instructions, which is not what we want, so we need to
-            // do some extra shuffling.
-            let v = _mm256_permute2x128_si256(vector2, self, 0x21);
-            _mm256_alignr_epi8(self, v, 14)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self {
-            // Credit goes to jneem for figuring this out:
-            // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184
-            //
-            // TL;DR avx2's PALIGNR instruction is actually just two 128-bit
-            // PALIGNR instructions, which is not what we want, so we need to
-            // do some extra shuffling.
-            let v = _mm256_permute2x128_si256(vector2, self, 0x21);
-            _mm256_alignr_epi8(self, v, 13)
-        }
-
-        #[inline(always)]
-        unsafe fn shuffle_bytes(self, indices: Self) -> Self {
-            _mm256_shuffle_epi8(self, indices)
-        }
-
-        #[inline(always)]
-        unsafe fn for_each_64bit_lane<T>(
-            self,
-            mut f: impl FnMut(usize, u64) -> Option<T>,
-        ) -> Option<T> {
-            // NOTE: At one point in the past, I used transmute to this to
-            // get a [u64; 4], but it turned out to lead to worse codegen IIRC.
-            // I've tried it more recently, and it looks like that's no longer
-            // the case. But since there's no difference, we stick with the
-            // slightly more complicated but transmute-free version.
-            let lane = _mm256_extract_epi64(self, 0).to_bits();
-            if let Some(t) = f(0, lane) {
-                return Some(t);
-            }
-            let lane = _mm256_extract_epi64(self, 1).to_bits();
-            if let Some(t) = f(1, lane) {
-                return Some(t);
-            }
-            let lane = _mm256_extract_epi64(self, 2).to_bits();
-            if let Some(t) = f(2, lane) {
-                return Some(t);
-            }
-            let lane = _mm256_extract_epi64(self, 3).to_bits();
-            if let Some(t) = f(3, lane) {
-                return Some(t);
-            }
-            None
-        }
-    }
-
-    impl FatVector for __m256i {
-        type Half = __m128i;
-
-        #[inline(always)]
-        unsafe fn load_half_unaligned(data: *const u8) -> Self {
-            let half = Self::Half::load_unaligned(data);
-            _mm256_broadcastsi128_si256(half)
-        }
-
-        #[inline(always)]
-        unsafe fn half_shift_in_one_byte(self, vector2: Self) -> Self {
-            _mm256_alignr_epi8(self, vector2, 15)
-        }
-
-        #[inline(always)]
-        unsafe fn half_shift_in_two_bytes(self, vector2: Self) -> Self {
-            _mm256_alignr_epi8(self, vector2, 14)
-        }
-
-        #[inline(always)]
-        unsafe fn half_shift_in_three_bytes(self, vector2: Self) -> Self {
-            _mm256_alignr_epi8(self, vector2, 13)
-        }
-
-        #[inline(always)]
-        unsafe fn swap_halves(self) -> Self {
-            _mm256_permute4x64_epi64(self, 0x4E)
-        }
-
-        #[inline(always)]
-        unsafe fn interleave_low_8bit_lanes(self, vector2: Self) -> Self {
-            _mm256_unpacklo_epi8(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn interleave_high_8bit_lanes(self, vector2: Self) -> Self {
-            _mm256_unpackhi_epi8(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn for_each_low_64bit_lane<T>(
-            self,
-            vector2: Self,
-            mut f: impl FnMut(usize, u64) -> Option<T>,
-        ) -> Option<T> {
-            let lane = _mm256_extract_epi64(self, 0).to_bits();
-            if let Some(t) = f(0, lane) {
-                return Some(t);
-            }
-            let lane = _mm256_extract_epi64(self, 1).to_bits();
-            if let Some(t) = f(1, lane) {
-                return Some(t);
-            }
-            let lane = _mm256_extract_epi64(vector2, 0).to_bits();
-            if let Some(t) = f(2, lane) {
-                return Some(t);
-            }
-            let lane = _mm256_extract_epi64(vector2, 1).to_bits();
-            if let Some(t) = f(3, lane) {
-                return Some(t);
-            }
-            None
-        }
-    }
-}
-
-#[cfg(all(
-    target_arch = "aarch64",
-    target_feature = "neon",
-    target_endian = "little"
-))]
-mod aarch64_neon {
-    use core::arch::aarch64::*;
-
-    use super::Vector;
-
-    impl Vector for uint8x16_t {
-        const BITS: usize = 128;
-        const BYTES: usize = 16;
-
-        #[inline(always)]
-        unsafe fn splat(byte: u8) -> uint8x16_t {
-            vdupq_n_u8(byte)
-        }
-
-        #[inline(always)]
-        unsafe fn load_unaligned(data: *const u8) -> uint8x16_t {
-            vld1q_u8(data)
-        }
-
-        #[inline(always)]
-        unsafe fn is_zero(self) -> bool {
-            // Could also use vmaxvq_u8.
-            // ... I tried that and couldn't observe any meaningful difference
-            // in benchmarks.
-            let maxes = vreinterpretq_u64_u8(vpmaxq_u8(self, self));
-            vgetq_lane_u64(maxes, 0) == 0
-        }
-
-        #[inline(always)]
-        unsafe fn cmpeq(self, vector2: Self) -> uint8x16_t {
-            vceqq_u8(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn and(self, vector2: Self) -> uint8x16_t {
-            vandq_u8(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn or(self, vector2: Self) -> uint8x16_t {
-            vorrq_u8(self, vector2)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self {
-            debug_assert!(BITS <= 7);
-            vshrq_n_u8(self, BITS)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_one_byte(self, vector2: Self) -> Self {
-            vextq_u8(vector2, self, 15)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self {
-            vextq_u8(vector2, self, 14)
-        }
-
-        #[inline(always)]
-        unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self {
-            vextq_u8(vector2, self, 13)
-        }
-
-        #[inline(always)]
-        unsafe fn shuffle_bytes(self, indices: Self) -> Self {
-            vqtbl1q_u8(self, indices)
-        }
-
-        #[inline(always)]
-        unsafe fn for_each_64bit_lane<T>(
-            self,
-            mut f: impl FnMut(usize, u64) -> Option<T>,
-        ) -> Option<T> {
-            let this = vreinterpretq_u64_u8(self);
-            let lane = vgetq_lane_u64(this, 0);
-            if let Some(t) = f(0, lane) {
-                return Some(t);
-            }
-            let lane = vgetq_lane_u64(this, 1);
-            if let Some(t) = f(1, lane) {
-                return Some(t);
-            }
-            None
-        }
-    }
-}
-
-#[cfg(all(test, target_arch = "x86_64", target_feature = "sse2"))]
-mod tests_x86_64_ssse3 {
-    use core::arch::x86_64::*;
-
-    use crate::util::int::{I32, U32};
-
-    use super::*;
-
-    fn is_runnable() -> bool {
-        std::is_x86_feature_detected!("ssse3")
-    }
-
-    #[target_feature(enable = "ssse3")]
-    unsafe fn load(lanes: [u8; 16]) -> __m128i {
-        __m128i::load_unaligned(&lanes as *const u8)
-    }
-
-    #[target_feature(enable = "ssse3")]
-    unsafe fn unload(v: __m128i) -> [u8; 16] {
-        [
-            _mm_extract_epi8(v, 0).to_bits().low_u8(),
-            _mm_extract_epi8(v, 1).to_bits().low_u8(),
-            _mm_extract_epi8(v, 2).to_bits().low_u8(),
-            _mm_extract_epi8(v, 3).to_bits().low_u8(),
-            _mm_extract_epi8(v, 4).to_bits().low_u8(),
-            _mm_extract_epi8(v, 5).to_bits().low_u8(),
-            _mm_extract_epi8(v, 6).to_bits().low_u8(),
-            _mm_extract_epi8(v, 7).to_bits().low_u8(),
-            _mm_extract_epi8(v, 8).to_bits().low_u8(),
-            _mm_extract_epi8(v, 9).to_bits().low_u8(),
-            _mm_extract_epi8(v, 10).to_bits().low_u8(),
-            _mm_extract_epi8(v, 11).to_bits().low_u8(),
-            _mm_extract_epi8(v, 12).to_bits().low_u8(),
-            _mm_extract_epi8(v, 13).to_bits().low_u8(),
-            _mm_extract_epi8(v, 14).to_bits().low_u8(),
-            _mm_extract_epi8(v, 15).to_bits().low_u8(),
-        ]
-    }
-
-    #[test]
-    fn vector_splat() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v = __m128i::splat(0xAF);
-            assert_eq!(
-                unload(v),
-                [
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_is_zero() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert!(!v.is_zero());
-            let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert!(v.is_zero());
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_cmpeq() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1]);
-            let v2 =
-                load([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]);
-            assert_eq!(
-                unload(v1.cmpeq(v2)),
-                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_and() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v1 =
-                load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            let v2 =
-                load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert_eq!(
-                unload(v1.and(v2)),
-                [0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_or() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v1 =
-                load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            let v2 =
-                load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert_eq!(
-                unload(v1.or(v2)),
-                [0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_8bit_lane_right() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v = load([
-                0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            assert_eq!(
-                unload(v.shift_8bit_lane_right::<2>()),
-                [0, 0, 0, 0, 0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_one_byte() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 = load([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_one_byte(v2)),
-                [32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_two_bytes() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 = load([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_two_bytes(v2)),
-                [31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_three_bytes() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 = load([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_three_bytes(v2)),
-                [30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shuffle_bytes() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 =
-                load([0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12]);
-            assert_eq!(
-                unload(v1.shuffle_bytes(v2)),
-                [1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_for_each_64bit_lane() {
-        #[target_feature(enable = "ssse3")]
-        unsafe fn test() {
-            let v = load([
-                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
-                0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
-            ]);
-            let mut lanes = [0u64; 2];
-            v.for_each_64bit_lane(|i, lane| {
-                lanes[i] = lane;
-                None::<()>
-            });
-            assert_eq!(lanes, [0x0807060504030201, 0x100F0E0D0C0B0A09],);
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-}
-
-#[cfg(all(test, target_arch = "x86_64", target_feature = "sse2"))]
-mod tests_x86_64_avx2 {
-    use core::arch::x86_64::*;
-
-    use crate::util::int::{I32, U32};
-
-    use super::*;
-
-    fn is_runnable() -> bool {
-        std::is_x86_feature_detected!("avx2")
-    }
-
-    #[target_feature(enable = "avx2")]
-    unsafe fn load(lanes: [u8; 32]) -> __m256i {
-        __m256i::load_unaligned(&lanes as *const u8)
-    }
-
-    #[target_feature(enable = "avx2")]
-    unsafe fn load_half(lanes: [u8; 16]) -> __m256i {
-        __m256i::load_half_unaligned(&lanes as *const u8)
-    }
-
-    #[target_feature(enable = "avx2")]
-    unsafe fn unload(v: __m256i) -> [u8; 32] {
-        [
-            _mm256_extract_epi8(v, 0).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 1).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 2).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 3).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 4).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 5).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 6).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 7).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 8).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 9).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 10).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 11).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 12).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 13).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 14).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 15).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 16).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 17).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 18).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 19).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 20).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 21).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 22).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 23).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 24).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 25).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 26).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 27).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 28).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 29).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 30).to_bits().low_u8(),
-            _mm256_extract_epi8(v, 31).to_bits().low_u8(),
-        ]
-    }
-
-    #[test]
-    fn vector_splat() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v = __m256i::splat(0xAF);
-            assert_eq!(
-                unload(v),
-                [
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_is_zero() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v = load([
-                0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            assert!(!v.is_zero());
-            let v = load([
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            assert!(v.is_zero());
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_cmpeq() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1,
-            ]);
-            let v2 = load([
-                32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18,
-                17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
-            ]);
-            assert_eq!(
-                unload(v1.cmpeq(v2)),
-                [
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_and() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            let v2 = load([
-                0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            assert_eq!(
-                unload(v1.and(v2)),
-                [
-                    0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_or() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            let v2 = load([
-                0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            assert_eq!(
-                unload(v1.or(v2)),
-                [
-                    0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_8bit_lane_right() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v = load([
-                0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            assert_eq!(
-                unload(v.shift_8bit_lane_right::<2>()),
-                [
-                    0, 0, 0, 0, 0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_one_byte() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            let v2 = load([
-                33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-                48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
-                63, 64,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_one_byte(v2)),
-                [
-                    64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-                    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
-                    31,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_two_bytes() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            let v2 = load([
-                33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-                48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
-                63, 64,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_two_bytes(v2)),
-                [
-                    63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
-                    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
-                    30,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_three_bytes() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            let v2 = load([
-                33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-                48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
-                63, 64,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_three_bytes(v2)),
-                [
-                    62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                    15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
-                    29,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shuffle_bytes() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            let v2 = load([
-                0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16,
-                16, 16, 20, 20, 20, 20, 24, 24, 24, 24, 28, 28, 28, 28,
-            ]);
-            assert_eq!(
-                unload(v1.shuffle_bytes(v2)),
-                [
-                    1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13, 17,
-                    17, 17, 17, 21, 21, 21, 21, 25, 25, 25, 25, 29, 29, 29,
-                    29
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_for_each_64bit_lane() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v = load([
-                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
-                0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
-                0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
-                0x1F, 0x20,
-            ]);
-            let mut lanes = [0u64; 4];
-            v.for_each_64bit_lane(|i, lane| {
-                lanes[i] = lane;
-                None::<()>
-            });
-            assert_eq!(
-                lanes,
-                [
-                    0x0807060504030201,
-                    0x100F0E0D0C0B0A09,
-                    0x1817161514131211,
-                    0x201F1E1D1C1B1A19
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn fat_vector_half_shift_in_one_byte() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load_half([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-            ]);
-            let v2 = load_half([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.half_shift_in_one_byte(v2)),
-                [
-                    32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32,
-                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn fat_vector_half_shift_in_two_bytes() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load_half([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-            ]);
-            let v2 = load_half([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.half_shift_in_two_bytes(v2)),
-                [
-                    31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31,
-                    32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn fat_vector_half_shift_in_three_bytes() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load_half([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-            ]);
-            let v2 = load_half([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.half_shift_in_three_bytes(v2)),
-                [
-                    30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 30,
-                    31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn fat_vector_swap_halves() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v.swap_halves()),
-                [
-                    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
-                    31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
-                    16,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn fat_vector_interleave_low_8bit_lanes() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            let v2 = load([
-                33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-                48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
-                63, 64,
-            ]);
-            assert_eq!(
-                unload(v1.interleave_low_8bit_lanes(v2)),
-                [
-                    1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39, 8, 40,
-                    17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55,
-                    24, 56,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn fat_vector_interleave_high_8bit_lanes() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            let v2 = load([
-                33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-                48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
-                63, 64,
-            ]);
-            assert_eq!(
-                unload(v1.interleave_high_8bit_lanes(v2)),
-                [
-                    9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47, 16,
-                    48, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31,
-                    63, 32, 64,
-                ],
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn fat_vector_for_each_low_64bit_lane() {
-        #[target_feature(enable = "avx2")]
-        unsafe fn test() {
-            let v1 = load([
-                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
-                0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
-                0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
-                0x1F, 0x20,
-            ]);
-            let v2 = load([
-                0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
-                0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34,
-                0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E,
-                0x3F, 0x40,
-            ]);
-            let mut lanes = [0u64; 4];
-            v1.for_each_low_64bit_lane(v2, |i, lane| {
-                lanes[i] = lane;
-                None::<()>
-            });
-            assert_eq!(
-                lanes,
-                [
-                    0x0807060504030201,
-                    0x100F0E0D0C0B0A09,
-                    0x2827262524232221,
-                    0x302F2E2D2C2B2A29
-                ]
-            );
-        }
-        if !is_runnable() {
-            return;
-        }
-        unsafe { test() }
-    }
-}
-
-#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
-mod tests_aarch64_neon {
-    use core::arch::aarch64::*;
-
-    use super::*;
-
-    #[target_feature(enable = "neon")]
-    unsafe fn load(lanes: [u8; 16]) -> uint8x16_t {
-        uint8x16_t::load_unaligned(&lanes as *const u8)
-    }
-
-    #[target_feature(enable = "neon")]
-    unsafe fn unload(v: uint8x16_t) -> [u8; 16] {
-        [
-            vgetq_lane_u8(v, 0),
-            vgetq_lane_u8(v, 1),
-            vgetq_lane_u8(v, 2),
-            vgetq_lane_u8(v, 3),
-            vgetq_lane_u8(v, 4),
-            vgetq_lane_u8(v, 5),
-            vgetq_lane_u8(v, 6),
-            vgetq_lane_u8(v, 7),
-            vgetq_lane_u8(v, 8),
-            vgetq_lane_u8(v, 9),
-            vgetq_lane_u8(v, 10),
-            vgetq_lane_u8(v, 11),
-            vgetq_lane_u8(v, 12),
-            vgetq_lane_u8(v, 13),
-            vgetq_lane_u8(v, 14),
-            vgetq_lane_u8(v, 15),
-        ]
-    }
-
-    // Example functions. These don't test the Vector traits, but rather,
-    // specific NEON instructions. They are basically little experiments I
-    // wrote to figure out what an instruction does since their descriptions
-    // are so dense. I decided to keep the experiments around as example tests
-    // in case there' useful.
-
-    #[test]
-    fn example_vmaxvq_u8_non_zero() {
-        #[target_feature(enable = "neon")]
-        unsafe fn example() {
-            let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert_eq!(vmaxvq_u8(v), 1);
-        }
-        unsafe { example() }
-    }
-
-    #[test]
-    fn example_vmaxvq_u8_zero() {
-        #[target_feature(enable = "neon")]
-        unsafe fn example() {
-            let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert_eq!(vmaxvq_u8(v), 0);
-        }
-        unsafe { example() }
-    }
-
-    #[test]
-    fn example_vpmaxq_u8_non_zero() {
-        #[target_feature(enable = "neon")]
-        unsafe fn example() {
-            let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            let r = vpmaxq_u8(v, v);
-            assert_eq!(
-                unload(r),
-                [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
-            );
-        }
-        unsafe { example() }
-    }
-
-    #[test]
-    fn example_vpmaxq_u8_self() {
-        #[target_feature(enable = "neon")]
-        unsafe fn example() {
-            let v =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let r = vpmaxq_u8(v, v);
-            assert_eq!(
-                unload(r),
-                [2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16]
-            );
-        }
-        unsafe { example() }
-    }
-
-    #[test]
-    fn example_vpmaxq_u8_other() {
-        #[target_feature(enable = "neon")]
-        unsafe fn example() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 = load([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            let r = vpmaxq_u8(v1, v2);
-            assert_eq!(
-                unload(r),
-                [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32]
-            );
-        }
-        unsafe { example() }
-    }
-
-    // Now we test the actual methods on the Vector trait.
-
-    #[test]
-    fn vector_splat() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v = uint8x16_t::splat(0xAF);
-            assert_eq!(
-                unload(v),
-                [
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
-                    0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF
-                ]
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_is_zero() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert!(!v.is_zero());
-            let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert!(v.is_zero());
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_cmpeq() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1]);
-            let v2 =
-                load([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]);
-            assert_eq!(
-                unload(v1.cmpeq(v2)),
-                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF]
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_and() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v1 =
-                load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            let v2 =
-                load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert_eq!(
-                unload(v1.and(v2)),
-                [0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_or() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v1 =
-                load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            let v2 =
-                load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-            assert_eq!(
-                unload(v1.or(v2)),
-                [0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_8bit_lane_right() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v = load([
-                0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ]);
-            assert_eq!(
-                unload(v.shift_8bit_lane_right::<2>()),
-                [0, 0, 0, 0, 0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_one_byte() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 = load([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_one_byte(v2)),
-                [32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_two_bytes() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 = load([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_two_bytes(v2)),
-                [31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shift_in_three_bytes() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 = load([
-                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
-            ]);
-            assert_eq!(
-                unload(v1.shift_in_three_bytes(v2)),
-                [30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_shuffle_bytes() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v1 =
-                load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-            let v2 =
-                load([0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12]);
-            assert_eq!(
-                unload(v1.shuffle_bytes(v2)),
-                [1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13],
-            );
-        }
-        unsafe { test() }
-    }
-
-    #[test]
-    fn vector_for_each_64bit_lane() {
-        #[target_feature(enable = "neon")]
-        unsafe fn test() {
-            let v = load([
-                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
-                0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
-            ]);
-            let mut lanes = [0u64; 2];
-            v.for_each_64bit_lane(|i, lane| {
-                lanes[i] = lane;
-                None::<()>
-            });
-            assert_eq!(lanes, [0x0807060504030201, 0x100F0E0D0C0B0A09],);
-        }
-        unsafe { test() }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/tests.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/tests.rs
deleted file mode 100644
index a5276f85..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/tests.rs
+++ /dev/null
@@ -1,1664 +0,0 @@
-use std::{collections::HashMap, format, string::String, vec::Vec};
-
-use crate::{
-    AhoCorasick, AhoCorasickBuilder, AhoCorasickKind, Anchored, Input, Match,
-    MatchKind, StartKind,
-};
-
-/// A description of a single test against an Aho-Corasick automaton.
-///
-/// A single test may not necessarily pass on every configuration of an
-/// Aho-Corasick automaton. The tests are categorized and grouped appropriately
-/// below.
-#[derive(Clone, Debug, Eq, PartialEq)]
-struct SearchTest {
-    /// The name of this test, for debugging.
-    name: &'static str,
-    /// The patterns to search for.
-    patterns: &'static [&'static str],
-    /// The text to search.
-    haystack: &'static str,
-    /// Each match is a triple of (pattern_index, start, end), where
-    /// pattern_index is an index into `patterns` and `start`/`end` are indices
-    /// into `haystack`.
-    matches: &'static [(usize, usize, usize)],
-}
-
-/// Short-hand constructor for SearchTest. We use it a lot below.
-macro_rules! t {
-    ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => {
-        SearchTest {
-            name: stringify!($name),
-            patterns: $patterns,
-            haystack: $haystack,
-            matches: $matches,
-        }
-    };
-}
-
-/// A collection of test groups.
-type TestCollection = &'static [&'static [SearchTest]];
-
-// Define several collections corresponding to the different type of match
-// semantics supported by Aho-Corasick. These collections have some overlap,
-// but each collection should have some tests that no other collection has.
-
-/// Tests for Aho-Corasick's standard non-overlapping match semantics.
-const AC_STANDARD_NON_OVERLAPPING: TestCollection =
-    &[BASICS, NON_OVERLAPPING, STANDARD, REGRESSION];
-
-/// Tests for Aho-Corasick's anchored standard non-overlapping match semantics.
-const AC_STANDARD_ANCHORED_NON_OVERLAPPING: TestCollection =
-    &[ANCHORED_BASICS, ANCHORED_NON_OVERLAPPING, STANDARD_ANCHORED];
-
-/// Tests for Aho-Corasick's standard overlapping match semantics.
-const AC_STANDARD_OVERLAPPING: TestCollection =
-    &[BASICS, OVERLAPPING, REGRESSION];
-
-/*
-Iterators of anchored overlapping searches were removed from the API in
-after 0.7, but we leave the tests commented out for posterity.
-/// Tests for Aho-Corasick's anchored standard overlapping match semantics.
-const AC_STANDARD_ANCHORED_OVERLAPPING: TestCollection =
-    &[ANCHORED_BASICS, ANCHORED_OVERLAPPING];
-*/
-
-/// Tests for Aho-Corasick's leftmost-first match semantics.
-const AC_LEFTMOST_FIRST: TestCollection =
-    &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_FIRST, REGRESSION];
-
-/// Tests for Aho-Corasick's anchored leftmost-first match semantics.
-const AC_LEFTMOST_FIRST_ANCHORED: TestCollection = &[
-    ANCHORED_BASICS,
-    ANCHORED_NON_OVERLAPPING,
-    ANCHORED_LEFTMOST,
-    ANCHORED_LEFTMOST_FIRST,
-];
-
-/// Tests for Aho-Corasick's leftmost-longest match semantics.
-const AC_LEFTMOST_LONGEST: TestCollection =
-    &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_LONGEST, REGRESSION];
-
-/// Tests for Aho-Corasick's anchored leftmost-longest match semantics.
-const AC_LEFTMOST_LONGEST_ANCHORED: TestCollection = &[
-    ANCHORED_BASICS,
-    ANCHORED_NON_OVERLAPPING,
-    ANCHORED_LEFTMOST,
-    ANCHORED_LEFTMOST_LONGEST,
-];
-
-// Now define the individual tests that make up the collections above.
-
-/// A collection of tests for the Aho-Corasick algorithm that should always be
-/// true regardless of match semantics. That is, all combinations of
-/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping}
-/// should produce the same answer.
-const BASICS: &'static [SearchTest] = &[
-    t!(basic000, &[], "", &[]),
-    t!(basic001, &[""], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(basic002, &["a"], "", &[]),
-    t!(basic010, &["a"], "a", &[(0, 0, 1)]),
-    t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]),
-    t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]),
-    t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]),
-    t!(basic050, &["a"], "bba", &[(0, 2, 3)]),
-    t!(basic060, &["a"], "bbb", &[]),
-    t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]),
-    t!(basic100, &["aa"], "", &[]),
-    t!(basic110, &["aa"], "aa", &[(0, 0, 2)]),
-    t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]),
-    t!(basic130, &["aa"], "abbab", &[]),
-    t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]),
-    t!(basic200, &["abc"], "abc", &[(0, 0, 3)]),
-    t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]),
-    t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]),
-    t!(basic300, &["a", "b"], "", &[]),
-    t!(basic310, &["a", "b"], "z", &[]),
-    t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]),
-    t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]),
-    t!(
-        basic340,
-        &["a", "b"],
-        "abba",
-        &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),]
-    ),
-    t!(
-        basic350,
-        &["b", "a"],
-        "abba",
-        &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),]
-    ),
-    t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]),
-    t!(basic400, &["foo", "bar"], "", &[]),
-    t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]),
-    t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]),
-    t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]),
-    t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]),
-    t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]),
-    t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]),
-    t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]),
-    t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]),
-    t!(basic600, &[""], "", &[(0, 0, 0)]),
-    t!(basic610, &[""], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(basic620, &[""], "abc", &[(0, 0, 0), (0, 1, 1), (0, 2, 2), (0, 3, 3)]),
-    t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]),
-    t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", &[(1, 1, 10),]),
-    t!(
-        basic720,
-        &["yabcdef", "bcdeyabc", "abcdezghi"],
-        "yabcdezghi",
-        &[(2, 1, 10),]
-    ),
-];
-
-/// A collection of *anchored* tests for the Aho-Corasick algorithm that should
-/// always be true regardless of match semantics. That is, all combinations of
-/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping} should
-/// produce the same answer.
-const ANCHORED_BASICS: &'static [SearchTest] = &[
-    t!(abasic000, &[], "", &[]),
-    t!(abasic001, &[], "a", &[]),
-    t!(abasic002, &[], "abc", &[]),
-    t!(abasic010, &[""], "", &[(0, 0, 0)]),
-    t!(abasic020, &[""], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(abasic030, &[""], "abc", &[(0, 0, 0), (0, 1, 1), (0, 2, 2), (0, 3, 3)]),
-    t!(abasic100, &["a"], "a", &[(0, 0, 1)]),
-    t!(abasic110, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]),
-    t!(abasic120, &["a", "b"], "ab", &[(0, 0, 1), (1, 1, 2)]),
-    t!(abasic130, &["a", "b"], "ba", &[(1, 0, 1), (0, 1, 2)]),
-    t!(abasic140, &["foo", "foofoo"], "foo", &[(0, 0, 3)]),
-    t!(abasic150, &["foofoo", "foo"], "foo", &[(1, 0, 3)]),
-    t!(abasic200, &["foo"], "foofoo foo", &[(0, 0, 3), (0, 3, 6)]),
-];
-
-/// Tests for non-overlapping standard match semantics.
-///
-/// These tests generally shouldn't pass for leftmost-{first,longest}, although
-/// some do in order to write clearer tests. For example, standard000 will
-/// pass with leftmost-first semantics, but standard010 will not. We write
-/// both to emphasize how the match semantics work.
-const STANDARD: &'static [SearchTest] = &[
-    t!(standard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]),
-    t!(standard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]),
-    t!(standard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]),
-    t!(standard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]),
-    t!(standard040, &["a", ""], "a", &[(1, 0, 0), (1, 1, 1)]),
-    t!(
-        standard400,
-        &["abcd", "bcd", "cd", "b"],
-        "abcd",
-        &[(3, 1, 2), (2, 2, 4),]
-    ),
-    t!(standard410, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1),]),
-    t!(standard420, &["", "a"], "aa", &[(0, 0, 0), (0, 1, 1), (0, 2, 2),]),
-    t!(standard430, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]),
-    t!(standard440, &["a", "", ""], "a", &[(1, 0, 0), (1, 1, 1),]),
-    t!(standard450, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1),]),
-];
-
-/// Like STANDARD, but for anchored searches.
-const STANDARD_ANCHORED: &'static [SearchTest] = &[
-    t!(astandard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]),
-    t!(astandard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]),
-    t!(astandard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]),
-    t!(astandard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]),
-    t!(astandard040, &["a", ""], "a", &[(1, 0, 0), (1, 1, 1)]),
-    t!(astandard050, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]),
-    t!(astandard410, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(astandard420, &["", "a"], "aa", &[(0, 0, 0), (0, 1, 1), (0, 2, 2)]),
-    t!(astandard430, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(astandard440, &["a", "", ""], "a", &[(1, 0, 0), (1, 1, 1)]),
-    t!(astandard450, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]),
-];
-
-/// Tests for non-overlapping leftmost match semantics. These should pass for
-/// both leftmost-first and leftmost-longest match kinds. Stated differently,
-/// among ambiguous matches, the longest match and the match that appeared
-/// first when constructing the automaton should always be the same.
-const LEFTMOST: &'static [SearchTest] = &[
-    t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]),
-    t!(leftmost010, &["a", ""], "a", &[(0, 0, 1)]),
-    t!(leftmost011, &["a", ""], "ab", &[(0, 0, 1), (1, 2, 2)]),
-    t!(leftmost020, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]),
-    t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]),
-    t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]),
-    t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]),
-    t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]),
-    t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]),
-    t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]),
-    t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]),
-    t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]),
-    t!(
-        leftmost360,
-        &["abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(2, 0, 8),]
-    ),
-    t!(
-        leftmost370,
-        &["abcdefghi", "cde", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        leftmost380,
-        &["abcdefghi", "hz", "abcdefgh", "a"],
-        "abcdefghz",
-        &[(2, 0, 8),]
-    ),
-    t!(
-        leftmost390,
-        &["b", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        leftmost400,
-        &["h", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        leftmost410,
-        &["z", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8), (0, 8, 9),]
-    ),
-];
-
-/// Like LEFTMOST, but for anchored searches.
-const ANCHORED_LEFTMOST: &'static [SearchTest] = &[
-    t!(aleftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]),
-    // We shouldn't allow an empty match immediately following a match, right?
-    t!(aleftmost010, &["a", ""], "a", &[(0, 0, 1)]),
-    t!(aleftmost020, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(aleftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]),
-    t!(aleftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]),
-    t!(aleftmost032, &["ab", "a"], "xayabbbz", &[]),
-    t!(aleftmost300, &["abcd", "bce", "b"], "abce", &[]),
-    t!(aleftmost301, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]),
-    t!(aleftmost310, &["abcd", "ce", "bc"], "abce", &[]),
-    t!(aleftmost320, &["abcd", "bce", "ce", "b"], "abce", &[]),
-    t!(aleftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[]),
-    t!(aleftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]),
-    t!(aleftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]),
-    t!(
-        aleftmost360,
-        &["abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(2, 0, 8),]
-    ),
-    t!(
-        aleftmost370,
-        &["abcdefghi", "cde", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        aleftmost380,
-        &["abcdefghi", "hz", "abcdefgh", "a"],
-        "abcdefghz",
-        &[(2, 0, 8),]
-    ),
-    t!(
-        aleftmost390,
-        &["b", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        aleftmost400,
-        &["h", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(
-        aleftmost410,
-        &["z", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghzyz",
-        &[(3, 0, 8), (0, 8, 9)]
-    ),
-];
-
-/// Tests for non-overlapping leftmost-first match semantics. These tests
-/// should generally be specific to leftmost-first, which means they should
-/// generally fail under leftmost-longest semantics.
-const LEFTMOST_FIRST: &'static [SearchTest] = &[
-    t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]),
-    t!(leftfirst010, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(leftfirst011, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]),
-    t!(leftfirst012, &["a", "", ""], "a", &[(0, 0, 1)]),
-    t!(leftfirst013, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(leftfirst014, &["a", ""], "a", &[(0, 0, 1)]),
-    t!(leftfirst015, &["a", ""], "ab", &[(0, 0, 1), (1, 2, 2)]),
-    t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]),
-    t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]),
-    t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]),
-    t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]),
-    t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]),
-    t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]),
-    t!(
-        leftfirst310,
-        &["abcd", "b", "bce", "ce"],
-        "abce",
-        &[(1, 1, 2), (3, 2, 4),]
-    ),
-    t!(
-        leftfirst320,
-        &["a", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(0, 0, 1), (2, 7, 9),]
-    ),
-    t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]),
-    t!(leftfirst400, &["amwix", "samwise", "sam"], "Zsamwix", &[(2, 1, 4)]),
-];
-
-/// Like LEFTMOST_FIRST, but for anchored searches.
-const ANCHORED_LEFTMOST_FIRST: &'static [SearchTest] = &[
-    t!(aleftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]),
-    t!(aleftfirst010, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(aleftfirst011, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(aleftfirst012, &["a", "", ""], "a", &[(0, 0, 1)]),
-    t!(aleftfirst013, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]),
-    t!(aleftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]),
-    t!(aleftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]),
-    t!(aleftfirst040, &["a", "ab"], "xayabbbz", &[]),
-    t!(aleftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]),
-    t!(aleftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]),
-    t!(aleftfirst300, &["abcd", "b", "bce"], "abce", &[]),
-    t!(aleftfirst310, &["abcd", "b", "bce", "ce"], "abce", &[]),
-    t!(
-        aleftfirst320,
-        &["a", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(0, 0, 1)]
-    ),
-    t!(aleftfirst330, &["a", "abab"], "abab", &[(0, 0, 1)]),
-    t!(aleftfirst400, &["wise", "samwise", "sam"], "samwix", &[(2, 0, 3)]),
-];
-
-/// Tests for non-overlapping leftmost-longest match semantics. These tests
-/// should generally be specific to leftmost-longest, which means they should
-/// generally fail under leftmost-first semantics.
-const LEFTMOST_LONGEST: &'static [SearchTest] = &[
-    t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]),
-    t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]),
-    t!(leftlong020, &["", "a"], "a", &[(1, 0, 1)]),
-    t!(leftlong021, &["", "a", ""], "a", &[(1, 0, 1)]),
-    t!(leftlong022, &["a", "", ""], "a", &[(0, 0, 1)]),
-    t!(leftlong023, &["", "", "a"], "a", &[(2, 0, 1)]),
-    t!(leftlong024, &["", "a"], "ab", &[(1, 0, 1), (0, 2, 2)]),
-    t!(leftlong030, &["", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]),
-    t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]),
-    t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]),
-    t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]),
-    t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]),
-    t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]),
-    t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]),
-    t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]),
-    t!(
-        leftlong310,
-        &["a", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]),
-    t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]),
-    t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]),
-];
-
-/// Like LEFTMOST_LONGEST, but for anchored searches.
-const ANCHORED_LEFTMOST_LONGEST: &'static [SearchTest] = &[
-    t!(aleftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]),
-    t!(aleftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]),
-    t!(aleftlong020, &["", "a"], "a", &[(1, 0, 1)]),
-    t!(aleftlong021, &["", "a", ""], "a", &[(1, 0, 1)]),
-    t!(aleftlong022, &["a", "", ""], "a", &[(0, 0, 1)]),
-    t!(aleftlong023, &["", "", "a"], "a", &[(2, 0, 1)]),
-    t!(aleftlong030, &["", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]),
-    t!(aleftlong040, &["a", "ab"], "a", &[(0, 0, 1)]),
-    t!(aleftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]),
-    t!(aleftlong060, &["ab", "a"], "a", &[(1, 0, 1)]),
-    t!(aleftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]),
-    t!(aleftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]),
-    t!(aleftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]),
-    t!(aleftlong300, &["abcd", "b", "bce"], "abce", &[]),
-    t!(
-        aleftlong310,
-        &["a", "abcdefghi", "hz", "abcdefgh"],
-        "abcdefghz",
-        &[(3, 0, 8),]
-    ),
-    t!(aleftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]),
-    t!(aleftlong330, &["abcd", "b", "ce"], "abce", &[]),
-    t!(aleftlong340, &["a", "ab"], "xayabbbz", &[]),
-];
-
-/// Tests for non-overlapping match semantics.
-///
-/// Generally these tests shouldn't pass when using overlapping semantics.
-/// These should pass for both standard and leftmost match semantics.
-const NON_OVERLAPPING: &'static [SearchTest] = &[
-    t!(nover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]),
-    t!(nover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]),
-    t!(nover030, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]),
-    t!(
-        nover100,
-        &["ab", "ba"],
-        "abababa",
-        &[(0, 0, 2), (0, 2, 4), (0, 4, 6),]
-    ),
-    t!(nover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]),
-    t!(nover300, &["", ""], "", &[(0, 0, 0),]),
-    t!(nover310, &["", ""], "a", &[(0, 0, 0), (0, 1, 1),]),
-];
-
-/// Like NON_OVERLAPPING, but for anchored searches.
-const ANCHORED_NON_OVERLAPPING: &'static [SearchTest] = &[
-    t!(anover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]),
-    t!(anover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]),
-    t!(anover030, &["abc", "bc"], "zazabcz", &[]),
-    t!(
-        anover100,
-        &["ab", "ba"],
-        "abababa",
-        &[(0, 0, 2), (0, 2, 4), (0, 4, 6)]
-    ),
-    t!(anover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3)]),
-    t!(anover300, &["", ""], "", &[(0, 0, 0)]),
-    t!(anover310, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]),
-];
-
-/// Tests for overlapping match semantics.
-///
-/// This only supports standard match semantics, since leftmost-{first,longest}
-/// do not support overlapping matches.
-const OVERLAPPING: &'static [SearchTest] = &[
-    t!(
-        over000,
-        &["abcd", "bcd", "cd", "b"],
-        "abcd",
-        &[(3, 1, 2), (0, 0, 4), (1, 1, 4), (2, 2, 4),]
-    ),
-    t!(
-        over010,
-        &["bcd", "cd", "b", "abcd"],
-        "abcd",
-        &[(2, 1, 2), (3, 0, 4), (0, 1, 4), (1, 2, 4),]
-    ),
-    t!(
-        over020,
-        &["abcd", "bcd", "cd"],
-        "abcd",
-        &[(0, 0, 4), (1, 1, 4), (2, 2, 4),]
-    ),
-    t!(
-        over030,
-        &["bcd", "abcd", "cd"],
-        "abcd",
-        &[(1, 0, 4), (0, 1, 4), (2, 2, 4),]
-    ),
-    t!(
-        over040,
-        &["bcd", "cd", "abcd"],
-        "abcd",
-        &[(2, 0, 4), (0, 1, 4), (1, 2, 4),]
-    ),
-    t!(over050, &["abc", "bc"], "zazabcz", &[(0, 3, 6), (1, 4, 6),]),
-    t!(
-        over100,
-        &["ab", "ba"],
-        "abababa",
-        &[(0, 0, 2), (1, 1, 3), (0, 2, 4), (1, 3, 5), (0, 4, 6), (1, 5, 7),]
-    ),
-    t!(
-        over200,
-        &["foo", "foo"],
-        "foobarfoo",
-        &[(0, 0, 3), (1, 0, 3), (0, 6, 9), (1, 6, 9),]
-    ),
-    t!(over300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]),
-    t!(
-        over310,
-        &["", ""],
-        "a",
-        &[(0, 0, 0), (1, 0, 0), (0, 1, 1), (1, 1, 1),]
-    ),
-    t!(over320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1), (0, 1, 1),]),
-    t!(
-        over330,
-        &["", "a", ""],
-        "a",
-        &[(0, 0, 0), (2, 0, 0), (1, 0, 1), (0, 1, 1), (2, 1, 1),]
-    ),
-    t!(
-        over340,
-        &["a", "", ""],
-        "a",
-        &[(1, 0, 0), (2, 0, 0), (0, 0, 1), (1, 1, 1), (2, 1, 1),]
-    ),
-    t!(
-        over350,
-        &["", "", "a"],
-        "a",
-        &[(0, 0, 0), (1, 0, 0), (2, 0, 1), (0, 1, 1), (1, 1, 1),]
-    ),
-    t!(
-        over360,
-        &["foo", "foofoo"],
-        "foofoo",
-        &[(0, 0, 3), (1, 0, 6), (0, 3, 6)]
-    ),
-];
-
-/*
-Iterators of anchored overlapping searches were removed from the API in
-after 0.7, but we leave the tests commented out for posterity.
-/// Like OVERLAPPING, but for anchored searches.
-const ANCHORED_OVERLAPPING: &'static [SearchTest] = &[
-    t!(aover000, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]),
-    t!(aover010, &["bcd", "cd", "b", "abcd"], "abcd", &[(3, 0, 4)]),
-    t!(aover020, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4)]),
-    t!(aover030, &["bcd", "abcd", "cd"], "abcd", &[(1, 0, 4)]),
-    t!(aover040, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4)]),
-    t!(aover050, &["abc", "bc"], "zazabcz", &[]),
-    t!(aover100, &["ab", "ba"], "abababa", &[(0, 0, 2)]),
-    t!(aover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (1, 0, 3)]),
-    t!(aover300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]),
-    t!(aover310, &["", ""], "a", &[(0, 0, 0), (1, 0, 0)]),
-    t!(aover320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1)]),
-    t!(aover330, &["", "a", ""], "a", &[(0, 0, 0), (2, 0, 0), (1, 0, 1)]),
-    t!(aover340, &["a", "", ""], "a", &[(1, 0, 0), (2, 0, 0), (0, 0, 1)]),
-    t!(aover350, &["", "", "a"], "a", &[(0, 0, 0), (1, 0, 0), (2, 0, 1)]),
-    t!(aover360, &["foo", "foofoo"], "foofoo", &[(0, 0, 3), (1, 0, 6)]),
-];
-*/
-
-/// Tests for ASCII case insensitivity.
-///
-/// These tests should all have the same behavior regardless of match semantics
-/// or whether the search is overlapping.
-const ASCII_CASE_INSENSITIVE: &'static [SearchTest] = &[
-    t!(acasei000, &["a"], "A", &[(0, 0, 1)]),
-    t!(acasei010, &["Samwise"], "SAMWISE", &[(0, 0, 7)]),
-    t!(acasei011, &["Samwise"], "SAMWISE.abcd", &[(0, 0, 7)]),
-    t!(acasei020, &["fOoBaR"], "quux foobar baz", &[(0, 5, 11)]),
-];
-
-/// Like ASCII_CASE_INSENSITIVE, but specifically for non-overlapping tests.
-const ASCII_CASE_INSENSITIVE_NON_OVERLAPPING: &'static [SearchTest] = &[
-    t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3)]),
-    t!(acasei000, &["FOO", "foo"], "fOo", &[(0, 0, 3)]),
-    t!(acasei010, &["abc", "def"], "abcdef", &[(0, 0, 3), (1, 3, 6)]),
-];
-
-/// Like ASCII_CASE_INSENSITIVE, but specifically for overlapping tests.
-const ASCII_CASE_INSENSITIVE_OVERLAPPING: &'static [SearchTest] = &[
-    t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3), (1, 0, 3)]),
-    t!(acasei001, &["FOO", "foo"], "fOo", &[(0, 0, 3), (1, 0, 3)]),
-    // This is a regression test from:
-    // https://github.com/BurntSushi/aho-corasick/issues/68
-    // Previously, it was reporting a duplicate (1, 3, 6) match.
-    t!(
-        acasei010,
-        &["abc", "def", "abcdef"],
-        "abcdef",
-        &[(0, 0, 3), (2, 0, 6), (1, 3, 6)]
-    ),
-];
-
-/// Regression tests that are applied to all Aho-Corasick combinations.
-///
-/// If regression tests are needed for specific match semantics, then add them
-/// to the appropriate group above.
-const REGRESSION: &'static [SearchTest] = &[
-    t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]),
-    t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]),
-    t!(
-        regression030,
-        &["libcore/", "libstd/"],
-        "libcore/char/methods.rs",
-        &[(0, 0, 8),]
-    ),
-    t!(
-        regression040,
-        &["libstd/", "libcore/"],
-        "libcore/char/methods.rs",
-        &[(1, 0, 8),]
-    ),
-    t!(
-        regression050,
-        &["\x00\x00\x01", "\x00\x00\x00"],
-        "\x00\x00\x00",
-        &[(1, 0, 3),]
-    ),
-    t!(
-        regression060,
-        &["\x00\x00\x00", "\x00\x00\x01"],
-        "\x00\x00\x00",
-        &[(0, 0, 3),]
-    ),
-];
-
-// Now define a test for each combination of things above that we want to run.
-// Since there are a few different combinations for each collection of tests,
-// we define a couple of macros to avoid repetition drudgery. The testconfig
-// macro constructs the automaton from a given match kind, and runs the search
-// tests one-by-one over the given collection. The `with` parameter allows one
-// to configure the builder with additional parameters. The testcombo macro
-// invokes testconfig in precisely this way: it sets up several tests where
-// each one turns a different knob on AhoCorasickBuilder.
-
-macro_rules! testconfig {
-    (anchored, $name:ident, $collection:expr, $kind:ident, $with:expr) => {
-        #[test]
-        fn $name() {
-            run_search_tests($collection, |test| {
-                let mut builder = AhoCorasick::builder();
-                $with(&mut builder);
-                let input = Input::new(test.haystack).anchored(Anchored::Yes);
-                builder
-                    .match_kind(MatchKind::$kind)
-                    .build(test.patterns)
-                    .unwrap()
-                    .try_find_iter(input)
-                    .unwrap()
-                    .collect()
-            });
-        }
-    };
-    (overlapping, $name:ident, $collection:expr, $kind:ident, $with:expr) => {
-        #[test]
-        fn $name() {
-            run_search_tests($collection, |test| {
-                let mut builder = AhoCorasick::builder();
-                $with(&mut builder);
-                builder
-                    .match_kind(MatchKind::$kind)
-                    .build(test.patterns)
-                    .unwrap()
-                    .find_overlapping_iter(test.haystack)
-                    .collect()
-            });
-        }
-    };
-    (stream, $name:ident, $collection:expr, $kind:ident, $with:expr) => {
-        #[test]
-        fn $name() {
-            run_stream_search_tests($collection, |test| {
-                let buf = std::io::BufReader::with_capacity(
-                    1,
-                    test.haystack.as_bytes(),
-                );
-                let mut builder = AhoCorasick::builder();
-                $with(&mut builder);
-                builder
-                    .match_kind(MatchKind::$kind)
-                    .build(test.patterns)
-                    .unwrap()
-                    .stream_find_iter(buf)
-                    .map(|result| result.unwrap())
-                    .collect()
-            });
-        }
-    };
-    ($name:ident, $collection:expr, $kind:ident, $with:expr) => {
-        #[test]
-        fn $name() {
-            run_search_tests($collection, |test| {
-                let mut builder = AhoCorasick::builder();
-                $with(&mut builder);
-                builder
-                    .match_kind(MatchKind::$kind)
-                    .build(test.patterns)
-                    .unwrap()
-                    .find_iter(test.haystack)
-                    .collect()
-            });
-        }
-    };
-}
-
-macro_rules! testcombo {
-    ($name:ident, $collection:expr, $kind:ident) => {
-        mod $name {
-            use super::*;
-
-            testconfig!(default, $collection, $kind, |_| ());
-            testconfig!(
-                nfa_default,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::NoncontiguousNFA));
-                }
-            );
-            testconfig!(
-                nfa_noncontig_no_prefilter,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::NoncontiguousNFA))
-                        .prefilter(false);
-                }
-            );
-            testconfig!(
-                nfa_noncontig_all_sparse,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::NoncontiguousNFA))
-                        .dense_depth(0);
-                }
-            );
-            testconfig!(
-                nfa_noncontig_all_dense,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::NoncontiguousNFA))
-                        .dense_depth(usize::MAX);
-                }
-            );
-            testconfig!(
-                nfa_contig_default,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::ContiguousNFA));
-                }
-            );
-            testconfig!(
-                nfa_contig_no_prefilter,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::ContiguousNFA))
-                        .prefilter(false);
-                }
-            );
-            testconfig!(
-                nfa_contig_all_sparse,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::ContiguousNFA))
-                        .dense_depth(0);
-                }
-            );
-            testconfig!(
-                nfa_contig_all_dense,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::ContiguousNFA))
-                        .dense_depth(usize::MAX);
-                }
-            );
-            testconfig!(
-                nfa_contig_no_byte_class,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::ContiguousNFA))
-                        .byte_classes(false);
-                }
-            );
-            testconfig!(
-                dfa_default,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::DFA));
-                }
-            );
-            testconfig!(
-                dfa_start_both,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::DFA))
-                        .start_kind(StartKind::Both);
-                }
-            );
-            testconfig!(
-                dfa_no_prefilter,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::DFA)).prefilter(false);
-                }
-            );
-            testconfig!(
-                dfa_start_both_no_prefilter,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::DFA))
-                        .start_kind(StartKind::Both)
-                        .prefilter(false);
-                }
-            );
-            testconfig!(
-                dfa_no_byte_class,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::DFA)).byte_classes(false);
-                }
-            );
-            testconfig!(
-                dfa_start_both_no_byte_class,
-                $collection,
-                $kind,
-                |b: &mut AhoCorasickBuilder| {
-                    b.kind(Some(AhoCorasickKind::DFA))
-                        .start_kind(StartKind::Both)
-                        .byte_classes(false);
-                }
-            );
-        }
-    };
-}
-
-// Write out the various combinations of match semantics given the variety of
-// configurations tested by 'testcombo!'.
-testcombo!(search_leftmost_longest, AC_LEFTMOST_LONGEST, LeftmostLongest);
-testcombo!(search_leftmost_first, AC_LEFTMOST_FIRST, LeftmostFirst);
-testcombo!(
-    search_standard_nonoverlapping,
-    AC_STANDARD_NON_OVERLAPPING,
-    Standard
-);
-
-// Write out the overlapping combo by hand since there is only one of them.
-testconfig!(
-    overlapping,
-    search_standard_overlapping_default,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |_| ()
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_nfa_noncontig_default,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::NoncontiguousNFA));
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_nfa_noncontig_no_prefilter,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::NoncontiguousNFA)).prefilter(false);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_nfa_contig_default,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA));
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_nfa_contig_no_prefilter,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA)).prefilter(false);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_nfa_contig_all_sparse,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA)).dense_depth(0);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_nfa_contig_all_dense,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA)).dense_depth(usize::MAX);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_dfa_default,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA));
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_dfa_start_both,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA)).start_kind(StartKind::Both);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_dfa_no_prefilter,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA)).prefilter(false);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_dfa_start_both_no_prefilter,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA))
-            .start_kind(StartKind::Both)
-            .prefilter(false);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_dfa_no_byte_class,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA)).byte_classes(false);
-    }
-);
-testconfig!(
-    overlapping,
-    search_standard_overlapping_dfa_start_both_no_byte_class,
-    AC_STANDARD_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA))
-            .start_kind(StartKind::Both)
-            .byte_classes(false);
-    }
-);
-
-// Also write out tests manually for streams, since we only test the standard
-// match semantics. We also don't bother testing different automaton
-// configurations, since those are well covered by tests above.
-#[cfg(feature = "std")]
-testconfig!(
-    stream,
-    search_standard_stream_default,
-    AC_STANDARD_NON_OVERLAPPING,
-    Standard,
-    |_| ()
-);
-#[cfg(feature = "std")]
-testconfig!(
-    stream,
-    search_standard_stream_nfa_noncontig_default,
-    AC_STANDARD_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::NoncontiguousNFA));
-    }
-);
-#[cfg(feature = "std")]
-testconfig!(
-    stream,
-    search_standard_stream_nfa_contig_default,
-    AC_STANDARD_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA));
-    }
-);
-#[cfg(feature = "std")]
-testconfig!(
-    stream,
-    search_standard_stream_dfa_default,
-    AC_STANDARD_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA));
-    }
-);
-
-// Same thing for anchored searches. Write them out manually.
-testconfig!(
-    anchored,
-    search_standard_anchored_default,
-    AC_STANDARD_ANCHORED_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored);
-    }
-);
-testconfig!(
-    anchored,
-    search_standard_anchored_nfa_noncontig_default,
-    AC_STANDARD_ANCHORED_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored)
-            .kind(Some(AhoCorasickKind::NoncontiguousNFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_standard_anchored_nfa_contig_default,
-    AC_STANDARD_ANCHORED_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored)
-            .kind(Some(AhoCorasickKind::ContiguousNFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_standard_anchored_dfa_default,
-    AC_STANDARD_ANCHORED_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_standard_anchored_dfa_start_both,
-    AC_STANDARD_ANCHORED_NON_OVERLAPPING,
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_first_anchored_default,
-    AC_LEFTMOST_FIRST_ANCHORED,
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored);
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_first_anchored_nfa_noncontig_default,
-    AC_LEFTMOST_FIRST_ANCHORED,
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored)
-            .kind(Some(AhoCorasickKind::NoncontiguousNFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_first_anchored_nfa_contig_default,
-    AC_LEFTMOST_FIRST_ANCHORED,
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored)
-            .kind(Some(AhoCorasickKind::ContiguousNFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_first_anchored_dfa_default,
-    AC_LEFTMOST_FIRST_ANCHORED,
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_first_anchored_dfa_start_both,
-    AC_LEFTMOST_FIRST_ANCHORED,
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_longest_anchored_default,
-    AC_LEFTMOST_LONGEST_ANCHORED,
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored);
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_longest_anchored_nfa_noncontig_default,
-    AC_LEFTMOST_LONGEST_ANCHORED,
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored)
-            .kind(Some(AhoCorasickKind::NoncontiguousNFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_longest_anchored_nfa_contig_default,
-    AC_LEFTMOST_LONGEST_ANCHORED,
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored)
-            .kind(Some(AhoCorasickKind::ContiguousNFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_longest_anchored_dfa_default,
-    AC_LEFTMOST_LONGEST_ANCHORED,
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA));
-    }
-);
-testconfig!(
-    anchored,
-    search_leftmost_longest_anchored_dfa_start_both,
-    AC_LEFTMOST_LONGEST_ANCHORED,
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA));
-    }
-);
-
-// And also write out the test combinations for ASCII case insensitivity.
-testconfig!(
-    acasei_standard_default,
-    &[ASCII_CASE_INSENSITIVE],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.prefilter(false).ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_standard_nfa_noncontig_default,
-    &[ASCII_CASE_INSENSITIVE],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::NoncontiguousNFA))
-            .prefilter(false)
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_standard_nfa_contig_default,
-    &[ASCII_CASE_INSENSITIVE],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA))
-            .prefilter(false)
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_standard_dfa_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    overlapping,
-    acasei_standard_overlapping_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    overlapping,
-    acasei_standard_overlapping_nfa_noncontig_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::NoncontiguousNFA))
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    overlapping,
-    acasei_standard_overlapping_nfa_contig_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA))
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    overlapping,
-    acasei_standard_overlapping_dfa_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING],
-    Standard,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_first_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_first_nfa_noncontig_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::NoncontiguousNFA))
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_first_nfa_contig_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA))
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_first_dfa_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostFirst,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_longest_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_longest_nfa_noncontig_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::NoncontiguousNFA))
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_longest_nfa_contig_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::ContiguousNFA))
-            .ascii_case_insensitive(true);
-    }
-);
-testconfig!(
-    acasei_leftmost_longest_dfa_default,
-    &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING],
-    LeftmostLongest,
-    |b: &mut AhoCorasickBuilder| {
-        b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true);
-    }
-);
-
-fn run_search_tests<F: FnMut(&SearchTest) -> Vec<Match>>(
-    which: TestCollection,
-    mut f: F,
-) {
-    let get_match_triples =
-        |matches: Vec<Match>| -> Vec<(usize, usize, usize)> {
-            matches
-                .into_iter()
-                .map(|m| (m.pattern().as_usize(), m.start(), m.end()))
-                .collect()
-        };
-    for &tests in which {
-        for test in tests {
-            assert_eq!(
-                test.matches,
-                get_match_triples(f(&test)).as_slice(),
-                "test: {}, patterns: {:?}, haystack: {:?}",
-                test.name,
-                test.patterns,
-                test.haystack
-            );
-        }
-    }
-}
-
-// Like 'run_search_tests', but we skip any tests that contain the empty
-// pattern because stream searching doesn't support it.
-#[cfg(feature = "std")]
-fn run_stream_search_tests<F: FnMut(&SearchTest) -> Vec<Match>>(
-    which: TestCollection,
-    mut f: F,
-) {
-    let get_match_triples =
-        |matches: Vec<Match>| -> Vec<(usize, usize, usize)> {
-            matches
-                .into_iter()
-                .map(|m| (m.pattern().as_usize(), m.start(), m.end()))
-                .collect()
-        };
-    for &tests in which {
-        for test in tests {
-            if test.patterns.iter().any(|p| p.is_empty()) {
-                continue;
-            }
-            assert_eq!(
-                test.matches,
-                get_match_triples(f(&test)).as_slice(),
-                "test: {}, patterns: {:?}, haystack: {:?}",
-                test.name,
-                test.patterns,
-                test.haystack
-            );
-        }
-    }
-}
-
-#[test]
-fn search_tests_have_unique_names() {
-    let assert = |constname, tests: &[SearchTest]| {
-        let mut seen = HashMap::new(); // map from test name to position
-        for (i, test) in tests.iter().enumerate() {
-            if !seen.contains_key(test.name) {
-                seen.insert(test.name, i);
-            } else {
-                let last = seen[test.name];
-                panic!(
-                    "{} tests have duplicate names at positions {} and {}",
-                    constname, last, i
-                );
-            }
-        }
-    };
-    assert("BASICS", BASICS);
-    assert("STANDARD", STANDARD);
-    assert("LEFTMOST", LEFTMOST);
-    assert("LEFTMOST_FIRST", LEFTMOST_FIRST);
-    assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST);
-    assert("NON_OVERLAPPING", NON_OVERLAPPING);
-    assert("OVERLAPPING", OVERLAPPING);
-    assert("REGRESSION", REGRESSION);
-}
-
-#[cfg(feature = "std")]
-#[test]
-#[should_panic]
-fn stream_not_allowed_leftmost_first() {
-    let fsm = AhoCorasick::builder()
-        .match_kind(MatchKind::LeftmostFirst)
-        .build(None::<String>)
-        .unwrap();
-    assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0);
-}
-
-#[cfg(feature = "std")]
-#[test]
-#[should_panic]
-fn stream_not_allowed_leftmost_longest() {
-    let fsm = AhoCorasick::builder()
-        .match_kind(MatchKind::LeftmostLongest)
-        .build(None::<String>)
-        .unwrap();
-    assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0);
-}
-
-#[test]
-#[should_panic]
-fn overlapping_not_allowed_leftmost_first() {
-    let fsm = AhoCorasick::builder()
-        .match_kind(MatchKind::LeftmostFirst)
-        .build(None::<String>)
-        .unwrap();
-    assert_eq!(fsm.find_overlapping_iter("").count(), 0);
-}
-
-#[test]
-#[should_panic]
-fn overlapping_not_allowed_leftmost_longest() {
-    let fsm = AhoCorasick::builder()
-        .match_kind(MatchKind::LeftmostLongest)
-        .build(None::<String>)
-        .unwrap();
-    assert_eq!(fsm.find_overlapping_iter("").count(), 0);
-}
-
-// This tests that if we build an AC matcher with an "unanchored" start kind,
-// then we can't run an anchored search even if the underlying searcher
-// supports it.
-//
-// The key bit here is that both of the NFAs in this crate unconditionally
-// support both unanchored and anchored searches, but the DFA does not because
-// of the added cost of doing so. To avoid the top-level AC matcher sometimes
-// supporting anchored and sometimes not (depending on which searcher it
-// chooses to use internally), we ensure that the given 'StartKind' is always
-// respected.
-#[test]
-fn anchored_not_allowed_even_if_technically_available() {
-    let ac = AhoCorasick::builder()
-        .kind(Some(AhoCorasickKind::NoncontiguousNFA))
-        .start_kind(StartKind::Unanchored)
-        .build(&["foo"])
-        .unwrap();
-    assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err());
-
-    let ac = AhoCorasick::builder()
-        .kind(Some(AhoCorasickKind::ContiguousNFA))
-        .start_kind(StartKind::Unanchored)
-        .build(&["foo"])
-        .unwrap();
-    assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err());
-
-    // For completeness, check that the DFA returns an error too.
-    let ac = AhoCorasick::builder()
-        .kind(Some(AhoCorasickKind::DFA))
-        .start_kind(StartKind::Unanchored)
-        .build(&["foo"])
-        .unwrap();
-    assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err());
-}
-
-// This is like the test aboved, but with unanchored and anchored flipped. That
-// is, we asked for an AC searcher with anchored support and we check that
-// unanchored searches return an error even if the underlying searcher would
-// technically support it.
-#[test]
-fn unanchored_not_allowed_even_if_technically_available() {
-    let ac = AhoCorasick::builder()
-        .kind(Some(AhoCorasickKind::NoncontiguousNFA))
-        .start_kind(StartKind::Anchored)
-        .build(&["foo"])
-        .unwrap();
-    assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err());
-
-    let ac = AhoCorasick::builder()
-        .kind(Some(AhoCorasickKind::ContiguousNFA))
-        .start_kind(StartKind::Anchored)
-        .build(&["foo"])
-        .unwrap();
-    assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err());
-
-    // For completeness, check that the DFA returns an error too.
-    let ac = AhoCorasick::builder()
-        .kind(Some(AhoCorasickKind::DFA))
-        .start_kind(StartKind::Anchored)
-        .build(&["foo"])
-        .unwrap();
-    assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err());
-}
-
-// This tests that a prefilter does not cause a search to report a match
-// outside the bounds provided by the caller.
-//
-// This is a regression test for a bug I introduced during the rewrite of most
-// of the crate after 0.7. It was never released. The tricky part here is
-// ensuring we get a prefilter that can report matches on its own (such as the
-// packed searcher). Otherwise, prefilters that report false positives might
-// have searched past the bounds provided by the caller, but confirming the
-// match would subsequently fail.
-#[test]
-fn prefilter_stays_in_bounds() {
-    let ac = AhoCorasick::builder()
-        .match_kind(MatchKind::LeftmostFirst)
-        .build(&["sam", "frodo", "pippin", "merry", "gandalf", "sauron"])
-        .unwrap();
-    let haystack = "foo gandalf";
-    assert_eq!(None, ac.find(Input::new(haystack).range(0..10)));
-}
-
-// See: https://github.com/BurntSushi/aho-corasick/issues/44
-//
-// In short, this test ensures that enabling ASCII case insensitivity does not
-// visit an exponential number of states when filling in failure transitions.
-#[test]
-fn regression_ascii_case_insensitive_no_exponential() {
-    let ac = AhoCorasick::builder()
-        .ascii_case_insensitive(true)
-        .build(&["Tsubaki House-Triple Shot Vol01æ ĄèŠ±äž‰ć§ćŠč"])
-        .unwrap();
-    assert!(ac.find("").is_none());
-}
-
-// See: https://github.com/BurntSushi/aho-corasick/issues/53
-//
-// This test ensures that the rare byte prefilter works in a particular corner
-// case. In particular, the shift offset detected for '/' in the patterns below
-// was incorrect, leading to a false negative.
-#[test]
-fn regression_rare_byte_prefilter() {
-    use crate::AhoCorasick;
-
-    let ac = AhoCorasick::new(&["ab/j/", "x/"]).unwrap();
-    assert!(ac.is_match("ab/j/"));
-}
-
-#[test]
-fn regression_case_insensitive_prefilter() {
-    for c in b'a'..b'z' {
-        for c2 in b'a'..b'z' {
-            let c = c as char;
-            let c2 = c2 as char;
-            let needle = format!("{}{}", c, c2).to_lowercase();
-            let haystack = needle.to_uppercase();
-            let ac = AhoCorasick::builder()
-                .ascii_case_insensitive(true)
-                .prefilter(true)
-                .build(&[&needle])
-                .unwrap();
-            assert_eq!(
-                1,
-                ac.find_iter(&haystack).count(),
-                "failed to find {:?} in {:?}\n\nautomaton:\n{:?}",
-                needle,
-                haystack,
-                ac,
-            );
-        }
-    }
-}
-
-// See: https://github.com/BurntSushi/aho-corasick/issues/64
-//
-// This occurs when the rare byte prefilter is active.
-#[cfg(feature = "std")]
-#[test]
-fn regression_stream_rare_byte_prefilter() {
-    use std::io::Read;
-
-    // NOTE: The test only fails if this ends with j.
-    const MAGIC: [u8; 5] = *b"1234j";
-
-    // NOTE: The test fails for value in 8188..=8191 These value put the string
-    // to search accross two call to read because the buffer size is 64KB by
-    // default.
-    const BEGIN: usize = 65_535;
-
-    /// This is just a structure that implements Reader. The reader
-    /// implementation will simulate a file filled with 0, except for the MAGIC
-    /// string at offset BEGIN.
-    #[derive(Default)]
-    struct R {
-        read: usize,
-    }
-
-    impl Read for R {
-        fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
-            if self.read > 100000 {
-                return Ok(0);
-            }
-            let mut from = 0;
-            if self.read < BEGIN {
-                from = buf.len().min(BEGIN - self.read);
-                for x in 0..from {
-                    buf[x] = 0;
-                }
-                self.read += from;
-            }
-            if self.read >= BEGIN && self.read <= BEGIN + MAGIC.len() {
-                let to = buf.len().min(BEGIN + MAGIC.len() - self.read + from);
-                if to > from {
-                    buf[from..to].copy_from_slice(
-                        &MAGIC
-                            [self.read - BEGIN..self.read - BEGIN + to - from],
-                    );
-                    self.read += to - from;
-                    from = to;
-                }
-            }
-            for x in from..buf.len() {
-                buf[x] = 0;
-                self.read += 1;
-            }
-            Ok(buf.len())
-        }
-    }
-
-    fn run() -> std::io::Result<()> {
-        let aut = AhoCorasick::builder()
-            // Enable byte classes to make debugging the automaton easier. It
-            // should have no effect on the test result.
-            .byte_classes(false)
-            .build(&[&MAGIC])
-            .unwrap();
-
-        // While reading from a vector, it works:
-        let mut buf = alloc::vec![];
-        R::default().read_to_end(&mut buf)?;
-        let from_whole = aut.find_iter(&buf).next().unwrap().start();
-
-        // But using stream_find_iter fails!
-        let mut file = std::io::BufReader::new(R::default());
-        let begin = aut
-            .stream_find_iter(&mut file)
-            .next()
-            .expect("NOT FOUND!!!!")? // Panic here
-            .start();
-        assert_eq!(from_whole, begin);
-        Ok(())
-    }
-
-    run().unwrap()
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/transducer.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/transducer.rs
deleted file mode 100644
index 39bb240f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/transducer.rs
+++ /dev/null
@@ -1,270 +0,0 @@
-/*!
-Provides implementations of `fst::Automaton` for Aho-Corasick automata.
-
-This works by providing two wrapper types, [`Anchored`] and [`Unanchored`].
-The former executes an anchored search on an FST while the latter executes
-an unanchored search. Building these wrappers is fallible and will fail if
-the underlying Aho-Corasick automaton does not support the type of search it
-represents.
-*/
-
-use crate::{
-    automaton::{Automaton, StateID},
-    Anchored as AcAnchored, Input, MatchError,
-};
-
-/// Represents an unanchored Aho-Corasick search of a finite state transducer.
-///
-/// Wrapping an Aho-Corasick automaton in `Unanchored` will fail if the
-/// underlying automaton does not support unanchored searches.
-///
-/// # Example
-///
-/// This shows how to build an FST of keys and then run an unanchored search on
-/// those keys using an Aho-Corasick automaton.
-///
-/// ```
-/// use aho_corasick::{nfa::contiguous::NFA, transducer::Unanchored};
-/// use fst::{Automaton, IntoStreamer, Set, Streamer};
-///
-/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap();
-/// let nfa = NFA::new(&["bcd", "x"]).unwrap();
-/// // NFAs always support both unanchored and anchored searches.
-/// let searcher = Unanchored::new(&nfa).unwrap();
-///
-/// let mut stream = set.search(searcher).into_stream();
-/// let mut results = vec![];
-/// while let Some(key) = stream.next() {
-///     results.push(std::str::from_utf8(key).unwrap().to_string());
-/// }
-/// assert_eq!(vec!["abcd", "bcd", "xyz"], results);
-/// ```
-#[derive(Clone, Debug)]
-pub struct Unanchored<A>(A);
-
-impl<A: Automaton> Unanchored<A> {
-    /// Create a new `Unanchored` implementation of the `fst::Automaton` trait.
-    ///
-    /// If the given Aho-Corasick automaton does not support unanchored
-    /// searches, then this returns an error.
-    pub fn new(aut: A) -> Result<Unanchored<A>, MatchError> {
-        let input = Input::new("").anchored(AcAnchored::No);
-        let _ = aut.start_state(&input)?;
-        Ok(Unanchored(aut))
-    }
-
-    /// Returns a borrow to the underlying automaton.
-    pub fn as_ref(&self) -> &A {
-        &self.0
-    }
-
-    /// Unwrap this value and return the inner automaton.
-    pub fn into_inner(self) -> A {
-        self.0
-    }
-}
-
-impl<A: Automaton> fst::Automaton for Unanchored<A> {
-    type State = StateID;
-
-    #[inline]
-    fn start(&self) -> StateID {
-        let input = Input::new("").anchored(AcAnchored::No);
-        self.0.start_state(&input).expect("support for unanchored searches")
-    }
-
-    #[inline]
-    fn is_match(&self, state: &StateID) -> bool {
-        self.0.is_match(*state)
-    }
-
-    #[inline]
-    fn accept(&self, state: &StateID, byte: u8) -> StateID {
-        if fst::Automaton::is_match(self, state) {
-            return *state;
-        }
-        self.0.next_state(AcAnchored::No, *state, byte)
-    }
-
-    #[inline]
-    fn can_match(&self, state: &StateID) -> bool {
-        !self.0.is_dead(*state)
-    }
-}
-
-/// Represents an anchored Aho-Corasick search of a finite state transducer.
-///
-/// Wrapping an Aho-Corasick automaton in `Unanchored` will fail if the
-/// underlying automaton does not support unanchored searches.
-///
-/// # Example
-///
-/// This shows how to build an FST of keys and then run an anchored search on
-/// those keys using an Aho-Corasick automaton.
-///
-/// ```
-/// use aho_corasick::{nfa::contiguous::NFA, transducer::Anchored};
-/// use fst::{Automaton, IntoStreamer, Set, Streamer};
-///
-/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap();
-/// let nfa = NFA::new(&["bcd", "x"]).unwrap();
-/// // NFAs always support both unanchored and anchored searches.
-/// let searcher = Anchored::new(&nfa).unwrap();
-///
-/// let mut stream = set.search(searcher).into_stream();
-/// let mut results = vec![];
-/// while let Some(key) = stream.next() {
-///     results.push(std::str::from_utf8(key).unwrap().to_string());
-/// }
-/// assert_eq!(vec!["bcd", "xyz"], results);
-/// ```
-///
-/// This is like the example above, except we use an Aho-Corasick DFA, which
-/// requires explicitly configuring it to support anchored searches. (NFAs
-/// unconditionally support both unanchored and anchored searches.)
-///
-/// ```
-/// use aho_corasick::{dfa::DFA, transducer::Anchored, StartKind};
-/// use fst::{Automaton, IntoStreamer, Set, Streamer};
-///
-/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap();
-/// let dfa = DFA::builder()
-///     .start_kind(StartKind::Anchored)
-///     .build(&["bcd", "x"])
-///     .unwrap();
-/// // We've explicitly configured our DFA to support anchored searches.
-/// let searcher = Anchored::new(&dfa).unwrap();
-///
-/// let mut stream = set.search(searcher).into_stream();
-/// let mut results = vec![];
-/// while let Some(key) = stream.next() {
-///     results.push(std::str::from_utf8(key).unwrap().to_string());
-/// }
-/// assert_eq!(vec!["bcd", "xyz"], results);
-/// ```
-#[derive(Clone, Debug)]
-pub struct Anchored<A>(A);
-
-impl<A: Automaton> Anchored<A> {
-    /// Create a new `Anchored` implementation of the `fst::Automaton` trait.
-    ///
-    /// If the given Aho-Corasick automaton does not support anchored searches,
-    /// then this returns an error.
-    pub fn new(aut: A) -> Result<Anchored<A>, MatchError> {
-        let input = Input::new("").anchored(AcAnchored::Yes);
-        let _ = aut.start_state(&input)?;
-        Ok(Anchored(aut))
-    }
-
-    /// Returns a borrow to the underlying automaton.
-    pub fn as_ref(&self) -> &A {
-        &self.0
-    }
-
-    /// Unwrap this value and return the inner automaton.
-    pub fn into_inner(self) -> A {
-        self.0
-    }
-}
-
-impl<A: Automaton> fst::Automaton for Anchored<A> {
-    type State = StateID;
-
-    #[inline]
-    fn start(&self) -> StateID {
-        let input = Input::new("").anchored(AcAnchored::Yes);
-        self.0.start_state(&input).expect("support for unanchored searches")
-    }
-
-    #[inline]
-    fn is_match(&self, state: &StateID) -> bool {
-        self.0.is_match(*state)
-    }
-
-    #[inline]
-    fn accept(&self, state: &StateID, byte: u8) -> StateID {
-        if fst::Automaton::is_match(self, state) {
-            return *state;
-        }
-        self.0.next_state(AcAnchored::Yes, *state, byte)
-    }
-
-    #[inline]
-    fn can_match(&self, state: &StateID) -> bool {
-        !self.0.is_dead(*state)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use alloc::{string::String, vec, vec::Vec};
-
-    use fst::{Automaton, IntoStreamer, Set, Streamer};
-
-    use crate::{
-        dfa::DFA,
-        nfa::{contiguous, noncontiguous},
-        StartKind,
-    };
-
-    use super::*;
-
-    fn search<A: Automaton, D: AsRef<[u8]>>(
-        set: &Set<D>,
-        aut: A,
-    ) -> Vec<String> {
-        let mut stream = set.search(aut).into_stream();
-        let mut results = vec![];
-        while let Some(key) = stream.next() {
-            results.push(String::from(core::str::from_utf8(key).unwrap()));
-        }
-        results
-    }
-
-    #[test]
-    fn unanchored() {
-        let set =
-            Set::from_iter(&["a", "bar", "baz", "wat", "xba", "xbax", "z"])
-                .unwrap();
-        let patterns = vec!["baz", "bax"];
-        let expected = vec!["baz", "xbax"];
-
-        let aut = Unanchored(noncontiguous::NFA::new(&patterns).unwrap());
-        let got = search(&set, &aut);
-        assert_eq!(got, expected);
-
-        let aut = Unanchored(contiguous::NFA::new(&patterns).unwrap());
-        let got = search(&set, &aut);
-        assert_eq!(got, expected);
-
-        let aut = Unanchored(DFA::new(&patterns).unwrap());
-        let got = search(&set, &aut);
-        assert_eq!(got, expected);
-    }
-
-    #[test]
-    fn anchored() {
-        let set =
-            Set::from_iter(&["a", "bar", "baz", "wat", "xba", "xbax", "z"])
-                .unwrap();
-        let patterns = vec!["baz", "bax"];
-        let expected = vec!["baz"];
-
-        let aut = Anchored(noncontiguous::NFA::new(&patterns).unwrap());
-        let got = search(&set, &aut);
-        assert_eq!(got, expected);
-
-        let aut = Anchored(contiguous::NFA::new(&patterns).unwrap());
-        let got = search(&set, &aut);
-        assert_eq!(got, expected);
-
-        let aut = Anchored(
-            DFA::builder()
-                .start_kind(StartKind::Anchored)
-                .build(&patterns)
-                .unwrap(),
-        );
-        let got = search(&set, &aut);
-        assert_eq!(got, expected);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/alphabet.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/alphabet.rs
deleted file mode 100644
index 69724fa3..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/alphabet.rs
+++ /dev/null
@@ -1,409 +0,0 @@
-use crate::util::int::Usize;
-
-/// A representation of byte oriented equivalence classes.
-///
-/// This is used in finite state machines to reduce the size of the transition
-/// table. This can have a particularly large impact not only on the total size
-/// of an FSM, but also on FSM build times because it reduces the number of
-/// transitions that need to be visited/set.
-#[derive(Clone, Copy)]
-pub(crate) struct ByteClasses([u8; 256]);
-
-impl ByteClasses {
-    /// Creates a new set of equivalence classes where all bytes are mapped to
-    /// the same class.
-    pub(crate) fn empty() -> ByteClasses {
-        ByteClasses([0; 256])
-    }
-
-    /// Creates a new set of equivalence classes where each byte belongs to
-    /// its own equivalence class.
-    pub(crate) fn singletons() -> ByteClasses {
-        let mut classes = ByteClasses::empty();
-        for b in 0..=255 {
-            classes.set(b, b);
-        }
-        classes
-    }
-
-    /// Set the equivalence class for the given byte.
-    #[inline]
-    pub(crate) fn set(&mut self, byte: u8, class: u8) {
-        self.0[usize::from(byte)] = class;
-    }
-
-    /// Get the equivalence class for the given byte.
-    #[inline]
-    pub(crate) fn get(&self, byte: u8) -> u8 {
-        self.0[usize::from(byte)]
-    }
-
-    /// Return the total number of elements in the alphabet represented by
-    /// these equivalence classes. Equivalently, this returns the total number
-    /// of equivalence classes.
-    #[inline]
-    pub(crate) fn alphabet_len(&self) -> usize {
-        // Add one since the number of equivalence classes is one bigger than
-        // the last one.
-        usize::from(self.0[255]) + 1
-    }
-
-    /// Returns the stride, as a base-2 exponent, required for these
-    /// equivalence classes.
-    ///
-    /// The stride is always the smallest power of 2 that is greater than or
-    /// equal to the alphabet length. This is done so that converting between
-    /// state IDs and indices can be done with shifts alone, which is much
-    /// faster than integer division. The "stride2" is the exponent. i.e.,
-    /// `2^stride2 = stride`.
-    pub(crate) fn stride2(&self) -> usize {
-        let zeros = self.alphabet_len().next_power_of_two().trailing_zeros();
-        usize::try_from(zeros).unwrap()
-    }
-
-    /// Returns the stride for these equivalence classes, which corresponds
-    /// to the smallest power of 2 greater than or equal to the number of
-    /// equivalence classes.
-    pub(crate) fn stride(&self) -> usize {
-        1 << self.stride2()
-    }
-
-    /// Returns true if and only if every byte in this class maps to its own
-    /// equivalence class. Equivalently, there are 257 equivalence classes
-    /// and each class contains exactly one byte (plus the special EOI class).
-    #[inline]
-    pub(crate) fn is_singleton(&self) -> bool {
-        self.alphabet_len() == 256
-    }
-
-    /// Returns an iterator over all equivalence classes in this set.
-    pub(crate) fn iter(&self) -> ByteClassIter {
-        ByteClassIter { it: 0..self.alphabet_len() }
-    }
-
-    /// Returns an iterator of the bytes in the given equivalence class.
-    pub(crate) fn elements(&self, class: u8) -> ByteClassElements {
-        ByteClassElements { classes: self, class, bytes: 0..=255 }
-    }
-
-    /// Returns an iterator of byte ranges in the given equivalence class.
-    ///
-    /// That is, a sequence of contiguous ranges are returned. Typically, every
-    /// class maps to a single contiguous range.
-    fn element_ranges(&self, class: u8) -> ByteClassElementRanges {
-        ByteClassElementRanges { elements: self.elements(class), range: None }
-    }
-}
-
-impl core::fmt::Debug for ByteClasses {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        if self.is_singleton() {
-            write!(f, "ByteClasses(<one-class-per-byte>)")
-        } else {
-            write!(f, "ByteClasses(")?;
-            for (i, class) in self.iter().enumerate() {
-                if i > 0 {
-                    write!(f, ", ")?;
-                }
-                write!(f, "{:?} => [", class)?;
-                for (start, end) in self.element_ranges(class) {
-                    if start == end {
-                        write!(f, "{:?}", start)?;
-                    } else {
-                        write!(f, "{:?}-{:?}", start, end)?;
-                    }
-                }
-                write!(f, "]")?;
-            }
-            write!(f, ")")
-        }
-    }
-}
-
-/// An iterator over each equivalence class.
-#[derive(Debug)]
-pub(crate) struct ByteClassIter {
-    it: core::ops::Range<usize>,
-}
-
-impl Iterator for ByteClassIter {
-    type Item = u8;
-
-    fn next(&mut self) -> Option<u8> {
-        self.it.next().map(|class| class.as_u8())
-    }
-}
-
-/// An iterator over all elements in a specific equivalence class.
-#[derive(Debug)]
-pub(crate) struct ByteClassElements<'a> {
-    classes: &'a ByteClasses,
-    class: u8,
-    bytes: core::ops::RangeInclusive<u8>,
-}
-
-impl<'a> Iterator for ByteClassElements<'a> {
-    type Item = u8;
-
-    fn next(&mut self) -> Option<u8> {
-        while let Some(byte) = self.bytes.next() {
-            if self.class == self.classes.get(byte) {
-                return Some(byte);
-            }
-        }
-        None
-    }
-}
-
-/// An iterator over all elements in an equivalence class expressed as a
-/// sequence of contiguous ranges.
-#[derive(Debug)]
-pub(crate) struct ByteClassElementRanges<'a> {
-    elements: ByteClassElements<'a>,
-    range: Option<(u8, u8)>,
-}
-
-impl<'a> Iterator for ByteClassElementRanges<'a> {
-    type Item = (u8, u8);
-
-    fn next(&mut self) -> Option<(u8, u8)> {
-        loop {
-            let element = match self.elements.next() {
-                None => return self.range.take(),
-                Some(element) => element,
-            };
-            match self.range.take() {
-                None => {
-                    self.range = Some((element, element));
-                }
-                Some((start, end)) => {
-                    if usize::from(end) + 1 != usize::from(element) {
-                        self.range = Some((element, element));
-                        return Some((start, end));
-                    }
-                    self.range = Some((start, element));
-                }
-            }
-        }
-    }
-}
-
-/// A partitioning of bytes into equivalence classes.
-///
-/// A byte class set keeps track of an *approximation* of equivalence classes
-/// of bytes during NFA construction. That is, every byte in an equivalence
-/// class cannot discriminate between a match and a non-match.
-///
-/// Note that this may not compute the minimal set of equivalence classes.
-/// Basically, any byte in a pattern given to the noncontiguous NFA builder
-/// will automatically be treated as its own equivalence class. All other
-/// bytes---any byte not in any pattern---will be treated as their own
-/// equivalence classes. In theory, all bytes not in any pattern should
-/// be part of a single equivalence class, but in practice, we only treat
-/// contiguous ranges of bytes as an equivalence class. So the number of
-/// classes computed may be bigger than necessary. This usually doesn't make
-/// much of a difference, and keeps the implementation simple.
-#[derive(Clone, Debug)]
-pub(crate) struct ByteClassSet(ByteSet);
-
-impl Default for ByteClassSet {
-    fn default() -> ByteClassSet {
-        ByteClassSet::empty()
-    }
-}
-
-impl ByteClassSet {
-    /// Create a new set of byte classes where all bytes are part of the same
-    /// equivalence class.
-    pub(crate) fn empty() -> Self {
-        ByteClassSet(ByteSet::empty())
-    }
-
-    /// Indicate the the range of byte given (inclusive) can discriminate a
-    /// match between it and all other bytes outside of the range.
-    pub(crate) fn set_range(&mut self, start: u8, end: u8) {
-        debug_assert!(start <= end);
-        if start > 0 {
-            self.0.add(start - 1);
-        }
-        self.0.add(end);
-    }
-
-    /// Convert this boolean set to a map that maps all byte values to their
-    /// corresponding equivalence class. The last mapping indicates the largest
-    /// equivalence class identifier (which is never bigger than 255).
-    pub(crate) fn byte_classes(&self) -> ByteClasses {
-        let mut classes = ByteClasses::empty();
-        let mut class = 0u8;
-        let mut b = 0u8;
-        loop {
-            classes.set(b, class);
-            if b == 255 {
-                break;
-            }
-            if self.0.contains(b) {
-                class = class.checked_add(1).unwrap();
-            }
-            b = b.checked_add(1).unwrap();
-        }
-        classes
-    }
-}
-
-/// A simple set of bytes that is reasonably cheap to copy and allocation free.
-#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
-pub(crate) struct ByteSet {
-    bits: BitSet,
-}
-
-/// The representation of a byte set. Split out so that we can define a
-/// convenient Debug impl for it while keeping "ByteSet" in the output.
-#[derive(Clone, Copy, Default, Eq, PartialEq)]
-struct BitSet([u128; 2]);
-
-impl ByteSet {
-    /// Create an empty set of bytes.
-    pub(crate) fn empty() -> ByteSet {
-        ByteSet { bits: BitSet([0; 2]) }
-    }
-
-    /// Add a byte to this set.
-    ///
-    /// If the given byte already belongs to this set, then this is a no-op.
-    pub(crate) fn add(&mut self, byte: u8) {
-        let bucket = byte / 128;
-        let bit = byte % 128;
-        self.bits.0[usize::from(bucket)] |= 1 << bit;
-    }
-
-    /// Return true if and only if the given byte is in this set.
-    pub(crate) fn contains(&self, byte: u8) -> bool {
-        let bucket = byte / 128;
-        let bit = byte % 128;
-        self.bits.0[usize::from(bucket)] & (1 << bit) > 0
-    }
-}
-
-impl core::fmt::Debug for BitSet {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        let mut fmtd = f.debug_set();
-        for b in 0u8..=255 {
-            if (ByteSet { bits: *self }).contains(b) {
-                fmtd.entry(&b);
-            }
-        }
-        fmtd.finish()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use alloc::{vec, vec::Vec};
-
-    use super::*;
-
-    #[test]
-    fn byte_classes() {
-        let mut set = ByteClassSet::empty();
-        set.set_range(b'a', b'z');
-
-        let classes = set.byte_classes();
-        assert_eq!(classes.get(0), 0);
-        assert_eq!(classes.get(1), 0);
-        assert_eq!(classes.get(2), 0);
-        assert_eq!(classes.get(b'a' - 1), 0);
-        assert_eq!(classes.get(b'a'), 1);
-        assert_eq!(classes.get(b'm'), 1);
-        assert_eq!(classes.get(b'z'), 1);
-        assert_eq!(classes.get(b'z' + 1), 2);
-        assert_eq!(classes.get(254), 2);
-        assert_eq!(classes.get(255), 2);
-
-        let mut set = ByteClassSet::empty();
-        set.set_range(0, 2);
-        set.set_range(4, 6);
-        let classes = set.byte_classes();
-        assert_eq!(classes.get(0), 0);
-        assert_eq!(classes.get(1), 0);
-        assert_eq!(classes.get(2), 0);
-        assert_eq!(classes.get(3), 1);
-        assert_eq!(classes.get(4), 2);
-        assert_eq!(classes.get(5), 2);
-        assert_eq!(classes.get(6), 2);
-        assert_eq!(classes.get(7), 3);
-        assert_eq!(classes.get(255), 3);
-    }
-
-    #[test]
-    fn full_byte_classes() {
-        let mut set = ByteClassSet::empty();
-        for b in 0u8..=255 {
-            set.set_range(b, b);
-        }
-        assert_eq!(set.byte_classes().alphabet_len(), 256);
-    }
-
-    #[test]
-    fn elements_typical() {
-        let mut set = ByteClassSet::empty();
-        set.set_range(b'b', b'd');
-        set.set_range(b'g', b'm');
-        set.set_range(b'z', b'z');
-        let classes = set.byte_classes();
-        // class 0: \x00-a
-        // class 1: b-d
-        // class 2: e-f
-        // class 3: g-m
-        // class 4: n-y
-        // class 5: z-z
-        // class 6: \x7B-\xFF
-        assert_eq!(classes.alphabet_len(), 7);
-
-        let elements = classes.elements(0).collect::<Vec<_>>();
-        assert_eq!(elements.len(), 98);
-        assert_eq!(elements[0], b'\x00');
-        assert_eq!(elements[97], b'a');
-
-        let elements = classes.elements(1).collect::<Vec<_>>();
-        assert_eq!(elements, vec![b'b', b'c', b'd'],);
-
-        let elements = classes.elements(2).collect::<Vec<_>>();
-        assert_eq!(elements, vec![b'e', b'f'],);
-
-        let elements = classes.elements(3).collect::<Vec<_>>();
-        assert_eq!(elements, vec![b'g', b'h', b'i', b'j', b'k', b'l', b'm',],);
-
-        let elements = classes.elements(4).collect::<Vec<_>>();
-        assert_eq!(elements.len(), 12);
-        assert_eq!(elements[0], b'n');
-        assert_eq!(elements[11], b'y');
-
-        let elements = classes.elements(5).collect::<Vec<_>>();
-        assert_eq!(elements, vec![b'z']);
-
-        let elements = classes.elements(6).collect::<Vec<_>>();
-        assert_eq!(elements.len(), 133);
-        assert_eq!(elements[0], b'\x7B');
-        assert_eq!(elements[132], b'\xFF');
-    }
-
-    #[test]
-    fn elements_singletons() {
-        let classes = ByteClasses::singletons();
-        assert_eq!(classes.alphabet_len(), 256);
-
-        let elements = classes.elements(b'a').collect::<Vec<_>>();
-        assert_eq!(elements, vec![b'a']);
-    }
-
-    #[test]
-    fn elements_empty() {
-        let classes = ByteClasses::empty();
-        assert_eq!(classes.alphabet_len(), 1);
-
-        let elements = classes.elements(0).collect::<Vec<_>>();
-        assert_eq!(elements.len(), 256);
-        assert_eq!(elements[0], b'\x00');
-        assert_eq!(elements[255], b'\xFF');
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/buffer.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/buffer.rs
deleted file mode 100644
index e9e982a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/buffer.rs
+++ /dev/null
@@ -1,124 +0,0 @@
-use alloc::{vec, vec::Vec};
-
-/// The default buffer capacity that we use for the stream buffer.
-const DEFAULT_BUFFER_CAPACITY: usize = 64 * (1 << 10); // 64 KB
-
-/// A fairly simple roll buffer for supporting stream searches.
-///
-/// This buffer acts as a temporary place to store a fixed amount of data when
-/// reading from a stream. Its central purpose is to allow "rolling" some
-/// suffix of the data to the beginning of the buffer before refilling it with
-/// more data from the stream. For example, let's say we are trying to match
-/// "foobar" on a stream. When we report the match, we'd like to not only
-/// report the correct offsets at which the match occurs, but also the matching
-/// bytes themselves. So let's say our stream is a file with the following
-/// contents: `test test foobar test test`. Now assume that we happen to read
-/// the aforementioned file in two chunks: `test test foo` and `bar test test`.
-/// Naively, it would not be possible to report a single contiguous `foobar`
-/// match, but this roll buffer allows us to do that. Namely, after the second
-/// read, the contents of the buffer should be `st foobar test test`, where the
-/// search should ultimately resume immediately after `foo`. (The prefix `st `
-/// is included because the roll buffer saves N bytes at the end of the buffer,
-/// where N is the maximum possible length of a match.)
-///
-/// A lot of the logic for dealing with this is unfortunately split out between
-/// this roll buffer and the `StreamChunkIter`.
-///
-/// Note also that this buffer is not actually required to just report matches.
-/// Because a `Match` is just some offsets. But it *is* required for supporting
-/// things like `try_stream_replace_all` because that needs some mechanism for
-/// knowing which bytes in the stream correspond to a match and which don't. So
-/// when a match occurs across two `read` calls, *something* needs to retain
-/// the bytes from the previous `read` call because you don't know before the
-/// second read call whether a match exists or not.
-#[derive(Debug)]
-pub(crate) struct Buffer {
-    /// The raw buffer contents. This has a fixed size and never increases.
-    buf: Vec<u8>,
-    /// The minimum size of the buffer, which is equivalent to the maximum
-    /// possible length of a match. This corresponds to the amount that we
-    /// roll
-    min: usize,
-    /// The end of the contents of this buffer.
-    end: usize,
-}
-
-impl Buffer {
-    /// Create a new buffer for stream searching. The minimum buffer length
-    /// given should be the size of the maximum possible match length.
-    pub(crate) fn new(min_buffer_len: usize) -> Buffer {
-        let min = core::cmp::max(1, min_buffer_len);
-        // The minimum buffer amount is also the amount that we roll our
-        // buffer in order to support incremental searching. To this end,
-        // our actual capacity needs to be at least 1 byte bigger than our
-        // minimum amount, otherwise we won't have any overlap. In actuality,
-        // we want our buffer to be a bit bigger than that for performance
-        // reasons, so we set a lower bound of `8 * min`.
-        //
-        // TODO: It would be good to find a way to test the streaming
-        // implementation with the minimal buffer size. For now, we just
-        // uncomment out the next line and comment out the subsequent line.
-        // let capacity = 1 + min;
-        let capacity = core::cmp::max(min * 8, DEFAULT_BUFFER_CAPACITY);
-        Buffer { buf: vec![0; capacity], min, end: 0 }
-    }
-
-    /// Return the contents of this buffer.
-    #[inline]
-    pub(crate) fn buffer(&self) -> &[u8] {
-        &self.buf[..self.end]
-    }
-
-    /// Return the minimum size of the buffer. The only way a buffer may be
-    /// smaller than this is if the stream itself contains less than the
-    /// minimum buffer amount.
-    #[inline]
-    pub(crate) fn min_buffer_len(&self) -> usize {
-        self.min
-    }
-
-    /// Return all free capacity in this buffer.
-    fn free_buffer(&mut self) -> &mut [u8] {
-        &mut self.buf[self.end..]
-    }
-
-    /// Refill the contents of this buffer by reading as much as possible into
-    /// this buffer's free capacity. If no more bytes could be read, then this
-    /// returns false. Otherwise, this reads until it has filled the buffer
-    /// past the minimum amount.
-    pub(crate) fn fill<R: std::io::Read>(
-        &mut self,
-        mut rdr: R,
-    ) -> std::io::Result<bool> {
-        let mut readany = false;
-        loop {
-            let readlen = rdr.read(self.free_buffer())?;
-            if readlen == 0 {
-                return Ok(readany);
-            }
-            readany = true;
-            self.end += readlen;
-            if self.buffer().len() >= self.min {
-                return Ok(true);
-            }
-        }
-    }
-
-    /// Roll the contents of the buffer so that the suffix of this buffer is
-    /// moved to the front and all other contents are dropped. The size of the
-    /// suffix corresponds precisely to the minimum buffer length.
-    ///
-    /// This should only be called when the entire contents of this buffer have
-    /// been searched.
-    pub(crate) fn roll(&mut self) {
-        let roll_start = self
-            .end
-            .checked_sub(self.min)
-            .expect("buffer capacity should be bigger than minimum amount");
-        let roll_end = roll_start + self.min;
-
-        assert!(roll_end <= self.end);
-        self.buf.copy_within(roll_start..roll_end, 0);
-        self.end = self.min;
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/byte_frequencies.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/byte_frequencies.rs
deleted file mode 100644
index c313b62..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/byte_frequencies.rs
+++ /dev/null
@@ -1,258 +0,0 @@
-pub const BYTE_FREQUENCIES: [u8; 256] = [
-    55,  // '\x00'
-    52,  // '\x01'
-    51,  // '\x02'
-    50,  // '\x03'
-    49,  // '\x04'
-    48,  // '\x05'
-    47,  // '\x06'
-    46,  // '\x07'
-    45,  // '\x08'
-    103, // '\t'
-    242, // '\n'
-    66,  // '\x0b'
-    67,  // '\x0c'
-    229, // '\r'
-    44,  // '\x0e'
-    43,  // '\x0f'
-    42,  // '\x10'
-    41,  // '\x11'
-    40,  // '\x12'
-    39,  // '\x13'
-    38,  // '\x14'
-    37,  // '\x15'
-    36,  // '\x16'
-    35,  // '\x17'
-    34,  // '\x18'
-    33,  // '\x19'
-    56,  // '\x1a'
-    32,  // '\x1b'
-    31,  // '\x1c'
-    30,  // '\x1d'
-    29,  // '\x1e'
-    28,  // '\x1f'
-    255, // ' '
-    148, // '!'
-    164, // '"'
-    149, // '#'
-    136, // '$'
-    160, // '%'
-    155, // '&'
-    173, // "'"
-    221, // '('
-    222, // ')'
-    134, // '*'
-    122, // '+'
-    232, // ','
-    202, // '-'
-    215, // '.'
-    224, // '/'
-    208, // '0'
-    220, // '1'
-    204, // '2'
-    187, // '3'
-    183, // '4'
-    179, // '5'
-    177, // '6'
-    168, // '7'
-    178, // '8'
-    200, // '9'
-    226, // ':'
-    195, // ';'
-    154, // '<'
-    184, // '='
-    174, // '>'
-    126, // '?'
-    120, // '@'
-    191, // 'A'
-    157, // 'B'
-    194, // 'C'
-    170, // 'D'
-    189, // 'E'
-    162, // 'F'
-    161, // 'G'
-    150, // 'H'
-    193, // 'I'
-    142, // 'J'
-    137, // 'K'
-    171, // 'L'
-    176, // 'M'
-    185, // 'N'
-    167, // 'O'
-    186, // 'P'
-    112, // 'Q'
-    175, // 'R'
-    192, // 'S'
-    188, // 'T'
-    156, // 'U'
-    140, // 'V'
-    143, // 'W'
-    123, // 'X'
-    133, // 'Y'
-    128, // 'Z'
-    147, // '['
-    138, // '\\'
-    146, // ']'
-    114, // '^'
-    223, // '_'
-    151, // '`'
-    249, // 'a'
-    216, // 'b'
-    238, // 'c'
-    236, // 'd'
-    253, // 'e'
-    227, // 'f'
-    218, // 'g'
-    230, // 'h'
-    247, // 'i'
-    135, // 'j'
-    180, // 'k'
-    241, // 'l'
-    233, // 'm'
-    246, // 'n'
-    244, // 'o'
-    231, // 'p'
-    139, // 'q'
-    245, // 'r'
-    243, // 's'
-    251, // 't'
-    235, // 'u'
-    201, // 'v'
-    196, // 'w'
-    240, // 'x'
-    214, // 'y'
-    152, // 'z'
-    182, // '{'
-    205, // '|'
-    181, // '}'
-    127, // '~'
-    27,  // '\x7f'
-    212, // '\x80'
-    211, // '\x81'
-    210, // '\x82'
-    213, // '\x83'
-    228, // '\x84'
-    197, // '\x85'
-    169, // '\x86'
-    159, // '\x87'
-    131, // '\x88'
-    172, // '\x89'
-    105, // '\x8a'
-    80,  // '\x8b'
-    98,  // '\x8c'
-    96,  // '\x8d'
-    97,  // '\x8e'
-    81,  // '\x8f'
-    207, // '\x90'
-    145, // '\x91'
-    116, // '\x92'
-    115, // '\x93'
-    144, // '\x94'
-    130, // '\x95'
-    153, // '\x96'
-    121, // '\x97'
-    107, // '\x98'
-    132, // '\x99'
-    109, // '\x9a'
-    110, // '\x9b'
-    124, // '\x9c'
-    111, // '\x9d'
-    82,  // '\x9e'
-    108, // '\x9f'
-    118, // '\xa0'
-    141, // '¡'
-    113, // '¢'
-    129, // '£'
-    119, // '¤'
-    125, // '¥'
-    165, // '¦'
-    117, // '§'
-    92,  // '¨'
-    106, // '©'
-    83,  // 'ª'
-    72,  // '«'
-    99,  // '¬'
-    93,  // '\xad'
-    65,  // '®'
-    79,  // '¯'
-    166, // '°'
-    237, // '±'
-    163, // '²'
-    199, // '³'
-    190, // '´'
-    225, // 'µ'
-    209, // '¶'
-    203, // '·'
-    198, // '¸'
-    217, // '¹'
-    219, // 'º'
-    206, // '»'
-    234, // '¼'
-    248, // '½'
-    158, // '¾'
-    239, // '¿'
-    255, // 'À'
-    255, // 'Á'
-    255, // 'Â'
-    255, // 'Ã'
-    255, // 'Ä'
-    255, // 'Å'
-    255, // 'Æ'
-    255, // 'Ç'
-    255, // 'È'
-    255, // 'É'
-    255, // 'Ê'
-    255, // 'Ë'
-    255, // 'Ì'
-    255, // 'Í'
-    255, // 'Î'
-    255, // 'Ï'
-    255, // 'Ð'
-    255, // 'Ñ'
-    255, // 'Ò'
-    255, // 'Ó'
-    255, // 'Ô'
-    255, // 'Õ'
-    255, // 'Ö'
-    255, // '×'
-    255, // 'Ø'
-    255, // 'Ù'
-    255, // 'Ú'
-    255, // 'Û'
-    255, // 'Ü'
-    255, // 'Ý'
-    255, // 'Þ'
-    255, // 'ß'
-    255, // 'à'
-    255, // 'á'
-    255, // 'â'
-    255, // 'ã'
-    255, // 'ä'
-    255, // 'å'
-    255, // 'æ'
-    255, // 'ç'
-    255, // 'è'
-    255, // 'é'
-    255, // 'ê'
-    255, // 'ë'
-    255, // 'ì'
-    255, // 'í'
-    255, // 'î'
-    255, // 'ï'
-    255, // 'ð'
-    255, // 'ñ'
-    255, // 'ò'
-    255, // 'ó'
-    255, // 'ô'
-    255, // 'õ'
-    255, // 'ö'
-    255, // '÷'
-    255, // 'ø'
-    255, // 'ù'
-    255, // 'ú'
-    255, // 'û'
-    255, // 'ü'
-    255, // 'ý'
-    255, // 'þ'
-    255, // 'ÿ'
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/debug.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/debug.rs
deleted file mode 100644
index 22b5f22..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/debug.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-/// A type that wraps a single byte with a convenient fmt::Debug impl that
-/// escapes the byte.
-pub(crate) struct DebugByte(pub(crate) u8);
-
-impl core::fmt::Debug for DebugByte {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        // Special case ASCII space. It's too hard to read otherwise, so
-        // put quotes around it. I sometimes wonder whether just '\x20' would
-        // be better...
-        if self.0 == b' ' {
-            return write!(f, "' '");
-        }
-        // 10 bytes is enough to cover any output from ascii::escape_default.
-        let mut bytes = [0u8; 10];
-        let mut len = 0;
-        for (i, mut b) in core::ascii::escape_default(self.0).enumerate() {
-            // capitalize \xab to \xAB
-            if i >= 2 && b'a' <= b && b <= b'f' {
-                b -= 32;
-            }
-            bytes[len] = b;
-            len += 1;
-        }
-        write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap())
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/error.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/error.rs
deleted file mode 100644
index 326d046..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/error.rs
+++ /dev/null
@@ -1,259 +0,0 @@
-use crate::util::{
-    primitives::{PatternID, SmallIndex},
-    search::MatchKind,
-};
-
-/// An error that occurred during the construction of an Aho-Corasick
-/// automaton.
-///
-/// Build errors occur when some kind of limit has been exceeded, either in the
-/// number of states, the number of patterns of the length of a pattern. These
-/// limits aren't part of the public API, but they should generally be large
-/// enough to handle most use cases.
-///
-/// When the `std` feature is enabled, this implements the `std::error::Error`
-/// trait.
-#[derive(Clone, Debug)]
-pub struct BuildError {
-    kind: ErrorKind,
-}
-
-/// The kind of error that occurred.
-#[derive(Clone, Debug)]
-enum ErrorKind {
-    /// An error that occurs when allocating a new state would result in an
-    /// identifier that exceeds the capacity of a `StateID`.
-    StateIDOverflow {
-        /// The maximum possible id.
-        max: u64,
-        /// The maximum ID requested.
-        requested_max: u64,
-    },
-    /// An error that occurs when adding a pattern to an Aho-Corasick
-    /// automaton would result in an identifier that exceeds the capacity of a
-    /// `PatternID`.
-    PatternIDOverflow {
-        /// The maximum possible id.
-        max: u64,
-        /// The maximum ID requested.
-        requested_max: u64,
-    },
-    /// Occurs when a pattern string is given to the Aho-Corasick constructor
-    /// that is too long.
-    PatternTooLong {
-        /// The ID of the pattern that was too long.
-        pattern: PatternID,
-        /// The length that was too long.
-        len: usize,
-    },
-}
-
-impl BuildError {
-    pub(crate) fn state_id_overflow(
-        max: u64,
-        requested_max: u64,
-    ) -> BuildError {
-        BuildError { kind: ErrorKind::StateIDOverflow { max, requested_max } }
-    }
-
-    pub(crate) fn pattern_id_overflow(
-        max: u64,
-        requested_max: u64,
-    ) -> BuildError {
-        BuildError {
-            kind: ErrorKind::PatternIDOverflow { max, requested_max },
-        }
-    }
-
-    pub(crate) fn pattern_too_long(
-        pattern: PatternID,
-        len: usize,
-    ) -> BuildError {
-        BuildError { kind: ErrorKind::PatternTooLong { pattern, len } }
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for BuildError {}
-
-impl core::fmt::Display for BuildError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match self.kind {
-            ErrorKind::StateIDOverflow { max, requested_max } => {
-                write!(
-                    f,
-                    "state identifier overflow: failed to create state ID \
-                     from {}, which exceeds the max of {}",
-                    requested_max, max,
-                )
-            }
-            ErrorKind::PatternIDOverflow { max, requested_max } => {
-                write!(
-                    f,
-                    "pattern identifier overflow: failed to create pattern ID \
-                     from {}, which exceeds the max of {}",
-                    requested_max, max,
-                )
-            }
-            ErrorKind::PatternTooLong { pattern, len } => {
-                write!(
-                    f,
-                    "pattern {} with length {} exceeds \
-                     the maximum pattern length of {}",
-                    pattern.as_usize(),
-                    len,
-                    SmallIndex::MAX.as_usize(),
-                )
-            }
-        }
-    }
-}
-
-/// An error that occurred during an Aho-Corasick search.
-///
-/// An error that occurs during a search is limited to some kind of
-/// misconfiguration that resulted in an illegal call. Stated differently,
-/// whether an error occurs is not dependent on the specific bytes in the
-/// haystack.
-///
-/// Examples of misconfiguration:
-///
-/// * Executing a stream or overlapping search on a searcher that was built was
-/// something other than [`MatchKind::Standard`](crate::MatchKind::Standard)
-/// semantics.
-/// * Requested an anchored or an unanchored search on a searcher that doesn't
-/// support unanchored or anchored searches, respectively.
-///
-/// When the `std` feature is enabled, this implements the `std::error::Error`
-/// trait.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct MatchError(alloc::boxed::Box<MatchErrorKind>);
-
-impl MatchError {
-    /// Create a new error value with the given kind.
-    ///
-    /// This is a more verbose version of the kind-specific constructors, e.g.,
-    /// `MatchError::unsupported_stream`.
-    pub fn new(kind: MatchErrorKind) -> MatchError {
-        MatchError(alloc::boxed::Box::new(kind))
-    }
-
-    /// Returns a reference to the underlying error kind.
-    pub fn kind(&self) -> &MatchErrorKind {
-        &self.0
-    }
-
-    /// Create a new "invalid anchored search" error. This occurs when the
-    /// caller requests an anchored search but where anchored searches aren't
-    /// supported.
-    ///
-    /// This is the same as calling `MatchError::new` with a
-    /// [`MatchErrorKind::InvalidInputAnchored`] kind.
-    pub fn invalid_input_anchored() -> MatchError {
-        MatchError::new(MatchErrorKind::InvalidInputAnchored)
-    }
-
-    /// Create a new "invalid unanchored search" error. This occurs when the
-    /// caller requests an unanchored search but where unanchored searches
-    /// aren't supported.
-    ///
-    /// This is the same as calling `MatchError::new` with a
-    /// [`MatchErrorKind::InvalidInputUnanchored`] kind.
-    pub fn invalid_input_unanchored() -> MatchError {
-        MatchError::new(MatchErrorKind::InvalidInputUnanchored)
-    }
-
-    /// Create a new "unsupported stream search" error. This occurs when the
-    /// caller requests a stream search while using an Aho-Corasick automaton
-    /// with a match kind other than [`MatchKind::Standard`].
-    ///
-    /// The match kind given should be the match kind of the automaton. It
-    /// should never be `MatchKind::Standard`.
-    pub fn unsupported_stream(got: MatchKind) -> MatchError {
-        MatchError::new(MatchErrorKind::UnsupportedStream { got })
-    }
-
-    /// Create a new "unsupported overlapping search" error. This occurs when
-    /// the caller requests an overlapping search while using an Aho-Corasick
-    /// automaton with a match kind other than [`MatchKind::Standard`].
-    ///
-    /// The match kind given should be the match kind of the automaton. It
-    /// should never be `MatchKind::Standard`.
-    pub fn unsupported_overlapping(got: MatchKind) -> MatchError {
-        MatchError::new(MatchErrorKind::UnsupportedOverlapping { got })
-    }
-
-    /// Create a new "unsupported empty pattern" error. This occurs when the
-    /// caller requests a search for which matching an automaton that contains
-    /// an empty pattern string is not supported.
-    pub fn unsupported_empty() -> MatchError {
-        MatchError::new(MatchErrorKind::UnsupportedEmpty)
-    }
-}
-
-/// The underlying kind of a [`MatchError`].
-///
-/// This is a **non-exhaustive** enum. That means new variants may be added in
-/// a semver-compatible release.
-#[non_exhaustive]
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum MatchErrorKind {
-    /// An error indicating that an anchored search was requested, but from a
-    /// searcher that was built without anchored support.
-    InvalidInputAnchored,
-    /// An error indicating that an unanchored search was requested, but from a
-    /// searcher that was built without unanchored support.
-    InvalidInputUnanchored,
-    /// An error indicating that a stream search was attempted on an
-    /// Aho-Corasick automaton with an unsupported `MatchKind`.
-    UnsupportedStream {
-        /// The match semantics for the automaton that was used.
-        got: MatchKind,
-    },
-    /// An error indicating that an overlapping search was attempted on an
-    /// Aho-Corasick automaton with an unsupported `MatchKind`.
-    UnsupportedOverlapping {
-        /// The match semantics for the automaton that was used.
-        got: MatchKind,
-    },
-    /// An error indicating that the operation requested doesn't support
-    /// automatons that contain an empty pattern string.
-    UnsupportedEmpty,
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for MatchError {}
-
-impl core::fmt::Display for MatchError {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        match *self.kind() {
-            MatchErrorKind::InvalidInputAnchored => {
-                write!(f, "anchored searches are not supported or enabled")
-            }
-            MatchErrorKind::InvalidInputUnanchored => {
-                write!(f, "unanchored searches are not supported or enabled")
-            }
-            MatchErrorKind::UnsupportedStream { got } => {
-                write!(
-                    f,
-                    "match kind {:?} does not support stream searching",
-                    got,
-                )
-            }
-            MatchErrorKind::UnsupportedOverlapping { got } => {
-                write!(
-                    f,
-                    "match kind {:?} does not support overlapping searches",
-                    got,
-                )
-            }
-            MatchErrorKind::UnsupportedEmpty => {
-                write!(
-                    f,
-                    "matching with an empty pattern string is not \
-                     supported for this operation",
-                )
-            }
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/int.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/int.rs
deleted file mode 100644
index 54762b6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/int.rs
+++ /dev/null
@@ -1,278 +0,0 @@
-/*!
-This module provides several integer oriented traits for converting between
-both fixed size integers and integers whose size varies based on the target
-(like `usize`).
-
-The main design principle for this module is to centralize all uses of `as`.
-The thinking here is that `as` makes it very easy to perform accidental lossy
-conversions, and if we centralize all its uses here under more descriptive
-higher level operations, its use and correctness becomes easier to audit.
-
-This was copied mostly wholesale from `regex-automata`.
-
-NOTE: for simplicity, we don't take target pointer width into account here for
-`usize` conversions. Since we currently only panic in debug mode, skipping the
-check when it can be proven it isn't needed at compile time doesn't really
-matter. Now, if we wind up wanting to do as many checks as possible in release
-mode, then we would want to skip those when we know the conversions are always
-non-lossy.
-*/
-
-// We define a little more than what we need, but I'd rather just have
-// everything via a consistent and uniform API then have holes.
-#![allow(dead_code)]
-
-pub(crate) trait U8 {
-    fn as_usize(self) -> usize;
-}
-
-impl U8 for u8 {
-    fn as_usize(self) -> usize {
-        usize::from(self)
-    }
-}
-
-pub(crate) trait U16 {
-    fn as_usize(self) -> usize;
-    fn low_u8(self) -> u8;
-    fn high_u8(self) -> u8;
-}
-
-impl U16 for u16 {
-    fn as_usize(self) -> usize {
-        usize::from(self)
-    }
-
-    fn low_u8(self) -> u8 {
-        self as u8
-    }
-
-    fn high_u8(self) -> u8 {
-        (self >> 8) as u8
-    }
-}
-
-pub(crate) trait U32 {
-    fn as_usize(self) -> usize;
-    fn low_u8(self) -> u8;
-    fn low_u16(self) -> u16;
-    fn high_u16(self) -> u16;
-}
-
-impl U32 for u32 {
-    #[inline]
-    fn as_usize(self) -> usize {
-        #[cfg(debug_assertions)]
-        {
-            usize::try_from(self).expect("u32 overflowed usize")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as usize
-        }
-    }
-
-    fn low_u8(self) -> u8 {
-        self as u8
-    }
-
-    fn low_u16(self) -> u16 {
-        self as u16
-    }
-
-    fn high_u16(self) -> u16 {
-        (self >> 16) as u16
-    }
-}
-
-pub(crate) trait U64 {
-    fn as_usize(self) -> usize;
-    fn low_u8(self) -> u8;
-    fn low_u16(self) -> u16;
-    fn low_u32(self) -> u32;
-    fn high_u32(self) -> u32;
-}
-
-impl U64 for u64 {
-    fn as_usize(self) -> usize {
-        #[cfg(debug_assertions)]
-        {
-            usize::try_from(self).expect("u64 overflowed usize")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as usize
-        }
-    }
-
-    fn low_u8(self) -> u8 {
-        self as u8
-    }
-
-    fn low_u16(self) -> u16 {
-        self as u16
-    }
-
-    fn low_u32(self) -> u32 {
-        self as u32
-    }
-
-    fn high_u32(self) -> u32 {
-        (self >> 32) as u32
-    }
-}
-
-pub(crate) trait I8 {
-    fn as_usize(self) -> usize;
-    fn to_bits(self) -> u8;
-    fn from_bits(n: u8) -> i8;
-}
-
-impl I8 for i8 {
-    fn as_usize(self) -> usize {
-        #[cfg(debug_assertions)]
-        {
-            usize::try_from(self).expect("i8 overflowed usize")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as usize
-        }
-    }
-
-    fn to_bits(self) -> u8 {
-        self as u8
-    }
-
-    fn from_bits(n: u8) -> i8 {
-        n as i8
-    }
-}
-
-pub(crate) trait I32 {
-    fn as_usize(self) -> usize;
-    fn to_bits(self) -> u32;
-    fn from_bits(n: u32) -> i32;
-}
-
-impl I32 for i32 {
-    fn as_usize(self) -> usize {
-        #[cfg(debug_assertions)]
-        {
-            usize::try_from(self).expect("i32 overflowed usize")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as usize
-        }
-    }
-
-    fn to_bits(self) -> u32 {
-        self as u32
-    }
-
-    fn from_bits(n: u32) -> i32 {
-        n as i32
-    }
-}
-
-pub(crate) trait I64 {
-    fn as_usize(self) -> usize;
-    fn to_bits(self) -> u64;
-    fn from_bits(n: u64) -> i64;
-}
-
-impl I64 for i64 {
-    fn as_usize(self) -> usize {
-        #[cfg(debug_assertions)]
-        {
-            usize::try_from(self).expect("i64 overflowed usize")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as usize
-        }
-    }
-
-    fn to_bits(self) -> u64 {
-        self as u64
-    }
-
-    fn from_bits(n: u64) -> i64 {
-        n as i64
-    }
-}
-
-pub(crate) trait Usize {
-    fn as_u8(self) -> u8;
-    fn as_u16(self) -> u16;
-    fn as_u32(self) -> u32;
-    fn as_u64(self) -> u64;
-}
-
-impl Usize for usize {
-    fn as_u8(self) -> u8 {
-        #[cfg(debug_assertions)]
-        {
-            u8::try_from(self).expect("usize overflowed u8")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as u8
-        }
-    }
-
-    fn as_u16(self) -> u16 {
-        #[cfg(debug_assertions)]
-        {
-            u16::try_from(self).expect("usize overflowed u16")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as u16
-        }
-    }
-
-    fn as_u32(self) -> u32 {
-        #[cfg(debug_assertions)]
-        {
-            u32::try_from(self).expect("usize overflowed u32")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as u32
-        }
-    }
-
-    fn as_u64(self) -> u64 {
-        #[cfg(debug_assertions)]
-        {
-            u64::try_from(self).expect("usize overflowed u64")
-        }
-        #[cfg(not(debug_assertions))]
-        {
-            self as u64
-        }
-    }
-}
-
-// Pointers aren't integers, but we convert pointers to integers to perform
-// offset arithmetic in some places. (And no, we don't convert the integers
-// back to pointers.) So add 'as_usize' conversions here too for completeness.
-//
-// These 'as' casts are actually okay because they're always non-lossy. But the
-// idea here is to just try and remove as much 'as' as possible, particularly
-// in this crate where we are being really paranoid about offsets and making
-// sure we don't panic on inputs that might be untrusted. This way, the 'as'
-// casts become easier to audit if they're all in one place, even when some of
-// them are actually okay 100% of the time.
-
-pub(crate) trait Pointer {
-    fn as_usize(self) -> usize;
-}
-
-impl<T> Pointer for *const T {
-    fn as_usize(self) -> usize {
-        self as usize
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/mod.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/mod.rs
deleted file mode 100644
index f7a1ddd..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-pub(crate) mod alphabet;
-#[cfg(feature = "std")]
-pub(crate) mod buffer;
-pub(crate) mod byte_frequencies;
-pub(crate) mod debug;
-pub(crate) mod error;
-pub(crate) mod int;
-pub(crate) mod prefilter;
-pub(crate) mod primitives;
-pub(crate) mod remapper;
-pub(crate) mod search;
-pub(crate) mod special;
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/prefilter.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/prefilter.rs
deleted file mode 100644
index f5ddc75..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/prefilter.rs
+++ /dev/null
@@ -1,924 +0,0 @@
-use core::{
-    cmp,
-    fmt::Debug,
-    panic::{RefUnwindSafe, UnwindSafe},
-    u8,
-};
-
-use alloc::{sync::Arc, vec, vec::Vec};
-
-use crate::{
-    packed,
-    util::{
-        alphabet::ByteSet,
-        search::{Match, MatchKind, Span},
-    },
-};
-
-/// A prefilter for accelerating a search.
-///
-/// This crate uses prefilters in the core search implementations to accelerate
-/// common cases. They typically only apply to cases where there are a small
-/// number of patterns (less than 100 or so), but when they do, thoughput can
-/// be boosted considerably, perhaps by an order of magnitude. When a prefilter
-/// is active, it is used whenever a search enters an automaton's start state.
-///
-/// Currently, prefilters cannot be constructed by
-/// callers. A `Prefilter` can only be accessed via the
-/// [`Automaton::prefilter`](crate::automaton::Automaton::prefilter)
-/// method and used to execute a search. In other words, a prefilter can be
-/// used to optimize your own search implementation if necessary, but cannot do
-/// much else. If you have a use case for more APIs, please submit an issue.
-#[derive(Clone, Debug)]
-pub struct Prefilter {
-    finder: Arc<dyn PrefilterI>,
-    memory_usage: usize,
-}
-
-impl Prefilter {
-    /// Execute a search in the haystack within the span given. If a match or
-    /// a possible match is returned, then it is guaranteed to occur within
-    /// the bounds of the span.
-    ///
-    /// If the span provided is invalid for the given haystack, then behavior
-    /// is unspecified.
-    #[inline]
-    pub fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        self.finder.find_in(haystack, span)
-    }
-
-    #[inline]
-    pub(crate) fn memory_usage(&self) -> usize {
-        self.memory_usage
-    }
-}
-
-/// A candidate is the result of running a prefilter on a haystack at a
-/// particular position.
-///
-/// The result is either no match, a confirmed match or a possible match.
-///
-/// When no match is returned, the prefilter is guaranteeing that no possible
-/// match can be found in the haystack, and the caller may trust this. That is,
-/// all correct prefilters must never report false negatives.
-///
-/// In some cases, a prefilter can confirm a match very quickly, in which case,
-/// the caller may use this to stop what it's doing and report the match. In
-/// this case, prefilter implementations must never report a false positive.
-/// In other cases, the prefilter can only report a potential match, in which
-/// case the callers must attempt to confirm the match. In this case, prefilter
-/// implementations are permitted to return false positives.
-#[derive(Clone, Debug)]
-pub enum Candidate {
-    /// No match was found. Since false negatives are not possible, this means
-    /// the search can quit as it is guaranteed not to find another match.
-    None,
-    /// A confirmed match was found. Callers do not need to confirm it.
-    Match(Match),
-    /// The start of a possible match was found. Callers must confirm it before
-    /// reporting it as a match.
-    PossibleStartOfMatch(usize),
-}
-
-impl Candidate {
-    /// Convert this candidate into an option. This is useful when callers
-    /// do not distinguish between true positives and false positives (i.e.,
-    /// the caller must always confirm the match).
-    pub fn into_option(self) -> Option<usize> {
-        match self {
-            Candidate::None => None,
-            Candidate::Match(ref m) => Some(m.start()),
-            Candidate::PossibleStartOfMatch(start) => Some(start),
-        }
-    }
-}
-
-/// A prefilter describes the behavior of fast literal scanners for quickly
-/// skipping past bytes in the haystack that we know cannot possibly
-/// participate in a match.
-trait PrefilterI:
-    Send + Sync + RefUnwindSafe + UnwindSafe + Debug + 'static
-{
-    /// Returns the next possible match candidate. This may yield false
-    /// positives, so callers must confirm a match starting at the position
-    /// returned. This, however, must never produce false negatives. That is,
-    /// this must, at minimum, return the starting position of the next match
-    /// in the given haystack after or at the given position.
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate;
-}
-
-impl<P: PrefilterI + ?Sized> PrefilterI for Arc<P> {
-    #[inline(always)]
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        (**self).find_in(haystack, span)
-    }
-}
-
-/// A builder for constructing the best possible prefilter. When constructed,
-/// this builder will heuristically select the best prefilter it can build,
-/// if any, and discard the rest.
-#[derive(Debug)]
-pub(crate) struct Builder {
-    count: usize,
-    ascii_case_insensitive: bool,
-    start_bytes: StartBytesBuilder,
-    rare_bytes: RareBytesBuilder,
-    memmem: MemmemBuilder,
-    packed: Option<packed::Builder>,
-    // If we run across a condition that suggests we shouldn't use a prefilter
-    // at all (like an empty pattern), then disable prefilters entirely.
-    enabled: bool,
-}
-
-impl Builder {
-    /// Create a new builder for constructing the best possible prefilter.
-    pub(crate) fn new(kind: MatchKind) -> Builder {
-        let pbuilder = kind
-            .as_packed()
-            .map(|kind| packed::Config::new().match_kind(kind).builder());
-        Builder {
-            count: 0,
-            ascii_case_insensitive: false,
-            start_bytes: StartBytesBuilder::new(),
-            rare_bytes: RareBytesBuilder::new(),
-            memmem: MemmemBuilder::default(),
-            packed: pbuilder,
-            enabled: true,
-        }
-    }
-
-    /// Enable ASCII case insensitivity. When set, byte strings added to this
-    /// builder will be interpreted without respect to ASCII case.
-    pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder {
-        self.ascii_case_insensitive = yes;
-        self.start_bytes = self.start_bytes.ascii_case_insensitive(yes);
-        self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes);
-        self
-    }
-
-    /// Return a prefilter suitable for quickly finding potential matches.
-    ///
-    /// All patterns added to an Aho-Corasick automaton should be added to this
-    /// builder before attempting to construct the prefilter.
-    pub(crate) fn build(&self) -> Option<Prefilter> {
-        if !self.enabled {
-            debug!("prefilter not enabled, skipping");
-            return None;
-        }
-        // If we only have one pattern, then deferring to memmem is always
-        // the best choice. This is kind of a weird case, because, well, why
-        // use Aho-Corasick if you only have one pattern? But maybe you don't
-        // know exactly how many patterns you'll get up front, and you need to
-        // support the option of multiple patterns. So instead of relying on
-        // the caller to branch and use memmem explicitly, we just do it for
-        // them.
-        if !self.ascii_case_insensitive {
-            if let Some(pre) = self.memmem.build() {
-                debug!("using memmem prefilter");
-                return Some(pre);
-            }
-        }
-        let (packed, patlen, minlen) = if self.ascii_case_insensitive {
-            (None, usize::MAX, 0)
-        } else {
-            let patlen = self.packed.as_ref().map_or(usize::MAX, |p| p.len());
-            let minlen = self.packed.as_ref().map_or(0, |p| p.minimum_len());
-            let packed =
-                self.packed.as_ref().and_then(|b| b.build()).map(|s| {
-                    let memory_usage = s.memory_usage();
-                    debug!(
-                        "built packed prefilter (len: {}, \
-                         minimum pattern len: {}, memory usage: {}) \
-                         for consideration",
-                        patlen, minlen, memory_usage,
-                    );
-                    Prefilter { finder: Arc::new(Packed(s)), memory_usage }
-                });
-            (packed, patlen, minlen)
-        };
-        match (self.start_bytes.build(), self.rare_bytes.build()) {
-            // If we could build both start and rare prefilters, then there are
-            // a few cases in which we'd want to use the start-byte prefilter
-            // over the rare-byte prefilter, since the former has lower
-            // overhead.
-            (prestart @ Some(_), prerare @ Some(_)) => {
-                debug!(
-                    "both start (len={}, rank={}) and \
-                     rare (len={}, rank={}) byte prefilters \
-                     are available",
-                    self.start_bytes.count,
-                    self.start_bytes.rank_sum,
-                    self.rare_bytes.count,
-                    self.rare_bytes.rank_sum,
-                );
-                if patlen <= 16
-                    && minlen >= 2
-                    && self.start_bytes.count >= 3
-                    && self.rare_bytes.count >= 3
-                {
-                    debug!(
-                        "start and rare byte prefilters available, but \
-                             they're probably slower than packed so using \
-                             packed"
-                    );
-                    return packed;
-                }
-                // If the start-byte prefilter can scan for a smaller number
-                // of bytes than the rare-byte prefilter, then it's probably
-                // faster.
-                let has_fewer_bytes =
-                    self.start_bytes.count < self.rare_bytes.count;
-                // Otherwise, if the combined frequency rank of the detected
-                // bytes in the start-byte prefilter is "close" to the combined
-                // frequency rank of the rare-byte prefilter, then we pick
-                // the start-byte prefilter even if the rare-byte prefilter
-                // heuristically searches for rare bytes. This is because the
-                // rare-byte prefilter has higher constant costs, so we tend to
-                // prefer the start-byte prefilter when we can.
-                let has_rarer_bytes =
-                    self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50;
-                if has_fewer_bytes {
-                    debug!(
-                        "using start byte prefilter because it has fewer
-                         bytes to search for than the rare byte prefilter",
-                    );
-                    prestart
-                } else if has_rarer_bytes {
-                    debug!(
-                        "using start byte prefilter because its byte \
-                         frequency rank was determined to be \
-                         \"good enough\" relative to the rare byte prefilter \
-                         byte frequency rank",
-                    );
-                    prestart
-                } else {
-                    debug!("using rare byte prefilter");
-                    prerare
-                }
-            }
-            (prestart @ Some(_), None) => {
-                if patlen <= 16 && minlen >= 2 && self.start_bytes.count >= 3 {
-                    debug!(
-                        "start byte prefilter available, but \
-                         it's probably slower than packed so using \
-                         packed"
-                    );
-                    return packed;
-                }
-                debug!(
-                    "have start byte prefilter but not rare byte prefilter, \
-                     so using start byte prefilter",
-                );
-                prestart
-            }
-            (None, prerare @ Some(_)) => {
-                if patlen <= 16 && minlen >= 2 && self.rare_bytes.count >= 3 {
-                    debug!(
-                        "rare byte prefilter available, but \
-                         it's probably slower than packed so using \
-                         packed"
-                    );
-                    return packed;
-                }
-                debug!(
-                    "have rare byte prefilter but not start byte prefilter, \
-                     so using rare byte prefilter",
-                );
-                prerare
-            }
-            (None, None) if self.ascii_case_insensitive => {
-                debug!(
-                    "no start or rare byte prefilter and ASCII case \
-                     insensitivity was enabled, so skipping prefilter",
-                );
-                None
-            }
-            (None, None) => {
-                if packed.is_some() {
-                    debug!("falling back to packed prefilter");
-                } else {
-                    debug!("no prefilter available");
-                }
-                packed
-            }
-        }
-    }
-
-    /// Add a literal string to this prefilter builder.
-    pub(crate) fn add(&mut self, bytes: &[u8]) {
-        if bytes.is_empty() {
-            self.enabled = false;
-        }
-        if !self.enabled {
-            return;
-        }
-        self.count += 1;
-        self.start_bytes.add(bytes);
-        self.rare_bytes.add(bytes);
-        self.memmem.add(bytes);
-        if let Some(ref mut pbuilder) = self.packed {
-            pbuilder.add(bytes);
-        }
-    }
-}
-
-/// A type that wraps a packed searcher and implements the `Prefilter`
-/// interface.
-#[derive(Clone, Debug)]
-struct Packed(packed::Searcher);
-
-impl PrefilterI for Packed {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        self.0
-            .find_in(&haystack, span)
-            .map_or(Candidate::None, Candidate::Match)
-    }
-}
-
-/// A builder for constructing a prefilter that uses memmem.
-#[derive(Debug, Default)]
-struct MemmemBuilder {
-    /// The number of patterns that have been added.
-    count: usize,
-    /// The singular pattern to search for. This is only set when count==1.
-    one: Option<Vec<u8>>,
-}
-
-impl MemmemBuilder {
-    fn build(&self) -> Option<Prefilter> {
-        #[cfg(all(feature = "std", feature = "perf-literal"))]
-        fn imp(builder: &MemmemBuilder) -> Option<Prefilter> {
-            let pattern = builder.one.as_ref()?;
-            assert_eq!(1, builder.count);
-            let finder = Arc::new(Memmem(
-                memchr::memmem::Finder::new(pattern).into_owned(),
-            ));
-            let memory_usage = pattern.len();
-            Some(Prefilter { finder, memory_usage })
-        }
-
-        #[cfg(not(all(feature = "std", feature = "perf-literal")))]
-        fn imp(_: &MemmemBuilder) -> Option<Prefilter> {
-            None
-        }
-
-        imp(self)
-    }
-
-    fn add(&mut self, bytes: &[u8]) {
-        self.count += 1;
-        if self.count == 1 {
-            self.one = Some(bytes.to_vec());
-        } else {
-            self.one = None;
-        }
-    }
-}
-
-/// A type that wraps a SIMD accelerated single substring search from the
-/// `memchr` crate for use as a prefilter.
-///
-/// Currently, this prefilter is only active for Aho-Corasick searchers with
-/// a single pattern. In theory, this could be extended to support searchers
-/// that have a common prefix of more than one byte (for one byte, we would use
-/// memchr), but it's not clear if it's worth it or not.
-///
-/// Also, unfortunately, this currently also requires the 'std' feature to
-/// be enabled. That's because memchr doesn't have a no-std-but-with-alloc
-/// mode, and so APIs like Finder::into_owned aren't available when 'std' is
-/// disabled. But there should be an 'alloc' feature that brings in APIs like
-/// Finder::into_owned but doesn't use std-only features like runtime CPU
-/// feature detection.
-#[cfg(all(feature = "std", feature = "perf-literal"))]
-#[derive(Clone, Debug)]
-struct Memmem(memchr::memmem::Finder<'static>);
-
-#[cfg(all(feature = "std", feature = "perf-literal"))]
-impl PrefilterI for Memmem {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        use crate::util::primitives::PatternID;
-
-        self.0.find(&haystack[span]).map_or(Candidate::None, |i| {
-            let start = span.start + i;
-            let end = start + self.0.needle().len();
-            // N.B. We can declare a match and use a fixed pattern ID here
-            // because a Memmem prefilter is only ever created for searchers
-            // with exactly one pattern. Thus, every match is always a match
-            // and it is always for the first and only pattern.
-            Candidate::Match(Match::new(PatternID::ZERO, start..end))
-        })
-    }
-}
-
-/// A builder for constructing a rare byte prefilter.
-///
-/// A rare byte prefilter attempts to pick out a small set of rare bytes that
-/// occurr in the patterns, and then quickly scan to matches of those rare
-/// bytes.
-#[derive(Clone, Debug)]
-struct RareBytesBuilder {
-    /// Whether this prefilter should account for ASCII case insensitivity or
-    /// not.
-    ascii_case_insensitive: bool,
-    /// A set of rare bytes, indexed by byte value.
-    rare_set: ByteSet,
-    /// A set of byte offsets associated with bytes in a pattern. An entry
-    /// corresponds to a particular bytes (its index) and is only non-zero if
-    /// the byte occurred at an offset greater than 0 in at least one pattern.
-    ///
-    /// If a byte's offset is not representable in 8 bits, then the rare bytes
-    /// prefilter becomes inert.
-    byte_offsets: RareByteOffsets,
-    /// Whether this is available as a prefilter or not. This can be set to
-    /// false during construction if a condition is seen that invalidates the
-    /// use of the rare-byte prefilter.
-    available: bool,
-    /// The number of bytes set to an active value in `byte_offsets`.
-    count: usize,
-    /// The sum of frequency ranks for the rare bytes detected. This is
-    /// intended to give a heuristic notion of how rare the bytes are.
-    rank_sum: u16,
-}
-
-/// A set of byte offsets, keyed by byte.
-#[derive(Clone, Copy)]
-struct RareByteOffsets {
-    /// Each entry corresponds to the maximum offset of the corresponding
-    /// byte across all patterns seen.
-    set: [RareByteOffset; 256],
-}
-
-impl RareByteOffsets {
-    /// Create a new empty set of rare byte offsets.
-    pub(crate) fn empty() -> RareByteOffsets {
-        RareByteOffsets { set: [RareByteOffset::default(); 256] }
-    }
-
-    /// Add the given offset for the given byte to this set. If the offset is
-    /// greater than the existing offset, then it overwrites the previous
-    /// value and returns false. If there is no previous value set, then this
-    /// sets it and returns true.
-    pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) {
-        self.set[byte as usize].max =
-            cmp::max(self.set[byte as usize].max, off.max);
-    }
-}
-
-impl core::fmt::Debug for RareByteOffsets {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        let mut offsets = vec![];
-        for off in self.set.iter() {
-            if off.max > 0 {
-                offsets.push(off);
-            }
-        }
-        f.debug_struct("RareByteOffsets").field("set", &offsets).finish()
-    }
-}
-
-/// Offsets associated with an occurrence of a "rare" byte in any of the
-/// patterns used to construct a single Aho-Corasick automaton.
-#[derive(Clone, Copy, Debug)]
-struct RareByteOffset {
-    /// The maximum offset at which a particular byte occurs from the start
-    /// of any pattern. This is used as a shift amount. That is, when an
-    /// occurrence of this byte is found, the candidate position reported by
-    /// the prefilter is `position_of_byte - max`, such that the automaton
-    /// will begin its search at a position that is guaranteed to observe a
-    /// match.
-    ///
-    /// To avoid accidentally quadratic behavior, a prefilter is considered
-    /// ineffective when it is asked to start scanning from a position that it
-    /// has already scanned past.
-    ///
-    /// Using a `u8` here means that if we ever see a pattern that's longer
-    /// than 255 bytes, then the entire rare byte prefilter is disabled.
-    max: u8,
-}
-
-impl Default for RareByteOffset {
-    fn default() -> RareByteOffset {
-        RareByteOffset { max: 0 }
-    }
-}
-
-impl RareByteOffset {
-    /// Create a new rare byte offset. If the given offset is too big, then
-    /// None is returned. In that case, callers should render the rare bytes
-    /// prefilter inert.
-    fn new(max: usize) -> Option<RareByteOffset> {
-        if max > u8::MAX as usize {
-            None
-        } else {
-            Some(RareByteOffset { max: max as u8 })
-        }
-    }
-}
-
-impl RareBytesBuilder {
-    /// Create a new builder for constructing a rare byte prefilter.
-    fn new() -> RareBytesBuilder {
-        RareBytesBuilder {
-            ascii_case_insensitive: false,
-            rare_set: ByteSet::empty(),
-            byte_offsets: RareByteOffsets::empty(),
-            available: true,
-            count: 0,
-            rank_sum: 0,
-        }
-    }
-
-    /// Enable ASCII case insensitivity. When set, byte strings added to this
-    /// builder will be interpreted without respect to ASCII case.
-    fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder {
-        self.ascii_case_insensitive = yes;
-        self
-    }
-
-    /// Build the rare bytes prefilter.
-    ///
-    /// If there are more than 3 distinct rare bytes found, or if heuristics
-    /// otherwise determine that this prefilter should not be used, then `None`
-    /// is returned.
-    fn build(&self) -> Option<Prefilter> {
-        #[cfg(feature = "perf-literal")]
-        fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> {
-            if !builder.available || builder.count > 3 {
-                return None;
-            }
-            let (mut bytes, mut len) = ([0; 3], 0);
-            for b in 0..=255 {
-                if builder.rare_set.contains(b) {
-                    bytes[len] = b as u8;
-                    len += 1;
-                }
-            }
-            let finder: Arc<dyn PrefilterI> = match len {
-                0 => return None,
-                1 => Arc::new(RareBytesOne {
-                    byte1: bytes[0],
-                    offset: builder.byte_offsets.set[bytes[0] as usize],
-                }),
-                2 => Arc::new(RareBytesTwo {
-                    offsets: builder.byte_offsets,
-                    byte1: bytes[0],
-                    byte2: bytes[1],
-                }),
-                3 => Arc::new(RareBytesThree {
-                    offsets: builder.byte_offsets,
-                    byte1: bytes[0],
-                    byte2: bytes[1],
-                    byte3: bytes[2],
-                }),
-                _ => unreachable!(),
-            };
-            Some(Prefilter { finder, memory_usage: 0 })
-        }
-
-        #[cfg(not(feature = "perf-literal"))]
-        fn imp(_: &RareBytesBuilder) -> Option<Prefilter> {
-            None
-        }
-
-        imp(self)
-    }
-
-    /// Add a byte string to this builder.
-    ///
-    /// All patterns added to an Aho-Corasick automaton should be added to this
-    /// builder before attempting to construct the prefilter.
-    fn add(&mut self, bytes: &[u8]) {
-        // If we've already given up, then do nothing.
-        if !self.available {
-            return;
-        }
-        // If we've already blown our budget, then don't waste time looking
-        // for more rare bytes.
-        if self.count > 3 {
-            self.available = false;
-            return;
-        }
-        // If the pattern is too long, then our offset table is bunk, so
-        // give up.
-        if bytes.len() >= 256 {
-            self.available = false;
-            return;
-        }
-        let mut rarest = match bytes.get(0) {
-            None => return,
-            Some(&b) => (b, freq_rank(b)),
-        };
-        // The idea here is to look for the rarest byte in each pattern, and
-        // add that to our set. As a special exception, if we see a byte that
-        // we've already added, then we immediately stop and choose that byte,
-        // even if there's another rare byte in the pattern. This helps us
-        // apply the rare byte optimization in more cases by attempting to pick
-        // bytes that are in common between patterns. So for example, if we
-        // were searching for `Sherlock` and `lockjaw`, then this would pick
-        // `k` for both patterns, resulting in the use of `memchr` instead of
-        // `memchr2` for `k` and `j`.
-        let mut found = false;
-        for (pos, &b) in bytes.iter().enumerate() {
-            self.set_offset(pos, b);
-            if found {
-                continue;
-            }
-            if self.rare_set.contains(b) {
-                found = true;
-                continue;
-            }
-            let rank = freq_rank(b);
-            if rank < rarest.1 {
-                rarest = (b, rank);
-            }
-        }
-        if !found {
-            self.add_rare_byte(rarest.0);
-        }
-    }
-
-    fn set_offset(&mut self, pos: usize, byte: u8) {
-        // This unwrap is OK because pos is never bigger than our max.
-        let offset = RareByteOffset::new(pos).unwrap();
-        self.byte_offsets.set(byte, offset);
-        if self.ascii_case_insensitive {
-            self.byte_offsets.set(opposite_ascii_case(byte), offset);
-        }
-    }
-
-    fn add_rare_byte(&mut self, byte: u8) {
-        self.add_one_rare_byte(byte);
-        if self.ascii_case_insensitive {
-            self.add_one_rare_byte(opposite_ascii_case(byte));
-        }
-    }
-
-    fn add_one_rare_byte(&mut self, byte: u8) {
-        if !self.rare_set.contains(byte) {
-            self.rare_set.add(byte);
-            self.count += 1;
-            self.rank_sum += freq_rank(byte) as u16;
-        }
-    }
-}
-
-/// A prefilter for scanning for a single "rare" byte.
-#[cfg(feature = "perf-literal")]
-#[derive(Clone, Debug)]
-struct RareBytesOne {
-    byte1: u8,
-    offset: RareByteOffset,
-}
-
-#[cfg(feature = "perf-literal")]
-impl PrefilterI for RareBytesOne {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        memchr::memchr(self.byte1, &haystack[span])
-            .map(|i| {
-                let pos = span.start + i;
-                cmp::max(
-                    span.start,
-                    pos.saturating_sub(usize::from(self.offset.max)),
-                )
-            })
-            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
-    }
-}
-
-/// A prefilter for scanning for two "rare" bytes.
-#[cfg(feature = "perf-literal")]
-#[derive(Clone, Debug)]
-struct RareBytesTwo {
-    offsets: RareByteOffsets,
-    byte1: u8,
-    byte2: u8,
-}
-
-#[cfg(feature = "perf-literal")]
-impl PrefilterI for RareBytesTwo {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        memchr::memchr2(self.byte1, self.byte2, &haystack[span])
-            .map(|i| {
-                let pos = span.start + i;
-                let offset = self.offsets.set[usize::from(haystack[pos])].max;
-                cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
-            })
-            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
-    }
-}
-
-/// A prefilter for scanning for three "rare" bytes.
-#[cfg(feature = "perf-literal")]
-#[derive(Clone, Debug)]
-struct RareBytesThree {
-    offsets: RareByteOffsets,
-    byte1: u8,
-    byte2: u8,
-    byte3: u8,
-}
-
-#[cfg(feature = "perf-literal")]
-impl PrefilterI for RareBytesThree {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
-            .map(|i| {
-                let pos = span.start + i;
-                let offset = self.offsets.set[usize::from(haystack[pos])].max;
-                cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
-            })
-            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
-    }
-}
-
-/// A builder for constructing a starting byte prefilter.
-///
-/// A starting byte prefilter is a simplistic prefilter that looks for possible
-/// matches by reporting all positions corresponding to a particular byte. This
-/// generally only takes affect when there are at most 3 distinct possible
-/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two
-/// distinct starting bytes (`f` and `b`), and this prefilter returns all
-/// occurrences of either `f` or `b`.
-///
-/// In some cases, a heuristic frequency analysis may determine that it would
-/// be better not to use this prefilter even when there are 3 or fewer distinct
-/// starting bytes.
-#[derive(Clone, Debug)]
-struct StartBytesBuilder {
-    /// Whether this prefilter should account for ASCII case insensitivity or
-    /// not.
-    ascii_case_insensitive: bool,
-    /// The set of starting bytes observed.
-    byteset: Vec<bool>,
-    /// The number of bytes set to true in `byteset`.
-    count: usize,
-    /// The sum of frequency ranks for the rare bytes detected. This is
-    /// intended to give a heuristic notion of how rare the bytes are.
-    rank_sum: u16,
-}
-
-impl StartBytesBuilder {
-    /// Create a new builder for constructing a start byte prefilter.
-    fn new() -> StartBytesBuilder {
-        StartBytesBuilder {
-            ascii_case_insensitive: false,
-            byteset: vec![false; 256],
-            count: 0,
-            rank_sum: 0,
-        }
-    }
-
-    /// Enable ASCII case insensitivity. When set, byte strings added to this
-    /// builder will be interpreted without respect to ASCII case.
-    fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder {
-        self.ascii_case_insensitive = yes;
-        self
-    }
-
-    /// Build the starting bytes prefilter.
-    ///
-    /// If there are more than 3 distinct starting bytes, or if heuristics
-    /// otherwise determine that this prefilter should not be used, then `None`
-    /// is returned.
-    fn build(&self) -> Option<Prefilter> {
-        #[cfg(feature = "perf-literal")]
-        fn imp(builder: &StartBytesBuilder) -> Option<Prefilter> {
-            if builder.count > 3 {
-                return None;
-            }
-            let (mut bytes, mut len) = ([0; 3], 0);
-            for b in 0..256 {
-                if !builder.byteset[b] {
-                    continue;
-                }
-                // We don't handle non-ASCII bytes for now. Getting non-ASCII
-                // bytes right is trickier, since we generally don't want to put
-                // a leading UTF-8 code unit into a prefilter that isn't ASCII,
-                // since they can frequently. Instead, it would be better to use a
-                // continuation byte, but this requires more sophisticated analysis
-                // of the automaton and a richer prefilter API.
-                if b > 0x7F {
-                    return None;
-                }
-                bytes[len] = b as u8;
-                len += 1;
-            }
-            let finder: Arc<dyn PrefilterI> = match len {
-                0 => return None,
-                1 => Arc::new(StartBytesOne { byte1: bytes[0] }),
-                2 => Arc::new(StartBytesTwo {
-                    byte1: bytes[0],
-                    byte2: bytes[1],
-                }),
-                3 => Arc::new(StartBytesThree {
-                    byte1: bytes[0],
-                    byte2: bytes[1],
-                    byte3: bytes[2],
-                }),
-                _ => unreachable!(),
-            };
-            Some(Prefilter { finder, memory_usage: 0 })
-        }
-
-        #[cfg(not(feature = "perf-literal"))]
-        fn imp(_: &StartBytesBuilder) -> Option<Prefilter> {
-            None
-        }
-
-        imp(self)
-    }
-
-    /// Add a byte string to this builder.
-    ///
-    /// All patterns added to an Aho-Corasick automaton should be added to this
-    /// builder before attempting to construct the prefilter.
-    fn add(&mut self, bytes: &[u8]) {
-        if self.count > 3 {
-            return;
-        }
-        if let Some(&byte) = bytes.get(0) {
-            self.add_one_byte(byte);
-            if self.ascii_case_insensitive {
-                self.add_one_byte(opposite_ascii_case(byte));
-            }
-        }
-    }
-
-    fn add_one_byte(&mut self, byte: u8) {
-        if !self.byteset[byte as usize] {
-            self.byteset[byte as usize] = true;
-            self.count += 1;
-            self.rank_sum += freq_rank(byte) as u16;
-        }
-    }
-}
-
-/// A prefilter for scanning for a single starting byte.
-#[cfg(feature = "perf-literal")]
-#[derive(Clone, Debug)]
-struct StartBytesOne {
-    byte1: u8,
-}
-
-#[cfg(feature = "perf-literal")]
-impl PrefilterI for StartBytesOne {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        memchr::memchr(self.byte1, &haystack[span])
-            .map(|i| span.start + i)
-            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
-    }
-}
-
-/// A prefilter for scanning for two starting bytes.
-#[cfg(feature = "perf-literal")]
-#[derive(Clone, Debug)]
-struct StartBytesTwo {
-    byte1: u8,
-    byte2: u8,
-}
-
-#[cfg(feature = "perf-literal")]
-impl PrefilterI for StartBytesTwo {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        memchr::memchr2(self.byte1, self.byte2, &haystack[span])
-            .map(|i| span.start + i)
-            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
-    }
-}
-
-/// A prefilter for scanning for three starting bytes.
-#[cfg(feature = "perf-literal")]
-#[derive(Clone, Debug)]
-struct StartBytesThree {
-    byte1: u8,
-    byte2: u8,
-    byte3: u8,
-}
-
-#[cfg(feature = "perf-literal")]
-impl PrefilterI for StartBytesThree {
-    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
-        memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
-            .map(|i| span.start + i)
-            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
-    }
-}
-
-/// If the given byte is an ASCII letter, then return it in the opposite case.
-/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns
-/// `b'A'`. If a non-ASCII letter is given, then the given byte is returned.
-pub(crate) fn opposite_ascii_case(b: u8) -> u8 {
-    if b'A' <= b && b <= b'Z' {
-        b.to_ascii_lowercase()
-    } else if b'a' <= b && b <= b'z' {
-        b.to_ascii_uppercase()
-    } else {
-        b
-    }
-}
-
-/// Return the frequency rank of the given byte. The higher the rank, the more
-/// common the byte (heuristically speaking).
-fn freq_rank(b: u8) -> u8 {
-    use crate::util::byte_frequencies::BYTE_FREQUENCIES;
-    BYTE_FREQUENCIES[b as usize]
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/primitives.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/primitives.rs
deleted file mode 100644
index 784d397..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/primitives.rs
+++ /dev/null
@@ -1,759 +0,0 @@
-/*!
-Lower level primitive types that are useful in a variety of circumstances.
-
-# Overview
-
-This list represents the principle types in this module and briefly describes
-when you might want to use them.
-
-* [`PatternID`] - A type that represents the identifier of a regex pattern.
-This is probably the most widely used type in this module (which is why it's
-also re-exported in the crate root).
-* [`StateID`] - A type the represents the identifier of a finite automaton
-state. This is used for both NFAs and DFAs, with the notable exception of
-the hybrid NFA/DFA. (The hybrid NFA/DFA uses a special purpose "lazy" state
-identifier.)
-* [`SmallIndex`] - The internal representation of both a `PatternID` and a
-`StateID`. Its purpose is to serve as a type that can index memory without
-being as big as a `usize` on 64-bit targets. The main idea behind this type
-is that there are many things in regex engines that will, in practice, never
-overflow a 32-bit integer. (For example, like the number of patterns in a regex
-or the number of states in an NFA.) Thus, a `SmallIndex` can be used to index
-memory without peppering `as` casts everywhere. Moreover, it forces callers
-to handle errors in the case where, somehow, the value would otherwise overflow
-either a 32-bit integer or a `usize` (e.g., on 16-bit targets).
-*/
-
-// The macro we use to define some types below adds methods that we don't
-// use on some of the types. There isn't much, so we just squash the warning.
-#![allow(dead_code)]
-
-use alloc::vec::Vec;
-
-use crate::util::int::{Usize, U16, U32, U64};
-
-/// A type that represents a "small" index.
-///
-/// The main idea of this type is to provide something that can index memory,
-/// but uses less memory than `usize` on 64-bit systems. Specifically, its
-/// representation is always a `u32` and has `repr(transparent)` enabled. (So
-/// it is safe to transmute between a `u32` and a `SmallIndex`.)
-///
-/// A small index is typically useful in cases where there is no practical way
-/// that the index will overflow a 32-bit integer. A good example of this is
-/// an NFA state. If you could somehow build an NFA with `2^30` states, its
-/// memory usage would be exorbitant and its runtime execution would be so
-/// slow as to be completely worthless. Therefore, this crate generally deems
-/// it acceptable to return an error if it would otherwise build an NFA that
-/// requires a slice longer than what a 32-bit integer can index. In exchange,
-/// we can use 32-bit indices instead of 64-bit indices in various places.
-///
-/// This type ensures this by providing a constructor that will return an error
-/// if its argument cannot fit into the type. This makes it much easier to
-/// handle these sorts of boundary cases that are otherwise extremely subtle.
-///
-/// On all targets, this type guarantees that its value will fit in a `u32`,
-/// `i32`, `usize` and an `isize`. This means that on 16-bit targets, for
-/// example, this type's maximum value will never overflow an `isize`,
-/// which means it will never overflow a `i16` even though its internal
-/// representation is still a `u32`.
-///
-/// The purpose for making the type fit into even signed integer types like
-/// `isize` is to guarantee that the difference between any two small indices
-/// is itself also a small index. This is useful in certain contexts, e.g.,
-/// for delta encoding.
-///
-/// # Other types
-///
-/// The following types wrap `SmallIndex` to provide a more focused use case:
-///
-/// * [`PatternID`] is for representing the identifiers of patterns.
-/// * [`StateID`] is for representing the identifiers of states in finite
-/// automata. It is used for both NFAs and DFAs.
-///
-/// # Representation
-///
-/// This type is always represented internally by a `u32` and is marked as
-/// `repr(transparent)`. Thus, this type always has the same representation as
-/// a `u32`. It is thus safe to transmute between a `u32` and a `SmallIndex`.
-///
-/// # Indexing
-///
-/// For convenience, callers may use a `SmallIndex` to index slices.
-///
-/// # Safety
-///
-/// While a `SmallIndex` is meant to guarantee that its value fits into `usize`
-/// without using as much space as a `usize` on all targets, callers must
-/// not rely on this property for safety. Callers may choose to rely on this
-/// property for correctness however. For example, creating a `SmallIndex` with
-/// an invalid value can be done in entirely safe code. This may in turn result
-/// in panics or silent logical errors.
-#[derive(
-    Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord,
-)]
-#[repr(transparent)]
-pub(crate) struct SmallIndex(u32);
-
-impl SmallIndex {
-    /// The maximum index value.
-    #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
-    pub const MAX: SmallIndex =
-        // FIXME: Use as_usize() once const functions in traits are stable.
-        SmallIndex::new_unchecked(core::i32::MAX as usize - 1);
-
-    /// The maximum index value.
-    #[cfg(target_pointer_width = "16")]
-    pub const MAX: SmallIndex =
-        SmallIndex::new_unchecked(core::isize::MAX - 1);
-
-    /// The total number of values that can be represented as a small index.
-    pub const LIMIT: usize = SmallIndex::MAX.as_usize() + 1;
-
-    /// The zero index value.
-    pub const ZERO: SmallIndex = SmallIndex::new_unchecked(0);
-
-    /// The number of bytes that a single small index uses in memory.
-    pub const SIZE: usize = core::mem::size_of::<SmallIndex>();
-
-    /// Create a new small index.
-    ///
-    /// If the given index exceeds [`SmallIndex::MAX`], then this returns
-    /// an error.
-    #[inline]
-    pub fn new(index: usize) -> Result<SmallIndex, SmallIndexError> {
-        SmallIndex::try_from(index)
-    }
-
-    /// Create a new small index without checking whether the given value
-    /// exceeds [`SmallIndex::MAX`].
-    ///
-    /// Using this routine with an invalid index value will result in
-    /// unspecified behavior, but *not* undefined behavior. In particular, an
-    /// invalid index value is likely to cause panics or possibly even silent
-    /// logical errors.
-    ///
-    /// Callers must never rely on a `SmallIndex` to be within a certain range
-    /// for memory safety.
-    #[inline]
-    pub const fn new_unchecked(index: usize) -> SmallIndex {
-        // FIXME: Use as_u32() once const functions in traits are stable.
-        SmallIndex::from_u32_unchecked(index as u32)
-    }
-
-    /// Create a new small index from a `u32` without checking whether the
-    /// given value exceeds [`SmallIndex::MAX`].
-    ///
-    /// Using this routine with an invalid index value will result in
-    /// unspecified behavior, but *not* undefined behavior. In particular, an
-    /// invalid index value is likely to cause panics or possibly even silent
-    /// logical errors.
-    ///
-    /// Callers must never rely on a `SmallIndex` to be within a certain range
-    /// for memory safety.
-    #[inline]
-    pub const fn from_u32_unchecked(index: u32) -> SmallIndex {
-        SmallIndex(index)
-    }
-
-    /// Like [`SmallIndex::new`], but panics if the given index is not valid.
-    #[inline]
-    pub fn must(index: usize) -> SmallIndex {
-        SmallIndex::new(index).expect("invalid small index")
-    }
-
-    /// Return this small index as a `usize`. This is guaranteed to never
-    /// overflow `usize`.
-    #[inline]
-    pub const fn as_usize(&self) -> usize {
-        // FIXME: Use as_usize() once const functions in traits are stable.
-        self.0 as usize
-    }
-
-    /// Return this small index as a `u64`. This is guaranteed to never
-    /// overflow.
-    #[inline]
-    pub const fn as_u64(&self) -> u64 {
-        // FIXME: Use u64::from() once const functions in traits are stable.
-        self.0 as u64
-    }
-
-    /// Return the internal `u32` of this small index. This is guaranteed to
-    /// never overflow `u32`.
-    #[inline]
-    pub const fn as_u32(&self) -> u32 {
-        self.0
-    }
-
-    /// Return the internal `u32` of this small index represented as an `i32`.
-    /// This is guaranteed to never overflow an `i32`.
-    #[inline]
-    pub const fn as_i32(&self) -> i32 {
-        // This is OK because we guarantee that our max value is <= i32::MAX.
-        self.0 as i32
-    }
-
-    /// Returns one more than this small index as a usize.
-    ///
-    /// Since a small index has constraints on its maximum value, adding `1` to
-    /// it will always fit in a `usize`, `isize`, `u32` and a `i32`.
-    #[inline]
-    pub fn one_more(&self) -> usize {
-        self.as_usize() + 1
-    }
-
-    /// Decode this small index from the bytes given using the native endian
-    /// byte order for the current target.
-    ///
-    /// If the decoded integer is not representable as a small index for the
-    /// current target, then this returns an error.
-    #[inline]
-    pub fn from_ne_bytes(
-        bytes: [u8; 4],
-    ) -> Result<SmallIndex, SmallIndexError> {
-        let id = u32::from_ne_bytes(bytes);
-        if id > SmallIndex::MAX.as_u32() {
-            return Err(SmallIndexError { attempted: u64::from(id) });
-        }
-        Ok(SmallIndex::new_unchecked(id.as_usize()))
-    }
-
-    /// Decode this small index from the bytes given using the native endian
-    /// byte order for the current target.
-    ///
-    /// This is analogous to [`SmallIndex::new_unchecked`] in that is does not
-    /// check whether the decoded integer is representable as a small index.
-    #[inline]
-    pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> SmallIndex {
-        SmallIndex::new_unchecked(u32::from_ne_bytes(bytes).as_usize())
-    }
-
-    /// Return the underlying small index integer as raw bytes in native endian
-    /// format.
-    #[inline]
-    pub fn to_ne_bytes(&self) -> [u8; 4] {
-        self.0.to_ne_bytes()
-    }
-}
-
-impl<T> core::ops::Index<SmallIndex> for [T] {
-    type Output = T;
-
-    #[inline]
-    fn index(&self, index: SmallIndex) -> &T {
-        &self[index.as_usize()]
-    }
-}
-
-impl<T> core::ops::IndexMut<SmallIndex> for [T] {
-    #[inline]
-    fn index_mut(&mut self, index: SmallIndex) -> &mut T {
-        &mut self[index.as_usize()]
-    }
-}
-
-impl<T> core::ops::Index<SmallIndex> for Vec<T> {
-    type Output = T;
-
-    #[inline]
-    fn index(&self, index: SmallIndex) -> &T {
-        &self[index.as_usize()]
-    }
-}
-
-impl<T> core::ops::IndexMut<SmallIndex> for Vec<T> {
-    #[inline]
-    fn index_mut(&mut self, index: SmallIndex) -> &mut T {
-        &mut self[index.as_usize()]
-    }
-}
-
-impl From<StateID> for SmallIndex {
-    fn from(sid: StateID) -> SmallIndex {
-        sid.0
-    }
-}
-
-impl From<PatternID> for SmallIndex {
-    fn from(pid: PatternID) -> SmallIndex {
-        pid.0
-    }
-}
-
-impl From<u8> for SmallIndex {
-    fn from(index: u8) -> SmallIndex {
-        SmallIndex::new_unchecked(usize::from(index))
-    }
-}
-
-impl TryFrom<u16> for SmallIndex {
-    type Error = SmallIndexError;
-
-    fn try_from(index: u16) -> Result<SmallIndex, SmallIndexError> {
-        if u32::from(index) > SmallIndex::MAX.as_u32() {
-            return Err(SmallIndexError { attempted: u64::from(index) });
-        }
-        Ok(SmallIndex::new_unchecked(index.as_usize()))
-    }
-}
-
-impl TryFrom<u32> for SmallIndex {
-    type Error = SmallIndexError;
-
-    fn try_from(index: u32) -> Result<SmallIndex, SmallIndexError> {
-        if index > SmallIndex::MAX.as_u32() {
-            return Err(SmallIndexError { attempted: u64::from(index) });
-        }
-        Ok(SmallIndex::new_unchecked(index.as_usize()))
-    }
-}
-
-impl TryFrom<u64> for SmallIndex {
-    type Error = SmallIndexError;
-
-    fn try_from(index: u64) -> Result<SmallIndex, SmallIndexError> {
-        if index > SmallIndex::MAX.as_u64() {
-            return Err(SmallIndexError { attempted: index });
-        }
-        Ok(SmallIndex::new_unchecked(index.as_usize()))
-    }
-}
-
-impl TryFrom<usize> for SmallIndex {
-    type Error = SmallIndexError;
-
-    fn try_from(index: usize) -> Result<SmallIndex, SmallIndexError> {
-        if index > SmallIndex::MAX.as_usize() {
-            return Err(SmallIndexError { attempted: index.as_u64() });
-        }
-        Ok(SmallIndex::new_unchecked(index))
-    }
-}
-
-/// This error occurs when a small index could not be constructed.
-///
-/// This occurs when given an integer exceeding the maximum small index value.
-///
-/// When the `std` feature is enabled, this implements the `Error` trait.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct SmallIndexError {
-    attempted: u64,
-}
-
-impl SmallIndexError {
-    /// Returns the value that could not be converted to a small index.
-    pub fn attempted(&self) -> u64 {
-        self.attempted
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for SmallIndexError {}
-
-impl core::fmt::Display for SmallIndexError {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        write!(
-            f,
-            "failed to create small index from {:?}, which exceeds {:?}",
-            self.attempted(),
-            SmallIndex::MAX,
-        )
-    }
-}
-
-#[derive(Clone, Debug)]
-pub(crate) struct SmallIndexIter {
-    rng: core::ops::Range<usize>,
-}
-
-impl Iterator for SmallIndexIter {
-    type Item = SmallIndex;
-
-    fn next(&mut self) -> Option<SmallIndex> {
-        if self.rng.start >= self.rng.end {
-            return None;
-        }
-        let next_id = self.rng.start + 1;
-        let id = core::mem::replace(&mut self.rng.start, next_id);
-        // new_unchecked is OK since we asserted that the number of
-        // elements in this iterator will fit in an ID at construction.
-        Some(SmallIndex::new_unchecked(id))
-    }
-}
-
-macro_rules! index_type_impls {
-    ($name:ident, $err:ident, $iter:ident, $withiter:ident) => {
-        impl $name {
-            /// The maximum value.
-            pub const MAX: $name = $name(SmallIndex::MAX);
-
-            /// The total number of values that can be represented.
-            pub const LIMIT: usize = SmallIndex::LIMIT;
-
-            /// The zero value.
-            pub const ZERO: $name = $name(SmallIndex::ZERO);
-
-            /// The number of bytes that a single value uses in memory.
-            pub const SIZE: usize = SmallIndex::SIZE;
-
-            /// Create a new value that is represented by a "small index."
-            ///
-            /// If the given index exceeds the maximum allowed value, then this
-            /// returns an error.
-            #[inline]
-            pub fn new(value: usize) -> Result<$name, $err> {
-                SmallIndex::new(value).map($name).map_err($err)
-            }
-
-            /// Create a new value without checking whether the given argument
-            /// exceeds the maximum.
-            ///
-            /// Using this routine with an invalid value will result in
-            /// unspecified behavior, but *not* undefined behavior. In
-            /// particular, an invalid ID value is likely to cause panics or
-            /// possibly even silent logical errors.
-            ///
-            /// Callers must never rely on this type to be within a certain
-            /// range for memory safety.
-            #[inline]
-            pub const fn new_unchecked(value: usize) -> $name {
-                $name(SmallIndex::new_unchecked(value))
-            }
-
-            /// Create a new value from a `u32` without checking whether the
-            /// given value exceeds the maximum.
-            ///
-            /// Using this routine with an invalid value will result in
-            /// unspecified behavior, but *not* undefined behavior. In
-            /// particular, an invalid ID value is likely to cause panics or
-            /// possibly even silent logical errors.
-            ///
-            /// Callers must never rely on this type to be within a certain
-            /// range for memory safety.
-            #[inline]
-            pub const fn from_u32_unchecked(index: u32) -> $name {
-                $name(SmallIndex::from_u32_unchecked(index))
-            }
-
-            /// Like `new`, but panics if the given value is not valid.
-            #[inline]
-            pub fn must(value: usize) -> $name {
-                $name::new(value).expect(concat!(
-                    "invalid ",
-                    stringify!($name),
-                    " value"
-                ))
-            }
-
-            /// Return the internal value as a `usize`. This is guaranteed to
-            /// never overflow `usize`.
-            #[inline]
-            pub const fn as_usize(&self) -> usize {
-                self.0.as_usize()
-            }
-
-            /// Return the internal value as a `u64`. This is guaranteed to
-            /// never overflow.
-            #[inline]
-            pub const fn as_u64(&self) -> u64 {
-                self.0.as_u64()
-            }
-
-            /// Return the internal value as a `u32`. This is guaranteed to
-            /// never overflow `u32`.
-            #[inline]
-            pub const fn as_u32(&self) -> u32 {
-                self.0.as_u32()
-            }
-
-            /// Return the internal value as a `i32`. This is guaranteed to
-            /// never overflow an `i32`.
-            #[inline]
-            pub const fn as_i32(&self) -> i32 {
-                self.0.as_i32()
-            }
-
-            /// Returns one more than this value as a usize.
-            ///
-            /// Since values represented by a "small index" have constraints
-            /// on their maximum value, adding `1` to it will always fit in a
-            /// `usize`, `u32` and a `i32`.
-            #[inline]
-            pub fn one_more(&self) -> usize {
-                self.0.one_more()
-            }
-
-            /// Decode this value from the bytes given using the native endian
-            /// byte order for the current target.
-            ///
-            /// If the decoded integer is not representable as a small index
-            /// for the current target, then this returns an error.
-            #[inline]
-            pub fn from_ne_bytes(bytes: [u8; 4]) -> Result<$name, $err> {
-                SmallIndex::from_ne_bytes(bytes).map($name).map_err($err)
-            }
-
-            /// Decode this value from the bytes given using the native endian
-            /// byte order for the current target.
-            ///
-            /// This is analogous to `new_unchecked` in that is does not check
-            /// whether the decoded integer is representable as a small index.
-            #[inline]
-            pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> $name {
-                $name(SmallIndex::from_ne_bytes_unchecked(bytes))
-            }
-
-            /// Return the underlying integer as raw bytes in native endian
-            /// format.
-            #[inline]
-            pub fn to_ne_bytes(&self) -> [u8; 4] {
-                self.0.to_ne_bytes()
-            }
-
-            /// Returns an iterator over all values from 0 up to and not
-            /// including the given length.
-            ///
-            /// If the given length exceeds this type's limit, then this
-            /// panics.
-            pub(crate) fn iter(len: usize) -> $iter {
-                $iter::new(len)
-            }
-        }
-
-        // We write our own Debug impl so that we get things like PatternID(5)
-        // instead of PatternID(SmallIndex(5)).
-        impl core::fmt::Debug for $name {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                f.debug_tuple(stringify!($name)).field(&self.as_u32()).finish()
-            }
-        }
-
-        impl<T> core::ops::Index<$name> for [T] {
-            type Output = T;
-
-            #[inline]
-            fn index(&self, index: $name) -> &T {
-                &self[index.as_usize()]
-            }
-        }
-
-        impl<T> core::ops::IndexMut<$name> for [T] {
-            #[inline]
-            fn index_mut(&mut self, index: $name) -> &mut T {
-                &mut self[index.as_usize()]
-            }
-        }
-
-        impl<T> core::ops::Index<$name> for Vec<T> {
-            type Output = T;
-
-            #[inline]
-            fn index(&self, index: $name) -> &T {
-                &self[index.as_usize()]
-            }
-        }
-
-        impl<T> core::ops::IndexMut<$name> for Vec<T> {
-            #[inline]
-            fn index_mut(&mut self, index: $name) -> &mut T {
-                &mut self[index.as_usize()]
-            }
-        }
-
-        impl From<SmallIndex> for $name {
-            fn from(index: SmallIndex) -> $name {
-                $name(index)
-            }
-        }
-
-        impl From<u8> for $name {
-            fn from(value: u8) -> $name {
-                $name(SmallIndex::from(value))
-            }
-        }
-
-        impl TryFrom<u16> for $name {
-            type Error = $err;
-
-            fn try_from(value: u16) -> Result<$name, $err> {
-                SmallIndex::try_from(value).map($name).map_err($err)
-            }
-        }
-
-        impl TryFrom<u32> for $name {
-            type Error = $err;
-
-            fn try_from(value: u32) -> Result<$name, $err> {
-                SmallIndex::try_from(value).map($name).map_err($err)
-            }
-        }
-
-        impl TryFrom<u64> for $name {
-            type Error = $err;
-
-            fn try_from(value: u64) -> Result<$name, $err> {
-                SmallIndex::try_from(value).map($name).map_err($err)
-            }
-        }
-
-        impl TryFrom<usize> for $name {
-            type Error = $err;
-
-            fn try_from(value: usize) -> Result<$name, $err> {
-                SmallIndex::try_from(value).map($name).map_err($err)
-            }
-        }
-
-        /// This error occurs when an ID could not be constructed.
-        ///
-        /// This occurs when given an integer exceeding the maximum allowed
-        /// value.
-        ///
-        /// When the `std` feature is enabled, this implements the `Error`
-        /// trait.
-        #[derive(Clone, Debug, Eq, PartialEq)]
-        pub struct $err(SmallIndexError);
-
-        impl $err {
-            /// Returns the value that could not be converted to an ID.
-            pub fn attempted(&self) -> u64 {
-                self.0.attempted()
-            }
-        }
-
-        #[cfg(feature = "std")]
-        impl std::error::Error for $err {}
-
-        impl core::fmt::Display for $err {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                write!(
-                    f,
-                    "failed to create {} from {:?}, which exceeds {:?}",
-                    stringify!($name),
-                    self.attempted(),
-                    $name::MAX,
-                )
-            }
-        }
-
-        #[derive(Clone, Debug)]
-        pub(crate) struct $iter(SmallIndexIter);
-
-        impl $iter {
-            fn new(len: usize) -> $iter {
-                assert!(
-                    len <= $name::LIMIT,
-                    "cannot create iterator for {} when number of \
-                     elements exceed {:?}",
-                    stringify!($name),
-                    $name::LIMIT,
-                );
-                $iter(SmallIndexIter { rng: 0..len })
-            }
-        }
-
-        impl Iterator for $iter {
-            type Item = $name;
-
-            fn next(&mut self) -> Option<$name> {
-                self.0.next().map($name)
-            }
-        }
-
-        /// An iterator adapter that is like std::iter::Enumerate, but attaches
-        /// small index values instead. It requires `ExactSizeIterator`. At
-        /// construction, it ensures that the index of each element in the
-        /// iterator is representable in the corresponding small index type.
-        #[derive(Clone, Debug)]
-        pub(crate) struct $withiter<I> {
-            it: I,
-            ids: $iter,
-        }
-
-        impl<I: Iterator + ExactSizeIterator> $withiter<I> {
-            fn new(it: I) -> $withiter<I> {
-                let ids = $name::iter(it.len());
-                $withiter { it, ids }
-            }
-        }
-
-        impl<I: Iterator + ExactSizeIterator> Iterator for $withiter<I> {
-            type Item = ($name, I::Item);
-
-            fn next(&mut self) -> Option<($name, I::Item)> {
-                let item = self.it.next()?;
-                // Number of elements in this iterator must match, according
-                // to contract of ExactSizeIterator.
-                let id = self.ids.next().unwrap();
-                Some((id, item))
-            }
-        }
-    };
-}
-
-/// The identifier of a pattern in an Aho-Corasick automaton.
-///
-/// It is represented by a `u32` even on 64-bit systems in order to conserve
-/// space. Namely, on all targets, this type guarantees that its value will
-/// fit in a `u32`, `i32`, `usize` and an `isize`. This means that on 16-bit
-/// targets, for example, this type's maximum value will never overflow an
-/// `isize`, which means it will never overflow a `i16` even though its
-/// internal representation is still a `u32`.
-///
-/// # Safety
-///
-/// While a `PatternID` is meant to guarantee that its value fits into `usize`
-/// without using as much space as a `usize` on all targets, callers must
-/// not rely on this property for safety. Callers may choose to rely on this
-/// property for correctness however. For example, creating a `StateID` with an
-/// invalid value can be done in entirely safe code. This may in turn result in
-/// panics or silent logical errors.
-#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
-#[repr(transparent)]
-pub struct PatternID(SmallIndex);
-
-/// The identifier of a finite automaton state.
-///
-/// It is represented by a `u32` even on 64-bit systems in order to conserve
-/// space. Namely, on all targets, this type guarantees that its value will
-/// fit in a `u32`, `i32`, `usize` and an `isize`. This means that on 16-bit
-/// targets, for example, this type's maximum value will never overflow an
-/// `isize`, which means it will never overflow a `i16` even though its
-/// internal representation is still a `u32`.
-///
-/// # Safety
-///
-/// While a `StateID` is meant to guarantee that its value fits into `usize`
-/// without using as much space as a `usize` on all targets, callers must
-/// not rely on this property for safety. Callers may choose to rely on this
-/// property for correctness however. For example, creating a `StateID` with an
-/// invalid value can be done in entirely safe code. This may in turn result in
-/// panics or silent logical errors.
-#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
-#[repr(transparent)]
-pub struct StateID(SmallIndex);
-
-index_type_impls!(PatternID, PatternIDError, PatternIDIter, WithPatternIDIter);
-index_type_impls!(StateID, StateIDError, StateIDIter, WithStateIDIter);
-
-/// A utility trait that defines a couple of adapters for making it convenient
-/// to access indices as "small index" types. We require ExactSizeIterator so
-/// that iterator construction can do a single check to make sure the index of
-/// each element is representable by its small index type.
-pub(crate) trait IteratorIndexExt: Iterator {
-    fn with_pattern_ids(self) -> WithPatternIDIter<Self>
-    where
-        Self: Sized + ExactSizeIterator,
-    {
-        WithPatternIDIter::new(self)
-    }
-
-    fn with_state_ids(self) -> WithStateIDIter<Self>
-    where
-        Self: Sized + ExactSizeIterator,
-    {
-        WithStateIDIter::new(self)
-    }
-}
-
-impl<I: Iterator> IteratorIndexExt for I {}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/remapper.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/remapper.rs
deleted file mode 100644
index 7c47a082..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/remapper.rs
+++ /dev/null
@@ -1,214 +0,0 @@
-use alloc::vec::Vec;
-
-use crate::{nfa::noncontiguous, util::primitives::StateID};
-
-/// Remappable is a tightly coupled abstraction that facilitates remapping
-/// state identifiers in DFAs.
-///
-/// The main idea behind remapping state IDs is that DFAs often need to check
-/// if a certain state is a "special" state of some kind (like a match state)
-/// during a search. Since this is extremely perf critical code, we want this
-/// check to be as fast as possible. Partitioning state IDs into, for example,
-/// into "non-match" and "match" states means one can tell if a state is a
-/// match state via a simple comparison of the state ID.
-///
-/// The issue is that during the DFA construction process, it's not
-/// particularly easy to partition the states. Instead, the simplest thing is
-/// to often just do a pass over all of the states and shuffle them into their
-/// desired partitionings. To do that, we need a mechanism for swapping states.
-/// Hence, this abstraction.
-///
-/// Normally, for such little code, I would just duplicate it. But this is a
-/// key optimization and the implementation is a bit subtle. So the abstraction
-/// is basically a ham-fisted attempt at DRY. The only place we use this is in
-/// the dense and one-pass DFAs.
-///
-/// See also src/dfa/special.rs for a more detailed explanation of how dense
-/// DFAs are partitioned.
-pub(crate) trait Remappable: core::fmt::Debug {
-    /// Return the total number of states.
-    fn state_len(&self) -> usize;
-
-    /// Swap the states pointed to by the given IDs. The underlying finite
-    /// state machine should be mutated such that all of the transitions in
-    /// `id1` are now in the memory region where the transitions for `id2`
-    /// were, and all of the transitions in `id2` are now in the memory region
-    /// where the transitions for `id1` were.
-    ///
-    /// Essentially, this "moves" `id1` to `id2` and `id2` to `id1`.
-    ///
-    /// It is expected that, after calling this, the underlying state machine
-    /// will be left in an inconsistent state, since any other transitions
-    /// pointing to, e.g., `id1` need to be updated to point to `id2`, since
-    /// that's where `id1` moved to.
-    ///
-    /// In order to "fix" the underlying inconsistent state, a `Remapper`
-    /// should be used to guarantee that `remap` is called at the appropriate
-    /// time.
-    fn swap_states(&mut self, id1: StateID, id2: StateID);
-
-    /// This must remap every single state ID in the underlying value according
-    /// to the function given. For example, in a DFA, this should remap every
-    /// transition and every starting state ID.
-    fn remap(&mut self, map: impl Fn(StateID) -> StateID);
-}
-
-/// Remapper is an abstraction the manages the remapping of state IDs in a
-/// finite state machine. This is useful when one wants to shuffle states into
-/// different positions in the machine.
-///
-/// One of the key complexities this manages is the ability to correctly move
-/// one state multiple times.
-///
-/// Once shuffling is complete, `remap` must be called, which will rewrite
-/// all pertinent transitions to updated state IDs. Neglecting to call `remap`
-/// will almost certainly result in a corrupt machine.
-#[derive(Debug)]
-pub(crate) struct Remapper {
-    /// A map from the index of a state to its pre-multiplied identifier.
-    ///
-    /// When a state is swapped with another, then their corresponding
-    /// locations in this map are also swapped. Thus, its new position will
-    /// still point to its old pre-multiplied StateID.
-    ///
-    /// While there is a bit more to it, this then allows us to rewrite the
-    /// state IDs in a DFA's transition table in a single pass. This is done
-    /// by iterating over every ID in this map, then iterating over each
-    /// transition for the state at that ID and re-mapping the transition from
-    /// `old_id` to `map[dfa.to_index(old_id)]`. That is, we find the position
-    /// in this map where `old_id` *started*, and set it to where it ended up
-    /// after all swaps have been completed.
-    map: Vec<StateID>,
-    /// A way to map indices to state IDs (and back).
-    idx: IndexMapper,
-}
-
-impl Remapper {
-    /// Create a new remapper from the given remappable implementation. The
-    /// remapper can then be used to swap states. The remappable value given
-    /// here must the same one given to `swap` and `remap`.
-    ///
-    /// The given stride should be the stride of the transition table expressed
-    /// as a power of 2. This stride is used to map between state IDs and state
-    /// indices. If state IDs and state indices are equivalent, then provide
-    /// a `stride2` of `0`, which acts as an identity.
-    pub(crate) fn new(r: &impl Remappable, stride2: usize) -> Remapper {
-        let idx = IndexMapper { stride2 };
-        let map = (0..r.state_len()).map(|i| idx.to_state_id(i)).collect();
-        Remapper { map, idx }
-    }
-
-    /// Swap two states. Once this is called, callers must follow through to
-    /// call `remap`, or else it's possible for the underlying remappable
-    /// value to be in a corrupt state.
-    pub(crate) fn swap(
-        &mut self,
-        r: &mut impl Remappable,
-        id1: StateID,
-        id2: StateID,
-    ) {
-        if id1 == id2 {
-            return;
-        }
-        r.swap_states(id1, id2);
-        self.map.swap(self.idx.to_index(id1), self.idx.to_index(id2));
-    }
-
-    /// Complete the remapping process by rewriting all state IDs in the
-    /// remappable value according to the swaps performed.
-    pub(crate) fn remap(mut self, r: &mut impl Remappable) {
-        // Update the map to account for states that have been swapped
-        // multiple times. For example, if (A, C) and (C, G) are swapped, then
-        // transitions previously pointing to A should now point to G. But if
-        // we don't update our map, they will erroneously be set to C. All we
-        // do is follow the swaps in our map until we see our original state
-        // ID.
-        //
-        // The intuition here is to think about how changes are made to the
-        // map: only through pairwise swaps. That means that starting at any
-        // given state, it is always possible to find the loop back to that
-        // state by following the swaps represented in the map (which might be
-        // 0 swaps).
-        //
-        // We are also careful to clone the map before starting in order to
-        // freeze it. We use the frozen map to find our loops, since we need to
-        // update our map as well. Without freezing it, our updates could break
-        // the loops referenced above and produce incorrect results.
-        let oldmap = self.map.clone();
-        for i in 0..r.state_len() {
-            let cur_id = self.idx.to_state_id(i);
-            let mut new_id = oldmap[i];
-            if cur_id == new_id {
-                continue;
-            }
-            loop {
-                let id = oldmap[self.idx.to_index(new_id)];
-                if cur_id == id {
-                    self.map[i] = new_id;
-                    break;
-                }
-                new_id = id;
-            }
-        }
-        r.remap(|sid| self.map[self.idx.to_index(sid)]);
-    }
-}
-
-/// A simple type for mapping between state indices and state IDs.
-///
-/// The reason why this exists is because state IDs are "premultiplied" in a
-/// DFA. That is, in order to get to the transitions for a particular state,
-/// one need only use the state ID as-is, instead of having to multiply it by
-/// transition table's stride.
-///
-/// The downside of this is that it's inconvenient to map between state IDs
-/// using a dense map, e.g., Vec<StateID>. That's because state IDs look like
-/// `0`, `stride`, `2*stride`, `3*stride`, etc., instead of `0`, `1`, `2`, `3`,
-/// etc.
-///
-/// Since our state IDs are premultiplied, we can convert back-and-forth
-/// between IDs and indices by simply unmultiplying the IDs and multiplying the
-/// indices.
-///
-/// Note that for a sparse NFA, state IDs and indices are equivalent. In this
-/// case, we set the stride of the index mapped to be `0`, which acts as an
-/// identity.
-#[derive(Debug)]
-struct IndexMapper {
-    /// The power of 2 corresponding to the stride of the corresponding
-    /// transition table. 'id >> stride2' de-multiplies an ID while 'index <<
-    /// stride2' pre-multiplies an index to an ID.
-    stride2: usize,
-}
-
-impl IndexMapper {
-    /// Convert a state ID to a state index.
-    fn to_index(&self, id: StateID) -> usize {
-        id.as_usize() >> self.stride2
-    }
-
-    /// Convert a state index to a state ID.
-    fn to_state_id(&self, index: usize) -> StateID {
-        // CORRECTNESS: If the given index is not valid, then it is not
-        // required for this to panic or return a valid state ID. We'll "just"
-        // wind up with panics or silent logic errors at some other point. But
-        // this is OK because if Remappable::state_len is correct and so is
-        // 'to_index', then all inputs to 'to_state_id' should be valid indices
-        // and thus transform into valid state IDs.
-        StateID::new_unchecked(index << self.stride2)
-    }
-}
-
-impl Remappable for noncontiguous::NFA {
-    fn state_len(&self) -> usize {
-        noncontiguous::NFA::states(self).len()
-    }
-
-    fn swap_states(&mut self, id1: StateID, id2: StateID) {
-        noncontiguous::NFA::swap_states(self, id1, id2)
-    }
-
-    fn remap(&mut self, map: impl Fn(StateID) -> StateID) {
-        noncontiguous::NFA::remap(self, map)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/search.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/search.rs
deleted file mode 100644
index 59b7035e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/search.rs
+++ /dev/null
@@ -1,1148 +0,0 @@
-use core::ops::{Range, RangeBounds};
-
-use crate::util::primitives::PatternID;
-
-/// The configuration and the haystack to use for an Aho-Corasick search.
-///
-/// When executing a search, there are a few parameters one might want to
-/// configure:
-///
-/// * The haystack to search, provided to the [`Input::new`] constructor. This
-/// is the only required parameter.
-/// * The span _within_ the haystack to limit a search to. (The default
-/// is the entire haystack.) This is configured via [`Input::span`] or
-/// [`Input::range`].
-/// * Whether to run an unanchored (matches can occur anywhere after the
-/// start of the search) or anchored (matches can only occur beginning at
-/// the start of the search) search. Unanchored search is the default. This is
-/// configured via [`Input::anchored`].
-/// * Whether to quit the search as soon as a match has been found, regardless
-/// of the [`MatchKind`] that the searcher was built with. This is configured
-/// via [`Input::earliest`].
-///
-/// For most cases, the defaults for all optional parameters are appropriate.
-/// The utility of this type is that it keeps the default or common case simple
-/// while permitting tweaking parameters in more niche use cases while reusing
-/// the same search APIs.
-///
-/// # Valid bounds and search termination
-///
-/// An `Input` permits setting the bounds of a search via either
-/// [`Input::span`] or [`Input::range`]. The bounds set must be valid, or
-/// else a panic will occur. Bounds are valid if and only if:
-///
-/// * The bounds represent a valid range into the input's haystack.
-/// * **or** the end bound is a valid ending bound for the haystack *and*
-/// the start bound is exactly one greater than the end bound.
-///
-/// In the latter case, [`Input::is_done`] will return true and indicates any
-/// search receiving such an input should immediately return with no match.
-///
-/// Other than representing "search is complete," the `Input::span` and
-/// `Input::range` APIs are never necessary. Instead, callers can slice the
-/// haystack instead, e.g., with `&haystack[start..end]`. With that said, they
-/// can be more convenient than slicing because the match positions reported
-/// when using `Input::span` or `Input::range` are in terms of the original
-/// haystack. If you instead use `&haystack[start..end]`, then you'll need to
-/// add `start` to any match position returned in order for it to be a correct
-/// index into `haystack`.
-///
-/// # Example: `&str` and `&[u8]` automatically convert to an `Input`
-///
-/// There is a `From<&T> for Input` implementation for all `T: AsRef<[u8]>`.
-/// Additionally, the [`AhoCorasick`](crate::AhoCorasick) search APIs accept
-/// a `Into<Input>`. These two things combined together mean you can provide
-/// things like `&str` and `&[u8]` to search APIs when the defaults are
-/// suitable, but also an `Input` when they're not. For example:
-///
-/// ```
-/// use aho_corasick::{AhoCorasick, Anchored, Input, Match, StartKind};
-///
-/// // Build a searcher that supports both unanchored and anchored modes.
-/// let ac = AhoCorasick::builder()
-///     .start_kind(StartKind::Both)
-///     .build(&["abcd", "b"])
-///     .unwrap();
-/// let haystack = "abcd";
-///
-/// // A search using default parameters is unanchored. With standard
-/// // semantics, this finds `b` first.
-/// assert_eq!(
-///     Some(Match::must(1, 1..2)),
-///     ac.find(haystack),
-/// );
-/// // Using the same 'find' routine, we can provide an 'Input' explicitly
-/// // that is configured to do an anchored search. Since 'b' doesn't start
-/// // at the beginning of the search, it is not reported as a match.
-/// assert_eq!(
-///     Some(Match::must(0, 0..4)),
-///     ac.find(Input::new(haystack).anchored(Anchored::Yes)),
-/// );
-/// ```
-#[derive(Clone)]
-pub struct Input<'h> {
-    haystack: &'h [u8],
-    span: Span,
-    anchored: Anchored,
-    earliest: bool,
-}
-
-impl<'h> Input<'h> {
-    /// Create a new search configuration for the given haystack.
-    #[inline]
-    pub fn new<H: ?Sized + AsRef<[u8]>>(haystack: &'h H) -> Input<'h> {
-        Input {
-            haystack: haystack.as_ref(),
-            span: Span { start: 0, end: haystack.as_ref().len() },
-            anchored: Anchored::No,
-            earliest: false,
-        }
-    }
-
-    /// Set the span for this search.
-    ///
-    /// This routine is generic over how a span is provided. While
-    /// a [`Span`] may be given directly, one may also provide a
-    /// `std::ops::Range<usize>`. To provide anything supported by range
-    /// syntax, use the [`Input::range`] method.
-    ///
-    /// The default span is the entire haystack.
-    ///
-    /// Note that [`Input::range`] overrides this method and vice versa.
-    ///
-    /// # Panics
-    ///
-    /// This panics if the given span does not correspond to valid bounds in
-    /// the haystack or the termination of a search.
-    ///
-    /// # Example
-    ///
-    /// This example shows how the span of the search can impact whether a
-    /// match is reported or not.
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Input, MatchKind};
-    ///
-    /// let patterns = &["b", "abcd", "abc"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let input = Input::new(haystack).span(0..3);
-    /// let mat = ac.try_find(input)?.expect("should have a match");
-    /// // Without the span stopping the search early, 'abcd' would be reported
-    /// // because it is the correct leftmost-first match.
-    /// assert_eq!("abc", &haystack[mat.span()]);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn span<S: Into<Span>>(mut self, span: S) -> Input<'h> {
-        self.set_span(span);
-        self
-    }
-
-    /// Like `Input::span`, but accepts any range instead.
-    ///
-    /// The default range is the entire haystack.
-    ///
-    /// Note that [`Input::span`] overrides this method and vice versa.
-    ///
-    /// # Panics
-    ///
-    /// This routine will panic if the given range could not be converted
-    /// to a valid [`Range`]. For example, this would panic when given
-    /// `0..=usize::MAX` since it cannot be represented using a half-open
-    /// interval in terms of `usize`.
-    ///
-    /// This routine also panics if the given range does not correspond to
-    /// valid bounds in the haystack or the termination of a search.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let input = Input::new("foobar");
-    /// assert_eq!(0..6, input.get_range());
-    ///
-    /// let input = Input::new("foobar").range(2..=4);
-    /// assert_eq!(2..5, input.get_range());
-    /// ```
-    #[inline]
-    pub fn range<R: RangeBounds<usize>>(mut self, range: R) -> Input<'h> {
-        self.set_range(range);
-        self
-    }
-
-    /// Sets the anchor mode of a search.
-    ///
-    /// When a search is anchored (via [`Anchored::Yes`]), a match must begin
-    /// at the start of a search. When a search is not anchored (that's
-    /// [`Anchored::No`]), searchers will look for a match anywhere in the
-    /// haystack.
-    ///
-    /// By default, the anchored mode is [`Anchored::No`].
-    ///
-    /// # Support for anchored searches
-    ///
-    /// Anchored or unanchored searches might not always be available,
-    /// depending on the type of searcher used and its configuration:
-    ///
-    /// * [`noncontiguous::NFA`](crate::nfa::noncontiguous::NFA) always
-    /// supports both unanchored and anchored searches.
-    /// * [`contiguous::NFA`](crate::nfa::contiguous::NFA) always supports both
-    /// unanchored and anchored searches.
-    /// * [`dfa::DFA`](crate::dfa::DFA) supports only unanchored
-    /// searches by default.
-    /// [`dfa::Builder::start_kind`](crate::dfa::Builder::start_kind) can
-    /// be used to change the default to supporting both kinds of searches
-    /// or even just anchored searches.
-    /// * [`AhoCorasick`](crate::AhoCorasick) inherits the same setup as a
-    /// `DFA`. Namely, it only supports unanchored searches by default, but
-    /// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind)
-    /// can change this.
-    ///
-    /// If you try to execute a search using a `try_` ("fallible") method with
-    /// an unsupported anchor mode, then an error will be returned. For calls
-    /// to infallible search methods, a panic will result.
-    ///
-    /// # Example
-    ///
-    /// This demonstrates the differences between an anchored search and
-    /// an unanchored search. Notice that we build our `AhoCorasick` searcher
-    /// with [`StartKind::Both`] so that it supports both unanchored and
-    /// anchored searches simultaneously.
-    ///
-    /// ```
-    /// use aho_corasick::{
-    ///     AhoCorasick, Anchored, Input, MatchKind, StartKind,
-    /// };
-    ///
-    /// let patterns = &["bcd"];
-    /// let haystack = "abcd";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .start_kind(StartKind::Both)
-    ///     .build(patterns)
-    ///     .unwrap();
-    ///
-    /// // Note that 'Anchored::No' is the default, so it doesn't need to
-    /// // be explicitly specified here.
-    /// let input = Input::new(haystack);
-    /// let mat = ac.try_find(input)?.expect("should have a match");
-    /// assert_eq!("bcd", &haystack[mat.span()]);
-    ///
-    /// // While 'bcd' occurs in the haystack, it does not begin where our
-    /// // search begins, so no match is found.
-    /// let input = Input::new(haystack).anchored(Anchored::Yes);
-    /// assert_eq!(None, ac.try_find(input)?);
-    ///
-    /// // However, if we start our search where 'bcd' starts, then we will
-    /// // find a match.
-    /// let input = Input::new(haystack).range(1..).anchored(Anchored::Yes);
-    /// let mat = ac.try_find(input)?.expect("should have a match");
-    /// assert_eq!("bcd", &haystack[mat.span()]);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn anchored(mut self, mode: Anchored) -> Input<'h> {
-        self.set_anchored(mode);
-        self
-    }
-
-    /// Whether to execute an "earliest" search or not.
-    ///
-    /// When running a non-overlapping search, an "earliest" search will
-    /// return the match location as early as possible. For example, given
-    /// the patterns `abc` and `b`, and a haystack of `abc`, a normal
-    /// leftmost-first search will return `abc` as a match. But an "earliest"
-    /// search will return as soon as it is known that a match occurs, which
-    /// happens once `b` is seen.
-    ///
-    /// Note that when using [`MatchKind::Standard`], the "earliest" option
-    /// has no effect since standard semantics are already "earliest." Note
-    /// also that this has no effect in overlapping searches, since overlapping
-    /// searches also use standard semantics and report all possible matches.
-    ///
-    /// This is disabled by default.
-    ///
-    /// # Example
-    ///
-    /// This example shows the difference between "earliest" searching and
-    /// normal leftmost searching.
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind};
-    ///
-    /// let patterns = &["abc", "b"];
-    /// let haystack = "abc";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    ///
-    /// // The normal leftmost-first match.
-    /// let input = Input::new(haystack);
-    /// let mat = ac.try_find(input)?.expect("should have a match");
-    /// assert_eq!("abc", &haystack[mat.span()]);
-    ///
-    /// // The "earliest" possible match, even if it isn't leftmost-first.
-    /// let input = Input::new(haystack).earliest(true);
-    /// let mat = ac.try_find(input)?.expect("should have a match");
-    /// assert_eq!("b", &haystack[mat.span()]);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn earliest(mut self, yes: bool) -> Input<'h> {
-        self.set_earliest(yes);
-        self
-    }
-
-    /// Set the span for this search configuration.
-    ///
-    /// This is like the [`Input::span`] method, except this mutates the
-    /// span in place.
-    ///
-    /// This routine is generic over how a span is provided. While
-    /// a [`Span`] may be given directly, one may also provide a
-    /// `std::ops::Range<usize>`.
-    ///
-    /// # Panics
-    ///
-    /// This panics if the given span does not correspond to valid bounds in
-    /// the haystack or the termination of a search.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert_eq!(0..6, input.get_range());
-    /// input.set_span(2..4);
-    /// assert_eq!(2..4, input.get_range());
-    /// ```
-    #[inline]
-    pub fn set_span<S: Into<Span>>(&mut self, span: S) {
-        let span = span.into();
-        assert!(
-            span.end <= self.haystack.len()
-                && span.start <= span.end.wrapping_add(1),
-            "invalid span {:?} for haystack of length {}",
-            span,
-            self.haystack.len(),
-        );
-        self.span = span;
-    }
-
-    /// Set the span for this search configuration given any range.
-    ///
-    /// This is like the [`Input::range`] method, except this mutates the
-    /// span in place.
-    ///
-    /// # Panics
-    ///
-    /// This routine will panic if the given range could not be converted
-    /// to a valid [`Range`]. For example, this would panic when given
-    /// `0..=usize::MAX` since it cannot be represented using a half-open
-    /// interval in terms of `usize`.
-    ///
-    /// This routine also panics if the given range does not correspond to
-    /// valid bounds in the haystack or the termination of a search.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert_eq!(0..6, input.get_range());
-    /// input.set_range(2..=4);
-    /// assert_eq!(2..5, input.get_range());
-    /// ```
-    #[inline]
-    pub fn set_range<R: RangeBounds<usize>>(&mut self, range: R) {
-        use core::ops::Bound;
-
-        // It's a little weird to convert ranges into spans, and then spans
-        // back into ranges when we actually slice the haystack. Because
-        // of that process, we always represent everything as a half-open
-        // internal. Therefore, handling things like m..=n is a little awkward.
-        let start = match range.start_bound() {
-            Bound::Included(&i) => i,
-            // Can this case ever happen? Range syntax doesn't support it...
-            Bound::Excluded(&i) => i.checked_add(1).unwrap(),
-            Bound::Unbounded => 0,
-        };
-        let end = match range.end_bound() {
-            Bound::Included(&i) => i.checked_add(1).unwrap(),
-            Bound::Excluded(&i) => i,
-            Bound::Unbounded => self.haystack().len(),
-        };
-        self.set_span(Span { start, end });
-    }
-
-    /// Set the starting offset for the span for this search configuration.
-    ///
-    /// This is a convenience routine for only mutating the start of a span
-    /// without having to set the entire span.
-    ///
-    /// # Panics
-    ///
-    /// This panics if the given span does not correspond to valid bounds in
-    /// the haystack or the termination of a search.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert_eq!(0..6, input.get_range());
-    /// input.set_start(5);
-    /// assert_eq!(5..6, input.get_range());
-    /// ```
-    #[inline]
-    pub fn set_start(&mut self, start: usize) {
-        self.set_span(Span { start, ..self.get_span() });
-    }
-
-    /// Set the ending offset for the span for this search configuration.
-    ///
-    /// This is a convenience routine for only mutating the end of a span
-    /// without having to set the entire span.
-    ///
-    /// # Panics
-    ///
-    /// This panics if the given span does not correspond to valid bounds in
-    /// the haystack or the termination of a search.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert_eq!(0..6, input.get_range());
-    /// input.set_end(5);
-    /// assert_eq!(0..5, input.get_range());
-    /// ```
-    #[inline]
-    pub fn set_end(&mut self, end: usize) {
-        self.set_span(Span { end, ..self.get_span() });
-    }
-
-    /// Set the anchor mode of a search.
-    ///
-    /// This is like [`Input::anchored`], except it mutates the search
-    /// configuration in place.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::{Anchored, Input};
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert_eq!(Anchored::No, input.get_anchored());
-    ///
-    /// input.set_anchored(Anchored::Yes);
-    /// assert_eq!(Anchored::Yes, input.get_anchored());
-    /// ```
-    #[inline]
-    pub fn set_anchored(&mut self, mode: Anchored) {
-        self.anchored = mode;
-    }
-
-    /// Set whether the search should execute in "earliest" mode or not.
-    ///
-    /// This is like [`Input::earliest`], except it mutates the search
-    /// configuration in place.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert!(!input.get_earliest());
-    /// input.set_earliest(true);
-    /// assert!(input.get_earliest());
-    /// ```
-    #[inline]
-    pub fn set_earliest(&mut self, yes: bool) {
-        self.earliest = yes;
-    }
-
-    /// Return a borrow of the underlying haystack as a slice of bytes.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let input = Input::new("foobar");
-    /// assert_eq!(b"foobar", input.haystack());
-    /// ```
-    #[inline]
-    pub fn haystack(&self) -> &[u8] {
-        self.haystack
-    }
-
-    /// Return the start position of this search.
-    ///
-    /// This is a convenience routine for `search.get_span().start()`.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let input = Input::new("foobar");
-    /// assert_eq!(0, input.start());
-    ///
-    /// let input = Input::new("foobar").span(2..4);
-    /// assert_eq!(2, input.start());
-    /// ```
-    #[inline]
-    pub fn start(&self) -> usize {
-        self.get_span().start
-    }
-
-    /// Return the end position of this search.
-    ///
-    /// This is a convenience routine for `search.get_span().end()`.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let input = Input::new("foobar");
-    /// assert_eq!(6, input.end());
-    ///
-    /// let input = Input::new("foobar").span(2..4);
-    /// assert_eq!(4, input.end());
-    /// ```
-    #[inline]
-    pub fn end(&self) -> usize {
-        self.get_span().end
-    }
-
-    /// Return the span for this search configuration.
-    ///
-    /// If one was not explicitly set, then the span corresponds to the entire
-    /// range of the haystack.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::{Input, Span};
-    ///
-    /// let input = Input::new("foobar");
-    /// assert_eq!(Span { start: 0, end: 6 }, input.get_span());
-    /// ```
-    #[inline]
-    pub fn get_span(&self) -> Span {
-        self.span
-    }
-
-    /// Return the span as a range for this search configuration.
-    ///
-    /// If one was not explicitly set, then the span corresponds to the entire
-    /// range of the haystack.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let input = Input::new("foobar");
-    /// assert_eq!(0..6, input.get_range());
-    /// ```
-    #[inline]
-    pub fn get_range(&self) -> Range<usize> {
-        self.get_span().range()
-    }
-
-    /// Return the anchored mode for this search configuration.
-    ///
-    /// If no anchored mode was set, then it defaults to [`Anchored::No`].
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::{Anchored, Input};
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert_eq!(Anchored::No, input.get_anchored());
-    ///
-    /// input.set_anchored(Anchored::Yes);
-    /// assert_eq!(Anchored::Yes, input.get_anchored());
-    /// ```
-    #[inline]
-    pub fn get_anchored(&self) -> Anchored {
-        self.anchored
-    }
-
-    /// Return whether this search should execute in "earliest" mode.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let input = Input::new("foobar");
-    /// assert!(!input.get_earliest());
-    /// ```
-    #[inline]
-    pub fn get_earliest(&self) -> bool {
-        self.earliest
-    }
-
-    /// Return true if this input has been exhausted, which in turn means all
-    /// subsequent searches will return no matches.
-    ///
-    /// This occurs precisely when the start position of this search is greater
-    /// than the end position of the search.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Input;
-    ///
-    /// let mut input = Input::new("foobar");
-    /// assert!(!input.is_done());
-    /// input.set_start(6);
-    /// assert!(!input.is_done());
-    /// input.set_start(7);
-    /// assert!(input.is_done());
-    /// ```
-    #[inline]
-    pub fn is_done(&self) -> bool {
-        self.get_span().start > self.get_span().end
-    }
-}
-
-impl<'h> core::fmt::Debug for Input<'h> {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        let mut fmter = f.debug_struct("Input");
-        match core::str::from_utf8(self.haystack()) {
-            Ok(nice) => fmter.field("haystack", &nice),
-            Err(_) => fmter.field("haystack", &self.haystack()),
-        }
-        .field("span", &self.span)
-        .field("anchored", &self.anchored)
-        .field("earliest", &self.earliest)
-        .finish()
-    }
-}
-
-impl<'h, H: ?Sized + AsRef<[u8]>> From<&'h H> for Input<'h> {
-    #[inline]
-    fn from(haystack: &'h H) -> Input<'h> {
-        Input::new(haystack)
-    }
-}
-
-/// A representation of a range in a haystack.
-///
-/// A span corresponds to the starting and ending _byte offsets_ of a
-/// contiguous region of bytes. The starting offset is inclusive while the
-/// ending offset is exclusive. That is, a span is a half-open interval.
-///
-/// A span is used to report the offsets of a match, but it is also used to
-/// convey which region of a haystack should be searched via routines like
-/// [`Input::span`].
-///
-/// This is basically equivalent to a `std::ops::Range<usize>`, except this
-/// type implements `Copy` which makes it more ergonomic to use in the context
-/// of this crate. Indeed, `Span` exists only because `Range<usize>` does
-/// not implement `Copy`. Like a range, this implements `Index` for `[u8]`
-/// and `str`, and `IndexMut` for `[u8]`. For convenience, this also impls
-/// `From<Range>`, which means things like `Span::from(5..10)` work.
-///
-/// There are no constraints on the values of a span. It is, for example, legal
-/// to create a span where `start > end`.
-#[derive(Clone, Copy, Eq, Hash, PartialEq)]
-pub struct Span {
-    /// The start offset of the span, inclusive.
-    pub start: usize,
-    /// The end offset of the span, exclusive.
-    pub end: usize,
-}
-
-impl Span {
-    /// Returns this span as a range.
-    #[inline]
-    pub fn range(&self) -> Range<usize> {
-        Range::from(*self)
-    }
-
-    /// Returns true when this span is empty. That is, when `start >= end`.
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.start >= self.end
-    }
-
-    /// Returns the length of this span.
-    ///
-    /// This returns `0` in precisely the cases that `is_empty` returns `true`.
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.end.saturating_sub(self.start)
-    }
-
-    /// Returns true when the given offset is contained within this span.
-    ///
-    /// Note that an empty span contains no offsets and will always return
-    /// false.
-    #[inline]
-    pub fn contains(&self, offset: usize) -> bool {
-        !self.is_empty() && self.start <= offset && offset <= self.end
-    }
-
-    /// Returns a new span with `offset` added to this span's `start` and `end`
-    /// values.
-    #[inline]
-    pub fn offset(&self, offset: usize) -> Span {
-        Span { start: self.start + offset, end: self.end + offset }
-    }
-}
-
-impl core::fmt::Debug for Span {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        write!(f, "{}..{}", self.start, self.end)
-    }
-}
-
-impl core::ops::Index<Span> for [u8] {
-    type Output = [u8];
-
-    #[inline]
-    fn index(&self, index: Span) -> &[u8] {
-        &self[index.range()]
-    }
-}
-
-impl core::ops::IndexMut<Span> for [u8] {
-    #[inline]
-    fn index_mut(&mut self, index: Span) -> &mut [u8] {
-        &mut self[index.range()]
-    }
-}
-
-impl core::ops::Index<Span> for str {
-    type Output = str;
-
-    #[inline]
-    fn index(&self, index: Span) -> &str {
-        &self[index.range()]
-    }
-}
-
-impl From<Range<usize>> for Span {
-    #[inline]
-    fn from(range: Range<usize>) -> Span {
-        Span { start: range.start, end: range.end }
-    }
-}
-
-impl From<Span> for Range<usize> {
-    #[inline]
-    fn from(span: Span) -> Range<usize> {
-        Range { start: span.start, end: span.end }
-    }
-}
-
-impl PartialEq<Range<usize>> for Span {
-    #[inline]
-    fn eq(&self, range: &Range<usize>) -> bool {
-        self.start == range.start && self.end == range.end
-    }
-}
-
-impl PartialEq<Span> for Range<usize> {
-    #[inline]
-    fn eq(&self, span: &Span) -> bool {
-        self.start == span.start && self.end == span.end
-    }
-}
-
-/// The type of anchored search to perform.
-///
-/// If an Aho-Corasick searcher does not support the anchored mode selected,
-/// then the search will return an error or panic, depending on whether a
-/// fallible or an infallible routine was called.
-#[non_exhaustive]
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum Anchored {
-    /// Run an unanchored search. This means a match may occur anywhere at or
-    /// after the start position of the search up until the end position of the
-    /// search.
-    No,
-    /// Run an anchored search. This means that a match must begin at the start
-    /// position of the search and end before the end position of the search.
-    Yes,
-}
-
-impl Anchored {
-    /// Returns true if and only if this anchor mode corresponds to an anchored
-    /// search.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use aho_corasick::Anchored;
-    ///
-    /// assert!(!Anchored::No.is_anchored());
-    /// assert!(Anchored::Yes.is_anchored());
-    /// ```
-    #[inline]
-    pub fn is_anchored(&self) -> bool {
-        matches!(*self, Anchored::Yes)
-    }
-}
-
-/// A representation of a match reported by an Aho-Corasick searcher.
-///
-/// A match has two essential pieces of information: the [`PatternID`] that
-/// matches, and the [`Span`] of the match in a haystack.
-///
-/// The pattern is identified by an ID, which corresponds to its position
-/// (starting from `0`) relative to other patterns used to construct the
-/// corresponding searcher. If only a single pattern is provided, then all
-/// matches are guaranteed to have a pattern ID of `0`.
-///
-/// Every match reported by a searcher guarantees that its span has its start
-/// offset as less than or equal to its end offset.
-#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
-pub struct Match {
-    /// The pattern ID.
-    pattern: PatternID,
-    /// The underlying match span.
-    span: Span,
-}
-
-impl Match {
-    /// Create a new match from a pattern ID and a span.
-    ///
-    /// This constructor is generic over how a span is provided. While
-    /// a [`Span`] may be given directly, one may also provide a
-    /// `std::ops::Range<usize>`.
-    ///
-    /// # Panics
-    ///
-    /// This panics if `end < start`.
-    ///
-    /// # Example
-    ///
-    /// This shows how to create a match for the first pattern in an
-    /// Aho-Corasick searcher using convenient range syntax.
-    ///
-    /// ```
-    /// use aho_corasick::{Match, PatternID};
-    ///
-    /// let m = Match::new(PatternID::ZERO, 5..10);
-    /// assert_eq!(0, m.pattern().as_usize());
-    /// assert_eq!(5, m.start());
-    /// assert_eq!(10, m.end());
-    /// ```
-    #[inline]
-    pub fn new<S: Into<Span>>(pattern: PatternID, span: S) -> Match {
-        let span = span.into();
-        assert!(span.start <= span.end, "invalid match span");
-        Match { pattern, span }
-    }
-
-    /// Create a new match from a pattern ID and a byte offset span.
-    ///
-    /// This constructor is generic over how a span is provided. While
-    /// a [`Span`] may be given directly, one may also provide a
-    /// `std::ops::Range<usize>`.
-    ///
-    /// This is like [`Match::new`], but accepts a `usize` instead of a
-    /// [`PatternID`]. This panics if the given `usize` is not representable
-    /// as a `PatternID`.
-    ///
-    /// # Panics
-    ///
-    /// This panics if `end < start` or if `pattern > PatternID::MAX`.
-    ///
-    /// # Example
-    ///
-    /// This shows how to create a match for the third pattern in an
-    /// Aho-Corasick searcher using convenient range syntax.
-    ///
-    /// ```
-    /// use aho_corasick::Match;
-    ///
-    /// let m = Match::must(3, 5..10);
-    /// assert_eq!(3, m.pattern().as_usize());
-    /// assert_eq!(5, m.start());
-    /// assert_eq!(10, m.end());
-    /// ```
-    #[inline]
-    pub fn must<S: Into<Span>>(pattern: usize, span: S) -> Match {
-        Match::new(PatternID::must(pattern), span)
-    }
-
-    /// Returns the ID of the pattern that matched.
-    ///
-    /// The ID of a pattern is derived from the position in which it was
-    /// originally inserted into the corresponding searcher. The first pattern
-    /// has identifier `0`, and each subsequent pattern is `1`, `2` and so on.
-    #[inline]
-    pub fn pattern(&self) -> PatternID {
-        self.pattern
-    }
-
-    /// The starting position of the match.
-    ///
-    /// This is a convenience routine for `Match::span().start`.
-    #[inline]
-    pub fn start(&self) -> usize {
-        self.span().start
-    }
-
-    /// The ending position of the match.
-    ///
-    /// This is a convenience routine for `Match::span().end`.
-    #[inline]
-    pub fn end(&self) -> usize {
-        self.span().end
-    }
-
-    /// Returns the match span as a range.
-    ///
-    /// This is a convenience routine for `Match::span().range()`.
-    #[inline]
-    pub fn range(&self) -> core::ops::Range<usize> {
-        self.span().range()
-    }
-
-    /// Returns the span for this match.
-    #[inline]
-    pub fn span(&self) -> Span {
-        self.span
-    }
-
-    /// Returns true when the span in this match is empty.
-    ///
-    /// An empty match can only be returned when empty pattern is in the
-    /// Aho-Corasick searcher.
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.span().is_empty()
-    }
-
-    /// Returns the length of this match.
-    ///
-    /// This returns `0` in precisely the cases that `is_empty` returns `true`.
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.span().len()
-    }
-
-    /// Returns a new match with `offset` added to its span's `start` and `end`
-    /// values.
-    #[inline]
-    pub fn offset(&self, offset: usize) -> Match {
-        Match {
-            pattern: self.pattern,
-            span: Span {
-                start: self.start() + offset,
-                end: self.end() + offset,
-            },
-        }
-    }
-}
-
-/// A knob for controlling the match semantics of an Aho-Corasick automaton.
-///
-/// There are two generally different ways that Aho-Corasick automatons can
-/// report matches. The first way is the "standard" approach that results from
-/// implementing most textbook explanations of Aho-Corasick. The second way is
-/// to report only the leftmost non-overlapping matches. The leftmost approach
-/// is in turn split into two different ways of resolving ambiguous matches:
-/// leftmost-first and leftmost-longest.
-///
-/// The `Standard` match kind is the default and is the only one that supports
-/// overlapping matches and stream searching. (Trying to find overlapping or
-/// streaming matches using leftmost match semantics will result in an error in
-/// fallible APIs and a panic when using infallibe APIs.) The `Standard` match
-/// kind will report matches as they are seen. When searching for overlapping
-/// matches, then all possible matches are reported. When searching for
-/// non-overlapping matches, the first match seen is reported. For example, for
-/// non-overlapping matches, given the patterns `abcd` and `b` and the haystack
-/// `abcdef`, only a match for `b` is reported since it is detected first. The
-/// `abcd` match is never reported since it overlaps with the `b` match.
-///
-/// In contrast, the leftmost match kind always prefers the leftmost match
-/// among all possible matches. Given the same example as above with `abcd` and
-/// `b` as patterns and `abcdef` as the haystack, the leftmost match is `abcd`
-/// since it begins before the `b` match, even though the `b` match is detected
-/// before the `abcd` match. In this case, the `b` match is not reported at all
-/// since it overlaps with the `abcd` match.
-///
-/// The difference between leftmost-first and leftmost-longest is in how they
-/// resolve ambiguous matches when there are multiple leftmost matches to
-/// choose from. Leftmost-first always chooses the pattern that was provided
-/// earliest, where as leftmost-longest always chooses the longest matching
-/// pattern. For example, given the patterns `a` and `ab` and the subject
-/// string `ab`, the leftmost-first match is `a` but the leftmost-longest match
-/// is `ab`. Conversely, if the patterns were given in reverse order, i.e.,
-/// `ab` and `a`, then both the leftmost-first and leftmost-longest matches
-/// would be `ab`. Stated differently, the leftmost-first match depends on the
-/// order in which the patterns were given to the Aho-Corasick automaton.
-/// Because of that, when leftmost-first matching is used, if a pattern `A`
-/// that appears before a pattern `B` is a prefix of `B`, then it is impossible
-/// to ever observe a match of `B`.
-///
-/// If you're not sure which match kind to pick, then stick with the standard
-/// kind, which is the default. In particular, if you need overlapping or
-/// streaming matches, then you _must_ use the standard kind. The leftmost
-/// kinds are useful in specific circumstances. For example, leftmost-first can
-/// be very useful as a way to implement match priority based on the order of
-/// patterns given and leftmost-longest can be useful for dictionary searching
-/// such that only the longest matching words are reported.
-///
-/// # Relationship with regular expression alternations
-///
-/// Understanding match semantics can be a little tricky, and one easy way
-/// to conceptualize non-overlapping matches from an Aho-Corasick automaton
-/// is to think about them as a simple alternation of literals in a regular
-/// expression. For example, let's say we wanted to match the strings
-/// `Sam` and `Samwise`, which would turn into the regex `Sam|Samwise`. It
-/// turns out that regular expression engines have two different ways of
-/// matching this alternation. The first way, leftmost-longest, is commonly
-/// found in POSIX compatible implementations of regular expressions (such as
-/// `grep`). The second way, leftmost-first, is commonly found in backtracking
-/// implementations such as Perl. (Some regex engines, such as RE2 and Rust's
-/// regex engine do not use backtracking, but still implement leftmost-first
-/// semantics in an effort to match the behavior of dominant backtracking
-/// regex engines such as those found in Perl, Ruby, Python, Javascript and
-/// PHP.)
-///
-/// That is, when matching `Sam|Samwise` against `Samwise`, a POSIX regex
-/// will match `Samwise` because it is the longest possible match, but a
-/// Perl-like regex will match `Sam` since it appears earlier in the
-/// alternation. Indeed, the regex `Sam|Samwise` in a Perl-like regex engine
-/// will never match `Samwise` since `Sam` will always have higher priority.
-/// Conversely, matching the regex `Samwise|Sam` against `Samwise` will lead to
-/// a match of `Samwise` in both POSIX and Perl-like regexes since `Samwise` is
-/// still longest match, but it also appears earlier than `Sam`.
-///
-/// The "standard" match semantics of Aho-Corasick generally don't correspond
-/// to the match semantics of any large group of regex implementations, so
-/// there's no direct analogy that can be made here. Standard match semantics
-/// are generally useful for overlapping matches, or if you just want to see
-/// matches as they are detected.
-///
-/// The main conclusion to draw from this section is that the match semantics
-/// can be tweaked to precisely match either Perl-like regex alternations or
-/// POSIX regex alternations.
-#[non_exhaustive]
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum MatchKind {
-    /// Use standard match semantics, which support overlapping matches. When
-    /// used with non-overlapping matches, matches are reported as they are
-    /// seen.
-    Standard,
-    /// Use leftmost-first match semantics, which reports leftmost matches.
-    /// When there are multiple possible leftmost matches, the match
-    /// corresponding to the pattern that appeared earlier when constructing
-    /// the automaton is reported.
-    ///
-    /// This does **not** support overlapping matches or stream searching. If
-    /// this match kind is used, attempting to find overlapping matches or
-    /// stream matches will fail.
-    LeftmostFirst,
-    /// Use leftmost-longest match semantics, which reports leftmost matches.
-    /// When there are multiple possible leftmost matches, the longest match
-    /// is chosen.
-    ///
-    /// This does **not** support overlapping matches or stream searching. If
-    /// this match kind is used, attempting to find overlapping matches or
-    /// stream matches will fail.
-    LeftmostLongest,
-}
-
-/// The default match kind is `MatchKind::Standard`.
-impl Default for MatchKind {
-    fn default() -> MatchKind {
-        MatchKind::Standard
-    }
-}
-
-impl MatchKind {
-    #[inline]
-    pub(crate) fn is_standard(&self) -> bool {
-        matches!(*self, MatchKind::Standard)
-    }
-
-    #[inline]
-    pub(crate) fn is_leftmost(&self) -> bool {
-        matches!(*self, MatchKind::LeftmostFirst | MatchKind::LeftmostLongest)
-    }
-
-    #[inline]
-    pub(crate) fn is_leftmost_first(&self) -> bool {
-        matches!(*self, MatchKind::LeftmostFirst)
-    }
-
-    /// Convert this match kind into a packed match kind. If this match kind
-    /// corresponds to standard semantics, then this returns None, since
-    /// packed searching does not support standard semantics.
-    #[inline]
-    pub(crate) fn as_packed(&self) -> Option<crate::packed::MatchKind> {
-        match *self {
-            MatchKind::Standard => None,
-            MatchKind::LeftmostFirst => {
-                Some(crate::packed::MatchKind::LeftmostFirst)
-            }
-            MatchKind::LeftmostLongest => {
-                Some(crate::packed::MatchKind::LeftmostLongest)
-            }
-        }
-    }
-}
-
-/// The kind of anchored starting configurations to support in an Aho-Corasick
-/// searcher.
-///
-/// Depending on which searcher is used internally by
-/// [`AhoCorasick`](crate::AhoCorasick), supporting both unanchored
-/// and anchored searches can be quite costly. For this reason,
-/// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind)
-/// can be used to configure whether your searcher supports unanchored,
-/// anchored or both kinds of searches.
-///
-/// This searcher configuration knob works in concert with the search time
-/// configuration [`Input::anchored`]. Namely, if one requests an unsupported
-/// anchored mode, then the search will either panic or return an error,
-/// depending on whether you're using infallible or fallibe APIs, respectively.
-///
-/// `AhoCorasick` by default only supports unanchored searches.
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum StartKind {
-    /// Support both anchored and unanchored searches.
-    Both,
-    /// Support only unanchored searches. Requesting an anchored search will
-    /// return an error in fallible APIs and panic in infallible APIs.
-    Unanchored,
-    /// Support only anchored searches. Requesting an unanchored search will
-    /// return an error in fallible APIs and panic in infallible APIs.
-    Anchored,
-}
-
-impl Default for StartKind {
-    fn default() -> StartKind {
-        StartKind::Unanchored
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/special.rs b/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/special.rs
deleted file mode 100644
index beeba40..0000000
--- a/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/util/special.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use crate::util::primitives::StateID;
-
-/// A collection of sentinel state IDs for Aho-Corasick automata.
-///
-/// This specifically enables the technique by which we determine which states
-/// are dead, matches or start states. Namely, by arranging states in a
-/// particular order, we can determine the type of a state simply by looking at
-/// its ID.
-#[derive(Clone, Debug)]
-pub(crate) struct Special {
-    /// The maximum ID of all the "special" states. This corresponds either to
-    /// start_anchored_id when a prefilter is active and max_match_id when a
-    /// prefilter is not active. The idea here is that if there is no prefilter,
-    /// then there is no point in treating start states as special.
-    pub(crate) max_special_id: StateID,
-    /// The maximum ID of all the match states. Any state ID bigger than this
-    /// is guaranteed to be a non-match ID.
-    ///
-    /// It is possible and legal for max_match_id to be equal to
-    /// start_anchored_id, which occurs precisely in the case where the empty
-    /// string is a pattern that was added to the underlying automaton.
-    pub(crate) max_match_id: StateID,
-    /// The state ID of the start state used for unanchored searches.
-    pub(crate) start_unanchored_id: StateID,
-    /// The state ID of the start state used for anchored searches. This is
-    /// always start_unanchored_id+1.
-    pub(crate) start_anchored_id: StateID,
-}
-
-impl Special {
-    /// Create a new set of "special" state IDs with all IDs initialized to
-    /// zero. The general idea here is that they will be updated and set to
-    /// correct values later.
-    pub(crate) fn zero() -> Special {
-        Special {
-            max_special_id: StateID::ZERO,
-            max_match_id: StateID::ZERO,
-            start_unanchored_id: StateID::ZERO,
-            start_anchored_id: StateID::ZERO,
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.cargo_vcs_info.json
deleted file mode 100644
index abc42e85..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "58d0748ead23616834871fe42dce475102f8d895"
-  },
-  "path_in_vcs": ""
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/dependabot.yml b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/dependabot.yml
deleted file mode 100644
index de97070..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/dependabot.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "cargo"
-    directory: "/"
-    schedule:
-      interval: "monthly"
-    open-pull-requests-limit: 10
-    ignore:
-      - dependency-name: "tempdir"
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "monthly"
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/workflows/publish.yml b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/workflows/publish.yml
deleted file mode 100644
index e715c618..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/workflows/publish.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: Release-plz
-
-permissions:
-  pull-requests: write
-  contents: write
-
-on:
-  push:
-    branches:
-      - master
-
-jobs:
-  release-plz:
-    name: Release-plz
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Install Rust (rustup)
-        run: rustup update nightly --no-self-update && rustup default nightly
-      - name: Run release-plz
-        uses: MarcoIeni/release-plz-action@v0.5
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/workflows/rust.yml b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/workflows/rust.yml
deleted file mode 100644
index 8f3901c1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.github/workflows/rust.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-name: CI
-
-on:
-  pull_request:
-  push:
-    branches:
-      - master
-
-jobs:
-  test:
-    name: Tests
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        channel:
-          - stable
-          - nightly
-          - 1.63.0 # MSRV of test dependencies
-        os:
-          - macos-13 # x86 MacOS
-          - macos-15 # Arm MacOS
-          - windows-2025
-          - ubuntu-24.04
-        include:
-          - channel: beta
-            os: ubuntu-24.04
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v4
-
-    - name: Update rust
-      run: |
-        rustup default ${{ matrix.channel }}
-        rustup update --no-self-update
-
-    - run: cargo test --all
-
-  msrv:
-    name: Check building with the MSRV
-    runs-on: ubuntu-24.04
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v4
-
-    - name: Update rust
-      run: |
-        rustup default 1.23.0
-        rustup update --no-self-update
-
-    - run: cargo build
-
-  success:
-    needs:
-      - test
-      - msrv
-    runs-on: ubuntu-latest
-    # GitHub branch protection is exceedingly silly and treats "jobs skipped because a dependency
-    # failed" as success. So we have to do some contortions to ensure the job fails if any of its
-    # dependencies fails.
-    if: always() # make sure this is never "skipped"
-    steps:
-      # Manually check the status of all dependencies. `if: failure()` does not work.
-      - name: check if any dependency failed
-        run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}'
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.gitignore b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.gitignore
deleted file mode 100644
index 4fffb2f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/target
-/Cargo.lock
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/CHANGELOG.md
deleted file mode 100644
index 11d7f62f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/CHANGELOG.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-## [Unreleased]
-
-## [0.3.2](https://github.com/rust-lang/glob/compare/v0.3.1...v0.3.2) - 2024-12-28
-
-## What's Changed
-* Add fs::symlink_metadata to detect broken symlinks by @kyoheiu in https://github.com/rust-lang/glob/pull/105
-* Add support for windows verbatim disk paths by @nico-abram in https://github.com/rust-lang/glob/pull/112
-* Respect `require_literal_leading_dot` option in `glob_with` method for path components by @JohnTitor in https://github.com/rust-lang/glob/pull/128
-* Harden tests for symlink by @JohnTitor in https://github.com/rust-lang/glob/pull/127
-* Remove "extern crate" directions from README by @zmitchell in https://github.com/rust-lang/glob/pull/131
-* Add FIXME for tempdir by @JohnTitor in https://github.com/rust-lang/glob/pull/126
-* Cache information about file type by @Kobzol in https://github.com/rust-lang/glob/pull/135
-* Document the behaviour of ** with files by @Wilfred in https://github.com/rust-lang/glob/pull/138
-* Add dependabot by @oriontvv in https://github.com/rust-lang/glob/pull/139
-* Bump actions/checkout from 3 to 4 by @dependabot in https://github.com/rust-lang/glob/pull/140
-* Check only (no longer test) at the MSRV by @tgross35 in https://github.com/rust-lang/glob/pull/151
-* Add release-plz for automated releases by @tgross35 in https://github.com/rust-lang/glob/pull/150
-
-## New Contributors
-* @kyoheiu made their first contribution in https://github.com/rust-lang/glob/pull/105
-* @nico-abram made their first contribution in https://github.com/rust-lang/glob/pull/112
-* @zmitchell made their first contribution in https://github.com/rust-lang/glob/pull/131
-* @Kobzol made their first contribution in https://github.com/rust-lang/glob/pull/135
-* @Wilfred made their first contribution in https://github.com/rust-lang/glob/pull/138
-* @oriontvv made their first contribution in https://github.com/rust-lang/glob/pull/139
-* @dependabot made their first contribution in https://github.com/rust-lang/glob/pull/140
-* @tgross35 made their first contribution in https://github.com/rust-lang/glob/pull/151
-
-**Full Changelog**: https://github.com/rust-lang/glob/compare/0.3.1...0.3.2
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.lock b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.lock
deleted file mode 100644
index d6a653b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.lock
+++ /dev/null
@@ -1,107 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-[[package]]
-name = "doc-comment"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "glob"
-version = "0.3.2"
-dependencies = [
- "doc-comment 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "libc"
-version = "0.2.169"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "rand"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.169 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "rdrand"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tempdir"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[metadata]
-"checksum doc-comment 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
-"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum libc 0.2.169 (registry+https://github.com/rust-lang/crates.io-index)" = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
-"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
-"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.toml
deleted file mode 100644
index 59393e1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.toml
+++ /dev/null
@@ -1,45 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-rust-version = "1.23.0"
-name = "glob"
-version = "0.3.2"
-authors = ["The Rust Project Developers"]
-build = false
-autolib = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = """
-Support for matching file paths against Unix shell style patterns.
-"""
-homepage = "https://github.com/rust-lang/glob"
-documentation = "https://docs.rs/glob/0.3.1"
-readme = "README.md"
-categories = ["filesystem"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/rust-lang/glob"
-
-[lib]
-name = "glob"
-path = "src/lib.rs"
-
-[[test]]
-name = "glob-std"
-path = "tests/glob-std.rs"
-
-[dev-dependencies.doc-comment]
-version = "0.3"
-
-[dev-dependencies.tempdir]
-version = "0.3"
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.toml.orig
deleted file mode 100644
index 7eb57cc..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/Cargo.toml.orig
+++ /dev/null
@@ -1,19 +0,0 @@
-[package]
-
-name = "glob"
-version = "0.3.2"
-authors = ["The Rust Project Developers"]
-license = "MIT OR Apache-2.0"
-homepage = "https://github.com/rust-lang/glob"
-repository = "https://github.com/rust-lang/glob"
-documentation = "https://docs.rs/glob/0.3.1"
-description = """
-Support for matching file paths against Unix shell style patterns.
-"""
-categories = ["filesystem"]
-rust-version = "1.23.0"
-
-[dev-dependencies]
-# FIXME: This should be replaced by `tempfile`
-tempdir = "0.3"
-doc-comment = "0.3"
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/LICENSE-APACHE
deleted file mode 100644
index 16fe87b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-	http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/LICENSE-MIT
deleted file mode 100644
index 39d4bdb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/LICENSE-MIT
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2014 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/README.md b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/README.md
deleted file mode 100644
index 36302eea..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-glob
-====
-
-Support for matching file paths against Unix shell style patterns.
-
-[![Continuous integration](https://github.com/rust-lang/glob/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/glob/actions/workflows/rust.yml)
-
-[Documentation](https://docs.rs/glob)
-
-## Usage
-
-To use `glob`, add this to your `Cargo.toml`:
-
-```toml
-[dependencies]
-glob = "0.3.1"
-```
-
-If you're using Rust 1.30 or earlier, or edition 2015, add this to your crate root:
-```rust
-extern crate glob;
-```
-
-## Examples
-
-Print all jpg files in /media/ and all of its subdirectories.
-
-```rust
-use glob::glob;
-
-for entry in glob("/media/**/*.jpg").expect("Failed to read glob pattern") {
-    match entry {
-        Ok(path) => println!("{:?}", path.display()),
-        Err(e) => println!("{:?}", e),
-    }
-}
-```
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/src/lib.rs
deleted file mode 100644
index fef80908..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/src/lib.rs
+++ /dev/null
@@ -1,1501 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Support for matching file paths against Unix shell style patterns.
-//!
-//! The `glob` and `glob_with` functions allow querying the filesystem for all
-//! files that match a particular pattern (similar to the libc `glob` function).
-//! The methods on the `Pattern` type provide functionality for checking if
-//! individual paths match a particular pattern (similar to the libc `fnmatch`
-//! function).
-//!
-//! For consistency across platforms, and for Windows support, this module
-//! is implemented entirely in Rust rather than deferring to the libc
-//! `glob`/`fnmatch` functions.
-//!
-//! # Examples
-//!
-//! To print all jpg files in `/media/` and all of its subdirectories.
-//!
-//! ```rust,no_run
-//! use glob::glob;
-//!
-//! for entry in glob("/media/**/*.jpg").expect("Failed to read glob pattern") {
-//!     match entry {
-//!         Ok(path) => println!("{:?}", path.display()),
-//!         Err(e) => println!("{:?}", e),
-//!     }
-//! }
-//! ```
-//!
-//! To print all files containing the letter "a", case insensitive, in a `local`
-//! directory relative to the current working directory. This ignores errors
-//! instead of printing them.
-//!
-//! ```rust,no_run
-//! use glob::glob_with;
-//! use glob::MatchOptions;
-//!
-//! let options = MatchOptions {
-//!     case_sensitive: false,
-//!     require_literal_separator: false,
-//!     require_literal_leading_dot: false,
-//! };
-//! for entry in glob_with("local/*a*", options).unwrap() {
-//!     if let Ok(path) = entry {
-//!         println!("{:?}", path.display())
-//!     }
-//! }
-//! ```
-
-#![doc(
-    html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
-    html_favicon_url = "https://www.rust-lang.org/favicon.ico",
-    html_root_url = "https://docs.rs/glob/0.3.1"
-)]
-#![deny(missing_docs)]
-
-#[cfg(test)]
-#[macro_use]
-extern crate doc_comment;
-
-#[cfg(test)]
-doctest!("../README.md");
-
-use std::cmp;
-use std::error::Error;
-use std::fmt;
-use std::fs;
-use std::fs::DirEntry;
-use std::io;
-use std::ops::Deref;
-use std::path::{self, Component, Path, PathBuf};
-use std::str::FromStr;
-
-use CharSpecifier::{CharRange, SingleChar};
-use MatchResult::{EntirePatternDoesntMatch, Match, SubPatternDoesntMatch};
-use PatternToken::AnyExcept;
-use PatternToken::{AnyChar, AnyRecursiveSequence, AnySequence, AnyWithin, Char};
-
-/// An iterator that yields `Path`s from the filesystem that match a particular
-/// pattern.
-///
-/// Note that it yields `GlobResult` in order to report any `IoErrors` that may
-/// arise during iteration. If a directory matches but is unreadable,
-/// thereby preventing its contents from being checked for matches, a
-/// `GlobError` is returned to express this.
-///
-/// See the `glob` function for more details.
-#[derive(Debug)]
-pub struct Paths {
-    dir_patterns: Vec<Pattern>,
-    require_dir: bool,
-    options: MatchOptions,
-    todo: Vec<Result<(PathWrapper, usize), GlobError>>,
-    scope: Option<PathWrapper>,
-}
-
-/// Return an iterator that produces all the `Path`s that match the given
-/// pattern using default match options, which may be absolute or relative to
-/// the current working directory.
-///
-/// This may return an error if the pattern is invalid.
-///
-/// This method uses the default match options and is equivalent to calling
-/// `glob_with(pattern, MatchOptions::new())`. Use `glob_with` directly if you
-/// want to use non-default match options.
-///
-/// When iterating, each result is a `GlobResult` which expresses the
-/// possibility that there was an `IoError` when attempting to read the contents
-/// of the matched path.  In other words, each item returned by the iterator
-/// will either be an `Ok(Path)` if the path matched, or an `Err(GlobError)` if
-/// the path (partially) matched _but_ its contents could not be read in order
-/// to determine if its contents matched.
-///
-/// See the `Paths` documentation for more information.
-///
-/// # Examples
-///
-/// Consider a directory `/media/pictures` containing only the files
-/// `kittens.jpg`, `puppies.jpg` and `hamsters.gif`:
-///
-/// ```rust,no_run
-/// use glob::glob;
-///
-/// for entry in glob("/media/pictures/*.jpg").unwrap() {
-///     match entry {
-///         Ok(path) => println!("{:?}", path.display()),
-///
-///         // if the path matched but was unreadable,
-///         // thereby preventing its contents from matching
-///         Err(e) => println!("{:?}", e),
-///     }
-/// }
-/// ```
-///
-/// The above code will print:
-///
-/// ```ignore
-/// /media/pictures/kittens.jpg
-/// /media/pictures/puppies.jpg
-/// ```
-///
-/// If you want to ignore unreadable paths, you can use something like
-/// `filter_map`:
-///
-/// ```rust
-/// use glob::glob;
-/// use std::result::Result;
-///
-/// for path in glob("/media/pictures/*.jpg").unwrap().filter_map(Result::ok) {
-///     println!("{}", path.display());
-/// }
-/// ```
-/// Paths are yielded in alphabetical order.
-pub fn glob(pattern: &str) -> Result<Paths, PatternError> {
-    glob_with(pattern, MatchOptions::new())
-}
-
-/// Return an iterator that produces all the `Path`s that match the given
-/// pattern using the specified match options, which may be absolute or relative
-/// to the current working directory.
-///
-/// This may return an error if the pattern is invalid.
-///
-/// This function accepts Unix shell style patterns as described by
-/// `Pattern::new(..)`.  The options given are passed through unchanged to
-/// `Pattern::matches_with(..)` with the exception that
-/// `require_literal_separator` is always set to `true` regardless of the value
-/// passed to this function.
-///
-/// Paths are yielded in alphabetical order.
-pub fn glob_with(pattern: &str, options: MatchOptions) -> Result<Paths, PatternError> {
-    #[cfg(windows)]
-    fn check_windows_verbatim(p: &Path) -> bool {
-        match p.components().next() {
-            Some(Component::Prefix(ref p)) => {
-                // Allow VerbatimDisk paths. std canonicalize() generates them, and they work fine
-                p.kind().is_verbatim()
-                    && if let std::path::Prefix::VerbatimDisk(_) = p.kind() {
-                        false
-                    } else {
-                        true
-                    }
-            }
-            _ => false,
-        }
-    }
-    #[cfg(not(windows))]
-    fn check_windows_verbatim(_: &Path) -> bool {
-        false
-    }
-
-    #[cfg(windows)]
-    fn to_scope(p: &Path) -> PathBuf {
-        // FIXME handle volume relative paths here
-        p.to_path_buf()
-    }
-    #[cfg(not(windows))]
-    fn to_scope(p: &Path) -> PathBuf {
-        p.to_path_buf()
-    }
-
-    // make sure that the pattern is valid first, else early return with error
-    if let Err(err) = Pattern::new(pattern) {
-        return Err(err);
-    }
-
-    let mut components = Path::new(pattern).components().peekable();
-    loop {
-        match components.peek() {
-            Some(&Component::Prefix(..)) | Some(&Component::RootDir) => {
-                components.next();
-            }
-            _ => break,
-        }
-    }
-    let rest = components.map(|s| s.as_os_str()).collect::<PathBuf>();
-    let normalized_pattern = Path::new(pattern).iter().collect::<PathBuf>();
-    let root_len = normalized_pattern.to_str().unwrap().len() - rest.to_str().unwrap().len();
-    let root = if root_len > 0 {
-        Some(Path::new(&pattern[..root_len]))
-    } else {
-        None
-    };
-
-    if root_len > 0 && check_windows_verbatim(root.unwrap()) {
-        // FIXME: How do we want to handle verbatim paths? I'm inclined to
-        // return nothing, since we can't very well find all UNC shares with a
-        // 1-letter server name.
-        return Ok(Paths {
-            dir_patterns: Vec::new(),
-            require_dir: false,
-            options,
-            todo: Vec::new(),
-            scope: None,
-        });
-    }
-
-    let scope = root.map_or_else(|| PathBuf::from("."), to_scope);
-    let scope = PathWrapper::from_path(scope);
-
-    let mut dir_patterns = Vec::new();
-    let components =
-        pattern[cmp::min(root_len, pattern.len())..].split_terminator(path::is_separator);
-
-    for component in components {
-        dir_patterns.push(Pattern::new(component)?);
-    }
-
-    if root_len == pattern.len() {
-        dir_patterns.push(Pattern {
-            original: "".to_string(),
-            tokens: Vec::new(),
-            is_recursive: false,
-        });
-    }
-
-    let last_is_separator = pattern.chars().next_back().map(path::is_separator);
-    let require_dir = last_is_separator == Some(true);
-    let todo = Vec::new();
-
-    Ok(Paths {
-        dir_patterns,
-        require_dir,
-        options,
-        todo,
-        scope: Some(scope),
-    })
-}
-
-/// A glob iteration error.
-///
-/// This is typically returned when a particular path cannot be read
-/// to determine if its contents match the glob pattern. This is possible
-/// if the program lacks the appropriate permissions, for example.
-#[derive(Debug)]
-pub struct GlobError {
-    path: PathBuf,
-    error: io::Error,
-}
-
-impl GlobError {
-    /// The Path that the error corresponds to.
-    pub fn path(&self) -> &Path {
-        &self.path
-    }
-
-    /// The error in question.
-    pub fn error(&self) -> &io::Error {
-        &self.error
-    }
-
-    /// Consumes self, returning the _raw_ underlying `io::Error`
-    pub fn into_error(self) -> io::Error {
-        self.error
-    }
-}
-
-impl Error for GlobError {
-    #[allow(deprecated)]
-    fn description(&self) -> &str {
-        self.error.description()
-    }
-
-    #[allow(unknown_lints, bare_trait_objects)]
-    fn cause(&self) -> Option<&Error> {
-        Some(&self.error)
-    }
-}
-
-impl fmt::Display for GlobError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "attempting to read `{}` resulted in an error: {}",
-            self.path.display(),
-            self.error
-        )
-    }
-}
-
-#[derive(Debug)]
-struct PathWrapper {
-    path: PathBuf,
-    is_directory: bool,
-}
-
-impl PathWrapper {
-    fn from_dir_entry(path: PathBuf, e: DirEntry) -> Self {
-        let is_directory = e
-            .file_type()
-            .ok()
-            .and_then(|file_type| {
-                // We need to use fs::metadata to resolve the actual path
-                // if it's a symlink.
-                if file_type.is_symlink() {
-                    None
-                } else {
-                    Some(file_type.is_dir())
-                }
-            })
-            .or_else(|| fs::metadata(&path).map(|m| m.is_dir()).ok())
-            .unwrap_or(false);
-        Self { path, is_directory }
-    }
-    fn from_path(path: PathBuf) -> Self {
-        let is_directory = fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false);
-        Self { path, is_directory }
-    }
-
-    fn into_path(self) -> PathBuf {
-        self.path
-    }
-}
-
-impl Deref for PathWrapper {
-    type Target = Path;
-
-    fn deref(&self) -> &Self::Target {
-        self.path.deref()
-    }
-}
-
-impl AsRef<Path> for PathWrapper {
-    fn as_ref(&self) -> &Path {
-        self.path.as_ref()
-    }
-}
-
-/// An alias for a glob iteration result.
-///
-/// This represents either a matched path or a glob iteration error,
-/// such as failing to read a particular directory's contents.
-pub type GlobResult = Result<PathBuf, GlobError>;
-
-impl Iterator for Paths {
-    type Item = GlobResult;
-
-    fn next(&mut self) -> Option<GlobResult> {
-        // the todo buffer hasn't been initialized yet, so it's done at this
-        // point rather than in glob() so that the errors are unified that is,
-        // failing to fill the buffer is an iteration error construction of the
-        // iterator (i.e. glob()) only fails if it fails to compile the Pattern
-        if let Some(scope) = self.scope.take() {
-            if !self.dir_patterns.is_empty() {
-                // Shouldn't happen, but we're using -1 as a special index.
-                assert!(self.dir_patterns.len() < !0 as usize);
-
-                fill_todo(&mut self.todo, &self.dir_patterns, 0, &scope, self.options);
-            }
-        }
-
-        loop {
-            if self.dir_patterns.is_empty() || self.todo.is_empty() {
-                return None;
-            }
-
-            let (path, mut idx) = match self.todo.pop().unwrap() {
-                Ok(pair) => pair,
-                Err(e) => return Some(Err(e)),
-            };
-
-            // idx -1: was already checked by fill_todo, maybe path was '.' or
-            // '..' that we can't match here because of normalization.
-            if idx == !0 as usize {
-                if self.require_dir && !path.is_directory {
-                    continue;
-                }
-                return Some(Ok(path.into_path()));
-            }
-
-            if self.dir_patterns[idx].is_recursive {
-                let mut next = idx;
-
-                // collapse consecutive recursive patterns
-                while (next + 1) < self.dir_patterns.len()
-                    && self.dir_patterns[next + 1].is_recursive
-                {
-                    next += 1;
-                }
-
-                if path.is_directory {
-                    // the path is a directory, so it's a match
-
-                    // push this directory's contents
-                    fill_todo(
-                        &mut self.todo,
-                        &self.dir_patterns,
-                        next,
-                        &path,
-                        self.options,
-                    );
-
-                    if next == self.dir_patterns.len() - 1 {
-                        // pattern ends in recursive pattern, so return this
-                        // directory as a result
-                        return Some(Ok(path.into_path()));
-                    } else {
-                        // advanced to the next pattern for this path
-                        idx = next + 1;
-                    }
-                } else if next == self.dir_patterns.len() - 1 {
-                    // not a directory and it's the last pattern, meaning no
-                    // match
-                    continue;
-                } else {
-                    // advanced to the next pattern for this path
-                    idx = next + 1;
-                }
-            }
-
-            // not recursive, so match normally
-            if self.dir_patterns[idx].matches_with(
-                {
-                    match path.file_name().and_then(|s| s.to_str()) {
-                        // FIXME (#9639): How do we handle non-utf8 filenames?
-                        // Ignore them for now; ideally we'd still match them
-                        // against a *
-                        None => continue,
-                        Some(x) => x,
-                    }
-                },
-                self.options,
-            ) {
-                if idx == self.dir_patterns.len() - 1 {
-                    // it is not possible for a pattern to match a directory
-                    // *AND* its children so we don't need to check the
-                    // children
-
-                    if !self.require_dir || path.is_directory {
-                        return Some(Ok(path.into_path()));
-                    }
-                } else {
-                    fill_todo(
-                        &mut self.todo,
-                        &self.dir_patterns,
-                        idx + 1,
-                        &path,
-                        self.options,
-                    );
-                }
-            }
-        }
-    }
-}
-
-/// A pattern parsing error.
-#[derive(Debug)]
-#[allow(missing_copy_implementations)]
-pub struct PatternError {
-    /// The approximate character index of where the error occurred.
-    pub pos: usize,
-
-    /// A message describing the error.
-    pub msg: &'static str,
-}
-
-impl Error for PatternError {
-    fn description(&self) -> &str {
-        self.msg
-    }
-}
-
-impl fmt::Display for PatternError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "Pattern syntax error near position {}: {}",
-            self.pos, self.msg
-        )
-    }
-}
-
-/// A compiled Unix shell style pattern.
-///
-/// - `?` matches any single character.
-///
-/// - `*` matches any (possibly empty) sequence of characters.
-///
-/// - `**` matches the current directory and arbitrary
-///   subdirectories. To match files in arbitrary subdiretories, use
-///   `**/*`.
-///
-///   This sequence **must** form a single path component, so both
-///   `**a` and `b**` are invalid and will result in an error.  A
-///   sequence of more than two consecutive `*` characters is also
-///   invalid.
-///
-/// - `[...]` matches any character inside the brackets.  Character sequences
-///   can also specify ranges of characters, as ordered by Unicode, so e.g.
-///   `[0-9]` specifies any character between 0 and 9 inclusive. An unclosed
-///   bracket is invalid.
-///
-/// - `[!...]` is the negation of `[...]`, i.e. it matches any characters
-///   **not** in the brackets.
-///
-/// - The metacharacters `?`, `*`, `[`, `]` can be matched by using brackets
-///   (e.g. `[?]`).  When a `]` occurs immediately following `[` or `[!` then it
-///   is interpreted as being part of, rather then ending, the character set, so
-///   `]` and NOT `]` can be matched by `[]]` and `[!]]` respectively.  The `-`
-///   character can be specified inside a character sequence pattern by placing
-///   it at the start or the end, e.g. `[abc-]`.
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Debug)]
-pub struct Pattern {
-    original: String,
-    tokens: Vec<PatternToken>,
-    is_recursive: bool,
-}
-
-/// Show the original glob pattern.
-impl fmt::Display for Pattern {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.original.fmt(f)
-    }
-}
-
-impl FromStr for Pattern {
-    type Err = PatternError;
-
-    fn from_str(s: &str) -> Result<Self, PatternError> {
-        Self::new(s)
-    }
-}
-
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-enum PatternToken {
-    Char(char),
-    AnyChar,
-    AnySequence,
-    AnyRecursiveSequence,
-    AnyWithin(Vec<CharSpecifier>),
-    AnyExcept(Vec<CharSpecifier>),
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-enum CharSpecifier {
-    SingleChar(char),
-    CharRange(char, char),
-}
-
-#[derive(Copy, Clone, PartialEq)]
-enum MatchResult {
-    Match,
-    SubPatternDoesntMatch,
-    EntirePatternDoesntMatch,
-}
-
-const ERROR_WILDCARDS: &str = "wildcards are either regular `*` or recursive `**`";
-const ERROR_RECURSIVE_WILDCARDS: &str = "recursive wildcards must form a single path \
-                                         component";
-const ERROR_INVALID_RANGE: &str = "invalid range pattern";
-
-impl Pattern {
-    /// This function compiles Unix shell style patterns.
-    ///
-    /// An invalid glob pattern will yield a `PatternError`.
-    pub fn new(pattern: &str) -> Result<Self, PatternError> {
-        let chars = pattern.chars().collect::<Vec<_>>();
-        let mut tokens = Vec::new();
-        let mut is_recursive = false;
-        let mut i = 0;
-
-        while i < chars.len() {
-            match chars[i] {
-                '?' => {
-                    tokens.push(AnyChar);
-                    i += 1;
-                }
-                '*' => {
-                    let old = i;
-
-                    while i < chars.len() && chars[i] == '*' {
-                        i += 1;
-                    }
-
-                    let count = i - old;
-
-                    if count > 2 {
-                        return Err(PatternError {
-                            pos: old + 2,
-                            msg: ERROR_WILDCARDS,
-                        });
-                    } else if count == 2 {
-                        // ** can only be an entire path component
-                        // i.e. a/**/b is valid, but a**/b or a/**b is not
-                        // invalid matches are treated literally
-                        let is_valid = if i == 2 || path::is_separator(chars[i - count - 1]) {
-                            // it ends in a '/'
-                            if i < chars.len() && path::is_separator(chars[i]) {
-                                i += 1;
-                                true
-                            // or the pattern ends here
-                            // this enables the existing globbing mechanism
-                            } else if i == chars.len() {
-                                true
-                            // `**` ends in non-separator
-                            } else {
-                                return Err(PatternError {
-                                    pos: i,
-                                    msg: ERROR_RECURSIVE_WILDCARDS,
-                                });
-                            }
-                        // `**` begins with non-separator
-                        } else {
-                            return Err(PatternError {
-                                pos: old - 1,
-                                msg: ERROR_RECURSIVE_WILDCARDS,
-                            });
-                        };
-
-                        if is_valid {
-                            // collapse consecutive AnyRecursiveSequence to a
-                            // single one
-
-                            let tokens_len = tokens.len();
-
-                            if !(tokens_len > 1 && tokens[tokens_len - 1] == AnyRecursiveSequence) {
-                                is_recursive = true;
-                                tokens.push(AnyRecursiveSequence);
-                            }
-                        }
-                    } else {
-                        tokens.push(AnySequence);
-                    }
-                }
-                '[' => {
-                    if i + 4 <= chars.len() && chars[i + 1] == '!' {
-                        match chars[i + 3..].iter().position(|x| *x == ']') {
-                            None => (),
-                            Some(j) => {
-                                let chars = &chars[i + 2..i + 3 + j];
-                                let cs = parse_char_specifiers(chars);
-                                tokens.push(AnyExcept(cs));
-                                i += j + 4;
-                                continue;
-                            }
-                        }
-                    } else if i + 3 <= chars.len() && chars[i + 1] != '!' {
-                        match chars[i + 2..].iter().position(|x| *x == ']') {
-                            None => (),
-                            Some(j) => {
-                                let cs = parse_char_specifiers(&chars[i + 1..i + 2 + j]);
-                                tokens.push(AnyWithin(cs));
-                                i += j + 3;
-                                continue;
-                            }
-                        }
-                    }
-
-                    // if we get here then this is not a valid range pattern
-                    return Err(PatternError {
-                        pos: i,
-                        msg: ERROR_INVALID_RANGE,
-                    });
-                }
-                c => {
-                    tokens.push(Char(c));
-                    i += 1;
-                }
-            }
-        }
-
-        Ok(Self {
-            tokens,
-            original: pattern.to_string(),
-            is_recursive,
-        })
-    }
-
-    /// Escape metacharacters within the given string by surrounding them in
-    /// brackets. The resulting string will, when compiled into a `Pattern`,
-    /// match the input string and nothing else.
-    pub fn escape(s: &str) -> String {
-        let mut escaped = String::new();
-        for c in s.chars() {
-            match c {
-                // note that ! does not need escaping because it is only special
-                // inside brackets
-                '?' | '*' | '[' | ']' => {
-                    escaped.push('[');
-                    escaped.push(c);
-                    escaped.push(']');
-                }
-                c => {
-                    escaped.push(c);
-                }
-            }
-        }
-        escaped
-    }
-
-    /// Return if the given `str` matches this `Pattern` using the default
-    /// match options (i.e. `MatchOptions::new()`).
-    ///
-    /// # Examples
-    ///
-    /// ```rust
-    /// use glob::Pattern;
-    ///
-    /// assert!(Pattern::new("c?t").unwrap().matches("cat"));
-    /// assert!(Pattern::new("k[!e]tteh").unwrap().matches("kitteh"));
-    /// assert!(Pattern::new("d*g").unwrap().matches("doog"));
-    /// ```
-    pub fn matches(&self, str: &str) -> bool {
-        self.matches_with(str, MatchOptions::new())
-    }
-
-    /// Return if the given `Path`, when converted to a `str`, matches this
-    /// `Pattern` using the default match options (i.e. `MatchOptions::new()`).
-    pub fn matches_path(&self, path: &Path) -> bool {
-        // FIXME (#9639): This needs to handle non-utf8 paths
-        path.to_str().map_or(false, |s| self.matches(s))
-    }
-
-    /// Return if the given `str` matches this `Pattern` using the specified
-    /// match options.
-    pub fn matches_with(&self, str: &str, options: MatchOptions) -> bool {
-        self.matches_from(true, str.chars(), 0, options) == Match
-    }
-
-    /// Return if the given `Path`, when converted to a `str`, matches this
-    /// `Pattern` using the specified match options.
-    pub fn matches_path_with(&self, path: &Path, options: MatchOptions) -> bool {
-        // FIXME (#9639): This needs to handle non-utf8 paths
-        path.to_str()
-            .map_or(false, |s| self.matches_with(s, options))
-    }
-
-    /// Access the original glob pattern.
-    pub fn as_str(&self) -> &str {
-        &self.original
-    }
-
-    fn matches_from(
-        &self,
-        mut follows_separator: bool,
-        mut file: std::str::Chars,
-        i: usize,
-        options: MatchOptions,
-    ) -> MatchResult {
-        for (ti, token) in self.tokens[i..].iter().enumerate() {
-            match *token {
-                AnySequence | AnyRecursiveSequence => {
-                    // ** must be at the start.
-                    debug_assert!(match *token {
-                        AnyRecursiveSequence => follows_separator,
-                        _ => true,
-                    });
-
-                    // Empty match
-                    match self.matches_from(follows_separator, file.clone(), i + ti + 1, options) {
-                        SubPatternDoesntMatch => (), // keep trying
-                        m => return m,
-                    };
-
-                    while let Some(c) = file.next() {
-                        if follows_separator && options.require_literal_leading_dot && c == '.' {
-                            return SubPatternDoesntMatch;
-                        }
-                        follows_separator = path::is_separator(c);
-                        match *token {
-                            AnyRecursiveSequence if !follows_separator => continue,
-                            AnySequence
-                                if options.require_literal_separator && follows_separator =>
-                            {
-                                return SubPatternDoesntMatch
-                            }
-                            _ => (),
-                        }
-                        match self.matches_from(
-                            follows_separator,
-                            file.clone(),
-                            i + ti + 1,
-                            options,
-                        ) {
-                            SubPatternDoesntMatch => (), // keep trying
-                            m => return m,
-                        }
-                    }
-                }
-                _ => {
-                    let c = match file.next() {
-                        Some(c) => c,
-                        None => return EntirePatternDoesntMatch,
-                    };
-
-                    let is_sep = path::is_separator(c);
-
-                    if !match *token {
-                        AnyChar | AnyWithin(..) | AnyExcept(..)
-                            if (options.require_literal_separator && is_sep)
-                                || (follows_separator
-                                    && options.require_literal_leading_dot
-                                    && c == '.') =>
-                        {
-                            false
-                        }
-                        AnyChar => true,
-                        AnyWithin(ref specifiers) => in_char_specifiers(&specifiers, c, options),
-                        AnyExcept(ref specifiers) => !in_char_specifiers(&specifiers, c, options),
-                        Char(c2) => chars_eq(c, c2, options.case_sensitive),
-                        AnySequence | AnyRecursiveSequence => unreachable!(),
-                    } {
-                        return SubPatternDoesntMatch;
-                    }
-                    follows_separator = is_sep;
-                }
-            }
-        }
-
-        // Iter is fused.
-        if file.next().is_none() {
-            Match
-        } else {
-            SubPatternDoesntMatch
-        }
-    }
-}
-
-// Fills `todo` with paths under `path` to be matched by `patterns[idx]`,
-// special-casing patterns to match `.` and `..`, and avoiding `readdir()`
-// calls when there are no metacharacters in the pattern.
-fn fill_todo(
-    todo: &mut Vec<Result<(PathWrapper, usize), GlobError>>,
-    patterns: &[Pattern],
-    idx: usize,
-    path: &PathWrapper,
-    options: MatchOptions,
-) {
-    // convert a pattern that's just many Char(_) to a string
-    fn pattern_as_str(pattern: &Pattern) -> Option<String> {
-        let mut s = String::new();
-        for token in &pattern.tokens {
-            match *token {
-                Char(c) => s.push(c),
-                _ => return None,
-            }
-        }
-
-        Some(s)
-    }
-
-    let add = |todo: &mut Vec<_>, next_path: PathWrapper| {
-        if idx + 1 == patterns.len() {
-            // We know it's good, so don't make the iterator match this path
-            // against the pattern again. In particular, it can't match
-            // . or .. globs since these never show up as path components.
-            todo.push(Ok((next_path, !0 as usize)));
-        } else {
-            fill_todo(todo, patterns, idx + 1, &next_path, options);
-        }
-    };
-
-    let pattern = &patterns[idx];
-    let is_dir = path.is_directory;
-    let curdir = path.as_ref() == Path::new(".");
-    match pattern_as_str(pattern) {
-        Some(s) => {
-            // This pattern component doesn't have any metacharacters, so we
-            // don't need to read the current directory to know where to
-            // continue. So instead of passing control back to the iterator,
-            // we can just check for that one entry and potentially recurse
-            // right away.
-            let special = "." == s || ".." == s;
-            let next_path = if curdir {
-                PathBuf::from(s)
-            } else {
-                path.join(&s)
-            };
-            let next_path = PathWrapper::from_path(next_path);
-            if (special && is_dir)
-                || (!special
-                    && (fs::metadata(&next_path).is_ok()
-                        || fs::symlink_metadata(&next_path).is_ok()))
-            {
-                add(todo, next_path);
-            }
-        }
-        None if is_dir => {
-            let dirs = fs::read_dir(path).and_then(|d| {
-                d.map(|e| {
-                    e.map(|e| {
-                        let path = if curdir {
-                            PathBuf::from(e.path().file_name().unwrap())
-                        } else {
-                            e.path()
-                        };
-                        PathWrapper::from_dir_entry(path, e)
-                    })
-                })
-                .collect::<Result<Vec<_>, _>>()
-            });
-            match dirs {
-                Ok(mut children) => {
-                    if options.require_literal_leading_dot {
-                        children
-                            .retain(|x| !x.file_name().unwrap().to_str().unwrap().starts_with("."));
-                    }
-                    children.sort_by(|p1, p2| p2.file_name().cmp(&p1.file_name()));
-                    todo.extend(children.into_iter().map(|x| Ok((x, idx))));
-
-                    // Matching the special directory entries . and .. that
-                    // refer to the current and parent directory respectively
-                    // requires that the pattern has a leading dot, even if the
-                    // `MatchOptions` field `require_literal_leading_dot` is not
-                    // set.
-                    if !pattern.tokens.is_empty() && pattern.tokens[0] == Char('.') {
-                        for &special in &[".", ".."] {
-                            if pattern.matches_with(special, options) {
-                                add(todo, PathWrapper::from_path(path.join(special)));
-                            }
-                        }
-                    }
-                }
-                Err(e) => {
-                    todo.push(Err(GlobError {
-                        path: path.to_path_buf(),
-                        error: e,
-                    }));
-                }
-            }
-        }
-        None => {
-            // not a directory, nothing more to find
-        }
-    }
-}
-
-fn parse_char_specifiers(s: &[char]) -> Vec<CharSpecifier> {
-    let mut cs = Vec::new();
-    let mut i = 0;
-    while i < s.len() {
-        if i + 3 <= s.len() && s[i + 1] == '-' {
-            cs.push(CharRange(s[i], s[i + 2]));
-            i += 3;
-        } else {
-            cs.push(SingleChar(s[i]));
-            i += 1;
-        }
-    }
-    cs
-}
-
-fn in_char_specifiers(specifiers: &[CharSpecifier], c: char, options: MatchOptions) -> bool {
-    for &specifier in specifiers.iter() {
-        match specifier {
-            SingleChar(sc) => {
-                if chars_eq(c, sc, options.case_sensitive) {
-                    return true;
-                }
-            }
-            CharRange(start, end) => {
-                // FIXME: work with non-ascii chars properly (issue #1347)
-                if !options.case_sensitive && c.is_ascii() && start.is_ascii() && end.is_ascii() {
-                    let start = start.to_ascii_lowercase();
-                    let end = end.to_ascii_lowercase();
-
-                    let start_up = start.to_uppercase().next().unwrap();
-                    let end_up = end.to_uppercase().next().unwrap();
-
-                    // only allow case insensitive matching when
-                    // both start and end are within a-z or A-Z
-                    if start != start_up && end != end_up {
-                        let c = c.to_ascii_lowercase();
-                        if c >= start && c <= end {
-                            return true;
-                        }
-                    }
-                }
-
-                if c >= start && c <= end {
-                    return true;
-                }
-            }
-        }
-    }
-
-    false
-}
-
-/// A helper function to determine if two chars are (possibly case-insensitively) equal.
-fn chars_eq(a: char, b: char, case_sensitive: bool) -> bool {
-    if cfg!(windows) && path::is_separator(a) && path::is_separator(b) {
-        true
-    } else if !case_sensitive && a.is_ascii() && b.is_ascii() {
-        // FIXME: work with non-ascii chars properly (issue #9084)
-        a.to_ascii_lowercase() == b.to_ascii_lowercase()
-    } else {
-        a == b
-    }
-}
-
-/// Configuration options to modify the behaviour of `Pattern::matches_with(..)`.
-#[allow(missing_copy_implementations)]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
-pub struct MatchOptions {
-    /// Whether or not patterns should be matched in a case-sensitive manner.
-    /// This currently only considers upper/lower case relationships between
-    /// ASCII characters, but in future this might be extended to work with
-    /// Unicode.
-    pub case_sensitive: bool,
-
-    /// Whether or not path-component separator characters (e.g. `/` on
-    /// Posix) must be matched by a literal `/`, rather than by `*` or `?` or
-    /// `[...]`.
-    pub require_literal_separator: bool,
-
-    /// Whether or not paths that contain components that start with a `.`
-    /// will require that `.` appears literally in the pattern; `*`, `?`, `**`,
-    /// or `[...]` will not match. This is useful because such files are
-    /// conventionally considered hidden on Unix systems and it might be
-    /// desirable to skip them when listing files.
-    pub require_literal_leading_dot: bool,
-}
-
-impl MatchOptions {
-    /// Constructs a new `MatchOptions` with default field values. This is used
-    /// when calling functions that do not take an explicit `MatchOptions`
-    /// parameter.
-    ///
-    /// This function always returns this value:
-    ///
-    /// ```rust,ignore
-    /// MatchOptions {
-    ///     case_sensitive: true,
-    ///     require_literal_separator: false,
-    ///     require_literal_leading_dot: false
-    /// }
-    /// ```
-    ///
-    /// # Note
-    /// The behavior of this method doesn't match `default()`'s. This returns
-    /// `case_sensitive` as `true` while `default()` does it as `false`.
-    // FIXME: Consider unity the behavior with `default()` in a next major release.
-    pub fn new() -> Self {
-        Self {
-            case_sensitive: true,
-            require_literal_separator: false,
-            require_literal_leading_dot: false,
-        }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::{glob, MatchOptions, Pattern};
-    use std::path::Path;
-
-    #[test]
-    fn test_pattern_from_str() {
-        assert!("a*b".parse::<Pattern>().unwrap().matches("a_b"));
-        assert!("a/**b".parse::<Pattern>().unwrap_err().pos == 4);
-    }
-
-    #[test]
-    fn test_wildcard_errors() {
-        assert!(Pattern::new("a/**b").unwrap_err().pos == 4);
-        assert!(Pattern::new("a/bc**").unwrap_err().pos == 3);
-        assert!(Pattern::new("a/*****").unwrap_err().pos == 4);
-        assert!(Pattern::new("a/b**c**d").unwrap_err().pos == 2);
-        assert!(Pattern::new("a**b").unwrap_err().pos == 0);
-    }
-
-    #[test]
-    fn test_unclosed_bracket_errors() {
-        assert!(Pattern::new("abc[def").unwrap_err().pos == 3);
-        assert!(Pattern::new("abc[!def").unwrap_err().pos == 3);
-        assert!(Pattern::new("abc[").unwrap_err().pos == 3);
-        assert!(Pattern::new("abc[!").unwrap_err().pos == 3);
-        assert!(Pattern::new("abc[d").unwrap_err().pos == 3);
-        assert!(Pattern::new("abc[!d").unwrap_err().pos == 3);
-        assert!(Pattern::new("abc[]").unwrap_err().pos == 3);
-        assert!(Pattern::new("abc[!]").unwrap_err().pos == 3);
-    }
-
-    #[test]
-    fn test_glob_errors() {
-        assert!(glob("a/**b").err().unwrap().pos == 4);
-        assert!(glob("abc[def").err().unwrap().pos == 3);
-    }
-
-    // this test assumes that there is a /root directory and that
-    // the user running this test is not root or otherwise doesn't
-    // have permission to read its contents
-    #[cfg(all(unix, not(target_os = "macos")))]
-    #[test]
-    fn test_iteration_errors() {
-        use std::io;
-        let mut iter = glob("/root/*").unwrap();
-
-        // GlobErrors shouldn't halt iteration
-        let next = iter.next();
-        assert!(next.is_some());
-
-        let err = next.unwrap();
-        assert!(err.is_err());
-
-        let err = err.err().unwrap();
-        assert!(err.path() == Path::new("/root"));
-        assert!(err.error().kind() == io::ErrorKind::PermissionDenied);
-    }
-
-    #[test]
-    fn test_absolute_pattern() {
-        assert!(glob("/").unwrap().next().is_some());
-        assert!(glob("//").unwrap().next().is_some());
-
-        // assume that the filesystem is not empty!
-        assert!(glob("/*").unwrap().next().is_some());
-
-        #[cfg(not(windows))]
-        fn win() {}
-
-        #[cfg(windows)]
-        fn win() {
-            use std::env::current_dir;
-            use std::path::Component;
-
-            // check windows absolute paths with host/device components
-            let root_with_device = current_dir()
-                .ok()
-                .and_then(|p| match p.components().next().unwrap() {
-                    Component::Prefix(prefix_component) => {
-                        let path = Path::new(prefix_component.as_os_str());
-                        path.join("*");
-                        Some(path.to_path_buf())
-                    }
-                    _ => panic!("no prefix in this path"),
-                })
-                .unwrap();
-            // FIXME (#9639): This needs to handle non-utf8 paths
-            assert!(glob(root_with_device.as_os_str().to_str().unwrap())
-                .unwrap()
-                .next()
-                .is_some());
-        }
-        win()
-    }
-
-    #[test]
-    fn test_wildcards() {
-        assert!(Pattern::new("a*b").unwrap().matches("a_b"));
-        assert!(Pattern::new("a*b*c").unwrap().matches("abc"));
-        assert!(!Pattern::new("a*b*c").unwrap().matches("abcd"));
-        assert!(Pattern::new("a*b*c").unwrap().matches("a_b_c"));
-        assert!(Pattern::new("a*b*c").unwrap().matches("a___b___c"));
-        assert!(Pattern::new("abc*abc*abc")
-            .unwrap()
-            .matches("abcabcabcabcabcabcabc"));
-        assert!(!Pattern::new("abc*abc*abc")
-            .unwrap()
-            .matches("abcabcabcabcabcabcabca"));
-        assert!(Pattern::new("a*a*a*a*a*a*a*a*a")
-            .unwrap()
-            .matches("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"));
-        assert!(Pattern::new("a*b[xyz]c*d").unwrap().matches("abxcdbxcddd"));
-    }
-
-    #[test]
-    fn test_recursive_wildcards() {
-        let pat = Pattern::new("some/**/needle.txt").unwrap();
-        assert!(pat.matches("some/needle.txt"));
-        assert!(pat.matches("some/one/needle.txt"));
-        assert!(pat.matches("some/one/two/needle.txt"));
-        assert!(pat.matches("some/other/needle.txt"));
-        assert!(!pat.matches("some/other/notthis.txt"));
-
-        // a single ** should be valid, for globs
-        // Should accept anything
-        let pat = Pattern::new("**").unwrap();
-        assert!(pat.is_recursive);
-        assert!(pat.matches("abcde"));
-        assert!(pat.matches(""));
-        assert!(pat.matches(".asdf"));
-        assert!(pat.matches("/x/.asdf"));
-
-        // collapse consecutive wildcards
-        let pat = Pattern::new("some/**/**/needle.txt").unwrap();
-        assert!(pat.matches("some/needle.txt"));
-        assert!(pat.matches("some/one/needle.txt"));
-        assert!(pat.matches("some/one/two/needle.txt"));
-        assert!(pat.matches("some/other/needle.txt"));
-        assert!(!pat.matches("some/other/notthis.txt"));
-
-        // ** can begin the pattern
-        let pat = Pattern::new("**/test").unwrap();
-        assert!(pat.matches("one/two/test"));
-        assert!(pat.matches("one/test"));
-        assert!(pat.matches("test"));
-
-        // /** can begin the pattern
-        let pat = Pattern::new("/**/test").unwrap();
-        assert!(pat.matches("/one/two/test"));
-        assert!(pat.matches("/one/test"));
-        assert!(pat.matches("/test"));
-        assert!(!pat.matches("/one/notthis"));
-        assert!(!pat.matches("/notthis"));
-
-        // Only start sub-patterns on start of path segment.
-        let pat = Pattern::new("**/.*").unwrap();
-        assert!(pat.matches(".abc"));
-        assert!(pat.matches("abc/.abc"));
-        assert!(!pat.matches("ab.c"));
-        assert!(!pat.matches("abc/ab.c"));
-    }
-
-    #[test]
-    fn test_lots_of_files() {
-        // this is a good test because it touches lots of differently named files
-        glob("/*/*/*/*").unwrap().skip(10000).next();
-    }
-
-    #[test]
-    fn test_range_pattern() {
-        let pat = Pattern::new("a[0-9]b").unwrap();
-        for i in 0..10 {
-            assert!(pat.matches(&format!("a{}b", i)));
-        }
-        assert!(!pat.matches("a_b"));
-
-        let pat = Pattern::new("a[!0-9]b").unwrap();
-        for i in 0..10 {
-            assert!(!pat.matches(&format!("a{}b", i)));
-        }
-        assert!(pat.matches("a_b"));
-
-        let pats = ["[a-z123]", "[1a-z23]", "[123a-z]"];
-        for &p in pats.iter() {
-            let pat = Pattern::new(p).unwrap();
-            for c in "abcdefghijklmnopqrstuvwxyz".chars() {
-                assert!(pat.matches(&c.to_string()));
-            }
-            for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ".chars() {
-                let options = MatchOptions {
-                    case_sensitive: false,
-                    ..MatchOptions::new()
-                };
-                assert!(pat.matches_with(&c.to_string(), options));
-            }
-            assert!(pat.matches("1"));
-            assert!(pat.matches("2"));
-            assert!(pat.matches("3"));
-        }
-
-        let pats = ["[abc-]", "[-abc]", "[a-c-]"];
-        for &p in pats.iter() {
-            let pat = Pattern::new(p).unwrap();
-            assert!(pat.matches("a"));
-            assert!(pat.matches("b"));
-            assert!(pat.matches("c"));
-            assert!(pat.matches("-"));
-            assert!(!pat.matches("d"));
-        }
-
-        let pat = Pattern::new("[2-1]").unwrap();
-        assert!(!pat.matches("1"));
-        assert!(!pat.matches("2"));
-
-        assert!(Pattern::new("[-]").unwrap().matches("-"));
-        assert!(!Pattern::new("[!-]").unwrap().matches("-"));
-    }
-
-    #[test]
-    fn test_pattern_matches() {
-        let txt_pat = Pattern::new("*hello.txt").unwrap();
-        assert!(txt_pat.matches("hello.txt"));
-        assert!(txt_pat.matches("gareth_says_hello.txt"));
-        assert!(txt_pat.matches("some/path/to/hello.txt"));
-        assert!(txt_pat.matches("some\\path\\to\\hello.txt"));
-        assert!(txt_pat.matches("/an/absolute/path/to/hello.txt"));
-        assert!(!txt_pat.matches("hello.txt-and-then-some"));
-        assert!(!txt_pat.matches("goodbye.txt"));
-
-        let dir_pat = Pattern::new("*some/path/to/hello.txt").unwrap();
-        assert!(dir_pat.matches("some/path/to/hello.txt"));
-        assert!(dir_pat.matches("a/bigger/some/path/to/hello.txt"));
-        assert!(!dir_pat.matches("some/path/to/hello.txt-and-then-some"));
-        assert!(!dir_pat.matches("some/other/path/to/hello.txt"));
-    }
-
-    #[test]
-    fn test_pattern_escape() {
-        let s = "_[_]_?_*_!_";
-        assert_eq!(Pattern::escape(s), "_[[]_[]]_[?]_[*]_!_".to_string());
-        assert!(Pattern::new(&Pattern::escape(s)).unwrap().matches(s));
-    }
-
-    #[test]
-    fn test_pattern_matches_case_insensitive() {
-        let pat = Pattern::new("aBcDeFg").unwrap();
-        let options = MatchOptions {
-            case_sensitive: false,
-            require_literal_separator: false,
-            require_literal_leading_dot: false,
-        };
-
-        assert!(pat.matches_with("aBcDeFg", options));
-        assert!(pat.matches_with("abcdefg", options));
-        assert!(pat.matches_with("ABCDEFG", options));
-        assert!(pat.matches_with("AbCdEfG", options));
-    }
-
-    #[test]
-    fn test_pattern_matches_case_insensitive_range() {
-        let pat_within = Pattern::new("[a]").unwrap();
-        let pat_except = Pattern::new("[!a]").unwrap();
-
-        let options_case_insensitive = MatchOptions {
-            case_sensitive: false,
-            require_literal_separator: false,
-            require_literal_leading_dot: false,
-        };
-        let options_case_sensitive = MatchOptions {
-            case_sensitive: true,
-            require_literal_separator: false,
-            require_literal_leading_dot: false,
-        };
-
-        assert!(pat_within.matches_with("a", options_case_insensitive));
-        assert!(pat_within.matches_with("A", options_case_insensitive));
-        assert!(!pat_within.matches_with("A", options_case_sensitive));
-
-        assert!(!pat_except.matches_with("a", options_case_insensitive));
-        assert!(!pat_except.matches_with("A", options_case_insensitive));
-        assert!(pat_except.matches_with("A", options_case_sensitive));
-    }
-
-    #[test]
-    fn test_pattern_matches_require_literal_separator() {
-        let options_require_literal = MatchOptions {
-            case_sensitive: true,
-            require_literal_separator: true,
-            require_literal_leading_dot: false,
-        };
-        let options_not_require_literal = MatchOptions {
-            case_sensitive: true,
-            require_literal_separator: false,
-            require_literal_leading_dot: false,
-        };
-
-        assert!(Pattern::new("abc/def")
-            .unwrap()
-            .matches_with("abc/def", options_require_literal));
-        assert!(!Pattern::new("abc?def")
-            .unwrap()
-            .matches_with("abc/def", options_require_literal));
-        assert!(!Pattern::new("abc*def")
-            .unwrap()
-            .matches_with("abc/def", options_require_literal));
-        assert!(!Pattern::new("abc[/]def")
-            .unwrap()
-            .matches_with("abc/def", options_require_literal));
-
-        assert!(Pattern::new("abc/def")
-            .unwrap()
-            .matches_with("abc/def", options_not_require_literal));
-        assert!(Pattern::new("abc?def")
-            .unwrap()
-            .matches_with("abc/def", options_not_require_literal));
-        assert!(Pattern::new("abc*def")
-            .unwrap()
-            .matches_with("abc/def", options_not_require_literal));
-        assert!(Pattern::new("abc[/]def")
-            .unwrap()
-            .matches_with("abc/def", options_not_require_literal));
-    }
-
-    #[test]
-    fn test_pattern_matches_require_literal_leading_dot() {
-        let options_require_literal_leading_dot = MatchOptions {
-            case_sensitive: true,
-            require_literal_separator: false,
-            require_literal_leading_dot: true,
-        };
-        let options_not_require_literal_leading_dot = MatchOptions {
-            case_sensitive: true,
-            require_literal_separator: false,
-            require_literal_leading_dot: false,
-        };
-
-        let f = |options| {
-            Pattern::new("*.txt")
-                .unwrap()
-                .matches_with(".hello.txt", options)
-        };
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(!f(options_require_literal_leading_dot));
-
-        let f = |options| {
-            Pattern::new(".*.*")
-                .unwrap()
-                .matches_with(".hello.txt", options)
-        };
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(f(options_require_literal_leading_dot));
-
-        let f = |options| {
-            Pattern::new("aaa/bbb/*")
-                .unwrap()
-                .matches_with("aaa/bbb/.ccc", options)
-        };
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(!f(options_require_literal_leading_dot));
-
-        let f = |options| {
-            Pattern::new("aaa/bbb/*")
-                .unwrap()
-                .matches_with("aaa/bbb/c.c.c.", options)
-        };
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(f(options_require_literal_leading_dot));
-
-        let f = |options| {
-            Pattern::new("aaa/bbb/.*")
-                .unwrap()
-                .matches_with("aaa/bbb/.ccc", options)
-        };
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(f(options_require_literal_leading_dot));
-
-        let f = |options| {
-            Pattern::new("aaa/?bbb")
-                .unwrap()
-                .matches_with("aaa/.bbb", options)
-        };
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(!f(options_require_literal_leading_dot));
-
-        let f = |options| {
-            Pattern::new("aaa/[.]bbb")
-                .unwrap()
-                .matches_with("aaa/.bbb", options)
-        };
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(!f(options_require_literal_leading_dot));
-
-        let f = |options| Pattern::new("**/*").unwrap().matches_with(".bbb", options);
-        assert!(f(options_not_require_literal_leading_dot));
-        assert!(!f(options_require_literal_leading_dot));
-    }
-
-    #[test]
-    fn test_matches_path() {
-        // on windows, (Path::new("a/b").as_str().unwrap() == "a\\b"), so this
-        // tests that / and \ are considered equivalent on windows
-        assert!(Pattern::new("a/b").unwrap().matches_path(&Path::new("a/b")));
-    }
-
-    #[test]
-    fn test_path_join() {
-        let pattern = Path::new("one").join(&Path::new("**/*.rs"));
-        assert!(Pattern::new(pattern.to_str().unwrap()).is_ok());
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/tests/glob-std.rs b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/tests/glob-std.rs
deleted file mode 100644
index 44664139..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/tests/glob-std.rs
+++ /dev/null
@@ -1,474 +0,0 @@
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-windows TempDir may cause IoError on windows: #10462
-
-#![cfg_attr(test, deny(warnings))]
-
-extern crate glob;
-extern crate tempdir;
-
-use glob::{glob, glob_with};
-use std::env;
-use std::fs;
-use std::path::PathBuf;
-use tempdir::TempDir;
-
-#[test]
-fn main() {
-    fn mk_file(path: &str, directory: bool) {
-        if directory {
-            fs::create_dir(path).unwrap();
-        } else {
-            fs::File::create(path).unwrap();
-        }
-    }
-
-    fn mk_symlink_file(original: &str, link: &str) {
-        #[cfg(unix)]
-        {
-            use std::os::unix::fs::symlink;
-            symlink(original, link).unwrap();
-        }
-        #[cfg(windows)]
-        {
-            use std::os::windows::fs::symlink_file;
-            symlink_file(original, link).unwrap();
-        }
-    }
-
-    fn mk_symlink_dir(original: &str, link: &str) {
-        #[cfg(unix)]
-        {
-            use std::os::unix::fs::symlink;
-            symlink(original, link).unwrap();
-        }
-        #[cfg(windows)]
-        {
-            use std::os::windows::fs::symlink_dir;
-            symlink_dir(original, link).unwrap();
-        }
-    }
-
-    fn glob_vec(pattern: &str) -> Vec<PathBuf> {
-        glob(pattern).unwrap().map(|r| r.unwrap()).collect()
-    }
-
-    fn glob_with_vec(pattern: &str, options: glob::MatchOptions) -> Vec<PathBuf> {
-        glob_with(pattern, options).unwrap().map(|r| r.unwrap()).collect()
-    }
-
-    let root = TempDir::new("glob-tests");
-    let root = root.ok().expect("Should have created a temp directory");
-    assert!(env::set_current_dir(root.path()).is_ok());
-
-    mk_file("aaa", true);
-    mk_file("aaa/apple", true);
-    mk_file("aaa/orange", true);
-    mk_file("aaa/tomato", true);
-    mk_file("aaa/tomato/tomato.txt", false);
-    mk_file("aaa/tomato/tomoto.txt", false);
-    mk_file("bbb", true);
-    mk_file("bbb/specials", true);
-    mk_file("bbb/specials/!", false);
-    // a valid symlink
-    mk_symlink_file("aaa/apple", "aaa/green_apple");
-    // a broken symlink
-    mk_symlink_file("aaa/setsuna", "aaa/kazusa");
-
-    // windows does not allow `*` or `?` characters to exist in filenames
-    if env::consts::FAMILY != "windows" {
-        mk_file("bbb/specials/*", false);
-        mk_file("bbb/specials/?", false);
-    }
-
-    mk_file("bbb/specials/[", false);
-    mk_file("bbb/specials/]", false);
-    mk_file("ccc", true);
-    mk_file("xyz", true);
-    mk_file("xyz/x", false);
-    mk_file("xyz/y", false);
-    mk_file("xyz/z", false);
-
-    mk_file("r", true);
-    mk_file("r/current_dir.md", false);
-    mk_file("r/one", true);
-    mk_file("r/one/a.md", false);
-    mk_file("r/one/another", true);
-    mk_file("r/one/another/a.md", false);
-    mk_file("r/one/another/deep", true);
-    mk_file("r/one/another/deep/spelunking.md", false);
-    mk_file("r/another", true);
-    mk_file("r/another/a.md", false);
-    mk_file("r/two", true);
-    mk_file("r/two/b.md", false);
-    mk_file("r/three", true);
-    mk_file("r/three/c.md", false);
-
-    mk_file("dirsym", true);
-    mk_symlink_dir(root.path().join("r").to_str().unwrap(), "dirsym/link");
-
-    assert_eq!(
-        glob_vec("dirsym/**/*.md"),
-        vec!(
-            PathBuf::from("dirsym/link/another/a.md"),
-            PathBuf::from("dirsym/link/current_dir.md"),
-            PathBuf::from("dirsym/link/one/a.md"),
-            PathBuf::from("dirsym/link/one/another/a.md"),
-            PathBuf::from("dirsym/link/one/another/deep/spelunking.md"),
-            PathBuf::from("dirsym/link/three/c.md"),
-            PathBuf::from("dirsym/link/two/b.md")
-        )
-    );
-
-    // all recursive entities
-    assert_eq!(
-        glob_vec("r/**"),
-        vec!(
-            PathBuf::from("r/another"),
-            PathBuf::from("r/one"),
-            PathBuf::from("r/one/another"),
-            PathBuf::from("r/one/another/deep"),
-            PathBuf::from("r/three"),
-            PathBuf::from("r/two")
-        )
-    );
-
-    // std-canonicalized windows verbatim disk paths should work
-    if env::consts::FAMILY == "windows" {
-        let r_verbatim = PathBuf::from("r").canonicalize().unwrap();
-        assert_eq!(
-            glob_vec(&format!("{}\\**", r_verbatim.display().to_string()))
-                .into_iter()
-                .map(|p| p.strip_prefix(&r_verbatim).unwrap().to_owned())
-                .collect::<Vec<_>>(),
-            vec!(
-                PathBuf::from("another"),
-                PathBuf::from("one"),
-                PathBuf::from("one\\another"),
-                PathBuf::from("one\\another\\deep"),
-                PathBuf::from("three"),
-                PathBuf::from("two")
-            )
-        );
-    }
-
-    // collapse consecutive recursive patterns
-    assert_eq!(
-        glob_vec("r/**/**"),
-        vec!(
-            PathBuf::from("r/another"),
-            PathBuf::from("r/one"),
-            PathBuf::from("r/one/another"),
-            PathBuf::from("r/one/another/deep"),
-            PathBuf::from("r/three"),
-            PathBuf::from("r/two")
-        )
-    );
-
-    assert_eq!(
-        glob_vec("r/**/*"),
-        vec!(
-            PathBuf::from("r/another"),
-            PathBuf::from("r/another/a.md"),
-            PathBuf::from("r/current_dir.md"),
-            PathBuf::from("r/one"),
-            PathBuf::from("r/one/a.md"),
-            PathBuf::from("r/one/another"),
-            PathBuf::from("r/one/another/a.md"),
-            PathBuf::from("r/one/another/deep"),
-            PathBuf::from("r/one/another/deep/spelunking.md"),
-            PathBuf::from("r/three"),
-            PathBuf::from("r/three/c.md"),
-            PathBuf::from("r/two"),
-            PathBuf::from("r/two/b.md")
-        )
-    );
-
-    // followed by a wildcard
-    assert_eq!(
-        glob_vec("r/**/*.md"),
-        vec!(
-            PathBuf::from("r/another/a.md"),
-            PathBuf::from("r/current_dir.md"),
-            PathBuf::from("r/one/a.md"),
-            PathBuf::from("r/one/another/a.md"),
-            PathBuf::from("r/one/another/deep/spelunking.md"),
-            PathBuf::from("r/three/c.md"),
-            PathBuf::from("r/two/b.md")
-        )
-    );
-
-    // followed by a precise pattern
-    assert_eq!(
-        glob_vec("r/one/**/a.md"),
-        vec!(
-            PathBuf::from("r/one/a.md"),
-            PathBuf::from("r/one/another/a.md")
-        )
-    );
-
-    // followed by another recursive pattern
-    // collapses consecutive recursives into one
-    assert_eq!(
-        glob_vec("r/one/**/**/a.md"),
-        vec!(
-            PathBuf::from("r/one/a.md"),
-            PathBuf::from("r/one/another/a.md")
-        )
-    );
-
-    // followed by two precise patterns
-    assert_eq!(
-        glob_vec("r/**/another/a.md"),
-        vec!(
-            PathBuf::from("r/another/a.md"),
-            PathBuf::from("r/one/another/a.md")
-        )
-    );
-
-    assert_eq!(glob_vec(""), Vec::<PathBuf>::new());
-    assert_eq!(glob_vec("."), vec!(PathBuf::from(".")));
-    assert_eq!(glob_vec(".."), vec!(PathBuf::from("..")));
-
-    assert_eq!(glob_vec("aaa"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("aaa/"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("a"), Vec::<PathBuf>::new());
-    assert_eq!(glob_vec("aa"), Vec::<PathBuf>::new());
-    assert_eq!(glob_vec("aaaa"), Vec::<PathBuf>::new());
-
-    assert_eq!(glob_vec("aaa/apple"), vec!(PathBuf::from("aaa/apple")));
-    assert_eq!(glob_vec("aaa/apple/nope"), Vec::<PathBuf>::new());
-
-    // windows should support both / and \ as directory separators
-    if env::consts::FAMILY == "windows" {
-        assert_eq!(glob_vec("aaa\\apple"), vec!(PathBuf::from("aaa/apple")));
-    }
-
-    assert_eq!(
-        glob_vec("???/"),
-        vec!(
-            PathBuf::from("aaa"),
-            PathBuf::from("bbb"),
-            PathBuf::from("ccc"),
-            PathBuf::from("xyz")
-        )
-    );
-
-    assert_eq!(
-        glob_vec("aaa/tomato/tom?to.txt"),
-        vec!(
-            PathBuf::from("aaa/tomato/tomato.txt"),
-            PathBuf::from("aaa/tomato/tomoto.txt")
-        )
-    );
-
-    assert_eq!(
-        glob_vec("xyz/?"),
-        vec!(
-            PathBuf::from("xyz/x"),
-            PathBuf::from("xyz/y"),
-            PathBuf::from("xyz/z")
-        )
-    );
-
-    assert_eq!(glob_vec("a*"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("*a*"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("a*a"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("aaa*"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("*aaa"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("*aaa*"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("*a*a*a*"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("aaa*/"), vec!(PathBuf::from("aaa")));
-
-    assert_eq!(
-        glob_vec("aaa/*"),
-        vec!(
-            PathBuf::from("aaa/apple"),
-            PathBuf::from("aaa/green_apple"),
-            PathBuf::from("aaa/kazusa"),
-            PathBuf::from("aaa/orange"),
-            PathBuf::from("aaa/tomato"),
-        )
-    );
-
-    assert_eq!(
-        glob_vec("aaa/*a*"),
-        vec!(
-            PathBuf::from("aaa/apple"),
-            PathBuf::from("aaa/green_apple"),
-            PathBuf::from("aaa/kazusa"),
-            PathBuf::from("aaa/orange"),
-            PathBuf::from("aaa/tomato")
-        )
-    );
-
-    assert_eq!(
-        glob_vec("*/*/*.txt"),
-        vec!(
-            PathBuf::from("aaa/tomato/tomato.txt"),
-            PathBuf::from("aaa/tomato/tomoto.txt")
-        )
-    );
-
-    assert_eq!(
-        glob_vec("*/*/t[aob]m?to[.]t[!y]t"),
-        vec!(
-            PathBuf::from("aaa/tomato/tomato.txt"),
-            PathBuf::from("aaa/tomato/tomoto.txt")
-        )
-    );
-
-    assert_eq!(glob_vec("./aaa"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("./*"), glob_vec("*"));
-    assert_eq!(glob_vec("*/..").pop().unwrap(), PathBuf::from("xyz/.."));
-    assert_eq!(glob_vec("aaa/../bbb"), vec!(PathBuf::from("aaa/../bbb")));
-    assert_eq!(glob_vec("nonexistent/../bbb"), Vec::<PathBuf>::new());
-    assert_eq!(glob_vec("aaa/tomato/tomato.txt/.."), Vec::<PathBuf>::new());
-
-    assert_eq!(glob_vec("aaa/tomato/tomato.txt/"), Vec::<PathBuf>::new());
-
-    // Ensure to find a broken symlink.
-    assert_eq!(glob_vec("aaa/kazusa"), vec!(PathBuf::from("aaa/kazusa")));
-
-    assert_eq!(glob_vec("aa[a]"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("aa[abc]"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("a[bca]a"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("aa[b]"), Vec::<PathBuf>::new());
-    assert_eq!(glob_vec("aa[xyz]"), Vec::<PathBuf>::new());
-    assert_eq!(glob_vec("aa[]]"), Vec::<PathBuf>::new());
-
-    assert_eq!(glob_vec("aa[!b]"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("aa[!bcd]"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("a[!bcd]a"), vec!(PathBuf::from("aaa")));
-    assert_eq!(glob_vec("aa[!a]"), Vec::<PathBuf>::new());
-    assert_eq!(glob_vec("aa[!abc]"), Vec::<PathBuf>::new());
-
-    assert_eq!(
-        glob_vec("bbb/specials/[[]"),
-        vec!(PathBuf::from("bbb/specials/["))
-    );
-    assert_eq!(
-        glob_vec("bbb/specials/!"),
-        vec!(PathBuf::from("bbb/specials/!"))
-    );
-    assert_eq!(
-        glob_vec("bbb/specials/[]]"),
-        vec!(PathBuf::from("bbb/specials/]"))
-    );
-
-    mk_file("i", true);
-    mk_file("i/qwe", true);
-    mk_file("i/qwe/.aaa", false);
-    mk_file("i/qwe/.bbb", true);
-    mk_file("i/qwe/.bbb/ccc", false);
-    mk_file("i/qwe/.bbb/.ddd", false);
-    mk_file("i/qwe/eee", false);
-
-    let options = glob::MatchOptions {
-        case_sensitive: false,
-        require_literal_separator: true,
-        require_literal_leading_dot: true,
-    };
-    assert_eq!(glob_with_vec("i/**/*a*", options), Vec::<PathBuf>::new());
-    assert_eq!(glob_with_vec("i/**/*c*", options), Vec::<PathBuf>::new());
-    assert_eq!(glob_with_vec("i/**/*d*", options), Vec::<PathBuf>::new());
-    assert_eq!(
-        glob_with_vec("i/**/*e*", options),
-        vec!(PathBuf::from("i/qwe"), PathBuf::from("i/qwe/eee"))
-    );
-
-    if env::consts::FAMILY != "windows" {
-        assert_eq!(
-            glob_vec("bbb/specials/[*]"),
-            vec!(PathBuf::from("bbb/specials/*"))
-        );
-        assert_eq!(
-            glob_vec("bbb/specials/[?]"),
-            vec!(PathBuf::from("bbb/specials/?"))
-        );
-    }
-
-    if env::consts::FAMILY == "windows" {
-        assert_eq!(
-            glob_vec("bbb/specials/[![]"),
-            vec!(
-                PathBuf::from("bbb/specials/!"),
-                PathBuf::from("bbb/specials/]")
-            )
-        );
-
-        assert_eq!(
-            glob_vec("bbb/specials/[!]]"),
-            vec!(
-                PathBuf::from("bbb/specials/!"),
-                PathBuf::from("bbb/specials/[")
-            )
-        );
-
-        assert_eq!(
-            glob_vec("bbb/specials/[!!]"),
-            vec!(
-                PathBuf::from("bbb/specials/["),
-                PathBuf::from("bbb/specials/]")
-            )
-        );
-    } else {
-        assert_eq!(
-            glob_vec("bbb/specials/[![]"),
-            vec!(
-                PathBuf::from("bbb/specials/!"),
-                PathBuf::from("bbb/specials/*"),
-                PathBuf::from("bbb/specials/?"),
-                PathBuf::from("bbb/specials/]")
-            )
-        );
-
-        assert_eq!(
-            glob_vec("bbb/specials/[!]]"),
-            vec!(
-                PathBuf::from("bbb/specials/!"),
-                PathBuf::from("bbb/specials/*"),
-                PathBuf::from("bbb/specials/?"),
-                PathBuf::from("bbb/specials/[")
-            )
-        );
-
-        assert_eq!(
-            glob_vec("bbb/specials/[!!]"),
-            vec!(
-                PathBuf::from("bbb/specials/*"),
-                PathBuf::from("bbb/specials/?"),
-                PathBuf::from("bbb/specials/["),
-                PathBuf::from("bbb/specials/]")
-            )
-        );
-
-        assert_eq!(
-            glob_vec("bbb/specials/[!*]"),
-            vec!(
-                PathBuf::from("bbb/specials/!"),
-                PathBuf::from("bbb/specials/?"),
-                PathBuf::from("bbb/specials/["),
-                PathBuf::from("bbb/specials/]")
-            )
-        );
-
-        assert_eq!(
-            glob_vec("bbb/specials/[!?]"),
-            vec!(
-                PathBuf::from("bbb/specials/!"),
-                PathBuf::from("bbb/specials/*"),
-                PathBuf::from("bbb/specials/["),
-                PathBuf::from("bbb/specials/]")
-            )
-        );
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/triagebot.toml b/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/triagebot.toml
deleted file mode 100644
index fa0824a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/glob-0.3.2/triagebot.toml
+++ /dev/null
@@ -1 +0,0 @@
-[assign]
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.cargo_vcs_info.json
deleted file mode 100644
index b6ba6f16..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "b8bd42f441f8e2987ded60fbaa809e7bf9d17a8e"
-  },
-  "path_in_vcs": "hex-literal"
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.gitignore b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.gitignore
deleted file mode 100644
index 143b1ca0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-
-/target/
-**/*.rs.bk
-Cargo.lock
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/CHANGELOG.md
deleted file mode 100644
index 260838e6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/CHANGELOG.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## 0.4.1 (2023-04-05)
-### Changed
-- Enforce const evaluation ([#889])
-
-[#889]: https://github.com/RustCrypto/utils/pull/889
-
-## 0.4.0 (2023-04-02)
-### Changed
-- Disallow comments inside hex strings ([#816])
-- Migrate to 2021 edition and bump MSRV to 1.57 ([#816])
-- Use CTFE instead of proc macro ([#816])
-
-[#816]: https://github.com/RustCrypto/utils/pull/816
-
-## 0.3.4 (2021-11-11)
-### Changed
-- Provide more info in `panic!` messages ([#664])
-- Minor changes in the comments filtration code ([#666])
-
-### Added
-- New tests for the `hex!()` macro and internal documentation ([#664])
-
-### Fixed
-- Make `hex!()` error when forward slash encountered as last byte ([#665])
-
-[#664]: https://github.com/RustCrypto/utils/pull/664
-[#665]: https://github.com/RustCrypto/utils/pull/665
-[#666]: https://github.com/RustCrypto/utils/pull/666
-
-## 0.3.3 (2021-07-17)
-### Added
-- Accept sequence of string literals ([#519])
-
-[#519]: https://github.com/RustCrypto/utils/pull/519
-
-## 0.3.2 (2021-07-02)
-### Added
-- Allow line (`//`) and block (`/* */`) comments ([#512])
-
-[#512]: https://github.com/RustCrypto/utils/pull/512
-
-## 0.3.1 (2020-08-01)
-### Added
-- Documentation for the `hex!` macro ([#73])
-
-[#73]: https://github.com/RustCrypto/utils/pull/73
-
-## 0.3.0 (2020-07-16)
-### Changed
-- MSRV bump to 1.45 ([#53])
-
-[#53]: https://github.com/RustCrypto/utils/pull/53
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/Cargo.toml
deleted file mode 100644
index a861081..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/Cargo.toml
+++ /dev/null
@@ -1,27 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.57"
-name = "hex-literal"
-version = "0.4.1"
-authors = ["RustCrypto Developers"]
-description = "Macro for converting hexadecimal string to a byte array at compile time"
-documentation = "https://docs.rs/hex-literal"
-readme = "README.md"
-keywords = [
-    "hex",
-    "literals",
-]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/RustCrypto/utils"
-resolver = "1"
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/Cargo.toml.orig
deleted file mode 100644
index ad07ffb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/Cargo.toml.orig
+++ /dev/null
@@ -1,11 +0,0 @@
-[package]
-name = "hex-literal"
-version = "0.4.1"
-authors = ["RustCrypto Developers"]
-license = "MIT OR Apache-2.0"
-description = "Macro for converting hexadecimal string to a byte array at compile time"
-documentation = "https://docs.rs/hex-literal"
-repository = "https://github.com/RustCrypto/utils"
-keywords = ["hex", "literals"]
-edition = "2021"
-rust-version = "1.57"
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/LICENSE-APACHE
deleted file mode 100644
index 78173fa..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/LICENSE-MIT
deleted file mode 100644
index b03ace9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/LICENSE-MIT
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2018 Artyom Pavlov
-Copyright (c) 2018 The RustCrypto Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/README.md b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/README.md
deleted file mode 100644
index eb06be5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/README.md
+++ /dev/null
@@ -1,99 +0,0 @@
-# [RustCrypto]: hex-literal
-
-[![Crate][crate-image]][crate-link]
-[![Docs][docs-image]][docs-link]
-![Apache 2.0/MIT Licensed][license-image]
-![MSRV][rustc-image]
-[![Build Status][build-image]][build-link]
-
-This crate provides the `hex!` macro for converting hexadecimal string literals to a byte array at compile time.
-
-It accepts the following characters in the input string:
-
-- `'0'...'9'`, `'a'...'f'`, `'A'...'F'` — hex characters which will be used in construction of the output byte array
-- `' '`, `'\r'`, `'\n'`, `'\t'` — formatting characters which will be ignored
-
-# Examples
-```rust
-use hex_literal::hex;
-
-// The macro can be used in const contexts
-const DATA: [u8; 4] = hex!("01020304");
-assert_eq!(DATA, [1, 2, 3, 4]);
-
-// Both upper and lower hex values are supported
-assert_eq!(hex!("a1 b2 c3 d4"), [0xA1, 0xB2, 0xC3, 0xD4]);
-assert_eq!(hex!("E5 E6 90 92"), [0xE5, 0xE6, 0x90, 0x92]);
-assert_eq!(hex!("0a0B 0C0d"), [10, 11, 12, 13]);
-
-// Multi-line literals
-let bytes1 = hex!("
-    00010203 04050607
-    08090a0b 0c0d0e0f
-");
-assert_eq!(
-    bytes1,
-    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
-);
-
-// It's possible to use several literals
-// (results will be concatenated)
-let bytes2 = hex!(
-    "00010203 04050607" // first half
-    "08090a0b 0c0d0e0f" // second half
-);
-assert_eq!(bytes1, bytes2);
-```
-
-Using an unsupported character inside literals will result in a compilation error:
-```rust,compile_fail
-hex_literal::hex!("АА"); // Cyrillic "А"
-hex_literal::hex!("11 22"); // Japanese space
-```
-
-ĐĄomments inside literals are not supported:
-```rust,compile_fail
-hex_literal::hex!("0123 // foo");
-```
-
-Each literal must contain an even number of hex characters:
-```rust,compile_fail
-hex_literal::hex!(
-    "01234"
-    "567"
-);
-```
-
-## Minimum Supported Rust Version
-
-Rust **1.57** or newer.
-
-In the future, we reserve the right to change MSRV (i.e. MSRV is out-of-scope for this crate's SemVer guarantees), however when we do it will be accompanied by a minor version bump.
-
-## License
-
-Licensed under either of:
-
-* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
-* [MIT license](http://opensource.org/licenses/MIT)
-
-at your option.
-
-### Contribution
-
-Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
-
-[//]: # (badges)
-
-[crate-image]: https://img.shields.io/crates/v/hex-literal.svg
-[crate-link]: https://crates.io/crates/hex-literal
-[docs-image]: https://docs.rs/hex-literal/badge.svg
-[docs-link]: https://docs.rs/hex-literal/
-[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
-[rustc-image]: https://img.shields.io/badge/rustc-1.57+-blue.svg
-[build-image]: https://github.com/RustCrypto/utils/actions/workflows/hex-literal.yml/badge.svg
-[build-link]: https://github.com/RustCrypto/utils/actions/workflows/hex-literal.yml
-
-[//]: # (general links)
-
-[RustCrypto]: https://github.com/RustCrypto
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/src/lib.rs
deleted file mode 100644
index f6c8fac..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/src/lib.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-#![doc = include_str!("../README.md")]
-#![no_std]
-#![doc(
-    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
-    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
-)]
-
-const fn next_hex_char(string: &[u8], mut pos: usize) -> Option<(u8, usize)> {
-    while pos < string.len() {
-        let raw_val = string[pos];
-        pos += 1;
-        let val = match raw_val {
-            b'0'..=b'9' => raw_val - 48,
-            b'A'..=b'F' => raw_val - 55,
-            b'a'..=b'f' => raw_val - 87,
-            b' ' | b'\r' | b'\n' | b'\t' => continue,
-            0..=127 => panic!("Encountered invalid ASCII character"),
-            _ => panic!("Encountered non-ASCII character"),
-        };
-        return Some((val, pos));
-    }
-    None
-}
-
-const fn next_byte(string: &[u8], pos: usize) -> Option<(u8, usize)> {
-    let (half1, pos) = match next_hex_char(string, pos) {
-        Some(v) => v,
-        None => return None,
-    };
-    let (half2, pos) = match next_hex_char(string, pos) {
-        Some(v) => v,
-        None => panic!("Odd number of hex characters"),
-    };
-    Some(((half1 << 4) + half2, pos))
-}
-
-/// Compute length of a byte array which will be decoded from the strings.
-///
-/// This function is an implementation detail and SHOULD NOT be called directly!
-#[doc(hidden)]
-pub const fn len(strings: &[&[u8]]) -> usize {
-    let mut i = 0;
-    let mut len = 0;
-    while i < strings.len() {
-        let mut pos = 0;
-        while let Some((_, new_pos)) = next_byte(strings[i], pos) {
-            len += 1;
-            pos = new_pos;
-        }
-        i += 1;
-    }
-    len
-}
-
-/// Decode hex strings into a byte array of pre-computed length.
-///
-/// This function is an implementation detail and SHOULD NOT be called directly!
-#[doc(hidden)]
-pub const fn decode<const LEN: usize>(strings: &[&[u8]]) -> [u8; LEN] {
-    let mut i = 0;
-    let mut buf = [0u8; LEN];
-    let mut buf_pos = 0;
-    while i < strings.len() {
-        let mut pos = 0;
-        while let Some((byte, new_pos)) = next_byte(strings[i], pos) {
-            buf[buf_pos] = byte;
-            buf_pos += 1;
-            pos = new_pos;
-        }
-        i += 1;
-    }
-    if LEN != buf_pos {
-        panic!("Length mismatch. Please report this bug.");
-    }
-    buf
-}
-
-/// Macro for converting sequence of string literals containing hex-encoded data
-/// into an array of bytes.
-#[macro_export]
-macro_rules! hex {
-    ($($s:literal)*) => {{
-        const STRINGS: &[&'static [u8]] = &[$($s.as_bytes(),)*];
-        const LEN: usize = $crate::len(STRINGS);
-        const RES: [u8; LEN] = $crate::decode(STRINGS);
-        RES
-    }};
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/tests/basic.rs b/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/tests/basic.rs
deleted file mode 100644
index b4af38ec..0000000
--- a/third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/tests/basic.rs
+++ /dev/null
@@ -1,83 +0,0 @@
-use hex_literal::hex;
-
-#[test]
-fn single_literal() {
-    assert_eq!(hex!("ff e4"), [0xff, 0xe4]);
-}
-
-#[test]
-fn empty() {
-    let nothing: [u8; 0] = hex!();
-    let empty_literals: [u8; 0] = hex!("" "" "");
-    let expected: [u8; 0] = [];
-    assert_eq!(nothing, expected);
-    assert_eq!(empty_literals, expected);
-}
-
-#[test]
-fn upper_case() {
-    assert_eq!(hex!("AE DF 04 B2"), [0xae, 0xdf, 0x04, 0xb2]);
-    assert_eq!(hex!("FF BA 8C 00 01"), [0xff, 0xba, 0x8c, 0x00, 0x01]);
-}
-
-#[test]
-fn mixed_case() {
-    assert_eq!(hex!("bF dd E4 Cd"), [0xbf, 0xdd, 0xe4, 0xcd]);
-}
-
-#[test]
-fn multiple_literals() {
-    assert_eq!(
-        hex!(
-            "01 dd f7 7f"
-            "ee f0 d8"
-        ),
-        [0x01, 0xdd, 0xf7, 0x7f, 0xee, 0xf0, 0xd8]
-    );
-    assert_eq!(
-        hex!(
-            "ff"
-            "e8 d0"
-            ""
-            "01 1f"
-            "ab"
-        ),
-        [0xff, 0xe8, 0xd0, 0x01, 0x1f, 0xab]
-    );
-}
-
-#[test]
-fn no_spacing() {
-    assert_eq!(hex!("abf0d8bb0f14"), [0xab, 0xf0, 0xd8, 0xbb, 0x0f, 0x14]);
-    assert_eq!(
-        hex!("09FFd890cbcCd1d08F"),
-        [0x09, 0xff, 0xd8, 0x90, 0xcb, 0xcc, 0xd1, 0xd0, 0x8f]
-    );
-}
-
-#[test]
-fn allows_various_spacing() {
-    // newlines
-    assert_eq!(
-        hex!(
-            "f
-            f
-            d
-            0
-            e
-            
-            8
-            "
-        ),
-        [0xff, 0xd0, 0xe8]
-    );
-    // tabs
-    assert_eq!(hex!("9f	d		1		f07	3		01	"), [0x9f, 0xd1, 0xf0, 0x73, 0x01]);
-    // spaces
-    assert_eq!(hex!(" e    e d0  9 1   f  f  "), [0xee, 0xd0, 0x91, 0xff]);
-}
-
-#[test]
-fn can_use_const() {
-    const _: [u8; 4] = hex!("ff d3 01 7f");
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.cargo_vcs_info.json
deleted file mode 100644
index e0274016..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "9870c06e6c772daaad7ab612faab29130753e41c"
-  },
-  "path_in_vcs": ""
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.gitignore b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.gitignore
deleted file mode 100644
index 8f7a426..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-target
-Cargo.lock
-bench-log
-.*.swp
-wiki
-tags
-examples/debug.rs
-tmp/
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.vim/coc-settings.json b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.vim/coc-settings.json
deleted file mode 100644
index d756767..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/.vim/coc-settings.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "rust-analyzer.linkedProjects": [
-    "fuzz/Cargo.toml",
-    "Cargo.toml"
-  ]
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/CHANGELOG.md
deleted file mode 100644
index b88e2aa4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/CHANGELOG.md
+++ /dev/null
@@ -1,1663 +0,0 @@
-1.11.1 (2024-10-24)
-===================
-This is a new patch release of `regex` that fixes compilation on nightly
-Rust when the unstable `pattern` crate feature is enabled. Users on nightly
-Rust without this feature enabled are unaffected.
-
-Bug fixes:
-
-* [BUG #1231](https://github.com/rust-lang/regex/issues/1231):
-Fix the `Pattern` trait implementation as a result of nightly API breakage.
-
-
-1.11.0 (2024-09-29)
-===================
-This is a new minor release of `regex` that brings in an update to the
-Unicode Character Database. Specifically, this updates the Unicode data
-used by `regex` internally to the version 16 release.
-
-New features:
-
-* [FEATURE #1228](https://github.com/rust-lang/regex/pull/1228):
-Add new `regex::SetMatches::matched_all` method.
-* [FEATURE #1229](https://github.com/rust-lang/regex/pull/1229):
-Update to Unicode Character Database (UCD) version 16.
-
-
-1.10.6 (2024-08-02)
-===================
-This is a new patch release with a fix for the `unstable` crate feature that
-enables `std::str::Pattern` trait integration.
-
-Bug fixes:
-
-* [BUG #1219](https://github.com/rust-lang/regex/pull/1219):
-Fix the `Pattern` trait implementation as a result of nightly API breakage.
-
-
-1.10.5 (2024-06-09)
-===================
-This is a new patch release with some minor fixes.
-
-Bug fixes:
-
-* [BUG #1203](https://github.com/rust-lang/regex/pull/1203):
-Escape invalid UTF-8 when in the `Debug` impl of `regex::bytes::Match`.
-
-
-1.10.4 (2024-03-22)
-===================
-This is a new patch release with some minor fixes.
-
-* [BUG #1169](https://github.com/rust-lang/regex/issues/1169):
-Fixes a bug with compiling a reverse NFA automaton in `regex-automata`.
-* [BUG #1178](https://github.com/rust-lang/regex/pull/1178):
-Clarifies that when `Cow::Borrowed` is returned from replace APIs, it is
-equivalent to the input.
-
-
-1.10.3 (2024-01-21)
-===================
-This is a new patch release that fixes the feature configuration of optional
-dependencies, and fixes an unsound use of bounds check elision.
-
-Bug fixes:
-
-* [BUG #1147](https://github.com/rust-lang/regex/issues/1147):
-Set `default-features=false` for the `memchr` and `aho-corasick` dependencies.
-* [BUG #1154](https://github.com/rust-lang/regex/pull/1154):
-Fix unsound bounds check elision.
-
-
-1.10.2 (2023-10-16)
-===================
-This is a new patch release that fixes a search regression where incorrect
-matches could be reported.
-
-Bug fixes:
-
-* [BUG #1110](https://github.com/rust-lang/regex/issues/1110):
-Revert broadening of reverse suffix literal optimization introduced in 1.10.1.
-
-
-1.10.1 (2023-10-14)
-===================
-This is a new patch release with a minor increase in the number of valid
-patterns and a broadening of some literal optimizations.
-
-New features:
-
-* [FEATURE 04f5d7be](https://github.com/rust-lang/regex/commit/04f5d7be4efc542864cc400f5d43fbea4eb9bab6):
-Loosen ASCII-compatible rules such that regexes like `(?-u:☃)` are now allowed.
-
-Performance improvements:
-
-* [PERF 8a8d599f](https://github.com/rust-lang/regex/commit/8a8d599f9d2f2d78e9ad84e4084788c2d563afa5):
-Broader the reverse suffix optimization to apply in more cases.
-
-
-1.10.0 (2023-10-09)
-===================
-This is a new minor release of `regex` that adds support for start and end
-word boundary assertions. That is, `\<` and `\>`. The minimum supported Rust
-version has also been raised to 1.65, which was released about one year ago.
-
-The new word boundary assertions are:
-
-* `\<` or `\b{start}`: a Unicode start-of-word boundary (`\W|\A` on the left,
-`\w` on the right).
-* `\>` or `\b{end}`: a Unicode end-of-word boundary (`\w` on the left, `\W|\z`
-on the right)).
-* `\b{start-half}`: half of a Unicode start-of-word boundary (`\W|\A` on the
-left).
-* `\b{end-half}`: half of a Unicode end-of-word boundary (`\W|\z` on the
-right).
-
-The `\<` and `\>` are GNU extensions to POSIX regexes. They have been added
-to the `regex` crate because they enjoy somewhat broad support in other regex
-engines as well (for example, vim). The `\b{start}` and `\b{end}` assertions
-are aliases for `\<` and `\>`, respectively.
-
-The `\b{start-half}` and `\b{end-half}` assertions are not found in any
-other regex engine (although regex engines with general look-around support
-can certainly express them). They were added principally to support the
-implementation of word matching in grep programs, where one generally wants to
-be a bit more flexible in what is considered a word boundary.
-
-New features:
-
-* [FEATURE #469](https://github.com/rust-lang/regex/issues/469):
-Add support for `\<` and `\>` word boundary assertions.
-* [FEATURE(regex-automata) #1031](https://github.com/rust-lang/regex/pull/1031):
-DFAs now have a `start_state` method that doesn't use an `Input`.
-
-Performance improvements:
-
-* [PERF #1051](https://github.com/rust-lang/regex/pull/1051):
-Unicode character class operations have been optimized in `regex-syntax`.
-* [PERF #1090](https://github.com/rust-lang/regex/issues/1090):
-Make patterns containing lots of literal characters use less memory.
-
-Bug fixes:
-
-* [BUG #1046](https://github.com/rust-lang/regex/issues/1046):
-Fix a bug that could result in incorrect match spans when using a Unicode word
-boundary and searching non-ASCII strings.
-* [BUG(regex-syntax) #1047](https://github.com/rust-lang/regex/issues/1047):
-Fix panics that can occur in `Ast->Hir` translation (not reachable from `regex`
-crate).
-* [BUG(regex-syntax) #1088](https://github.com/rust-lang/regex/issues/1088):
-Remove guarantees in the API that connect the `u` flag with a specific HIR
-representation.
-
-`regex-automata` breaking change release:
-
-This release includes a `regex-automata 0.4.0` breaking change release, which
-was necessary in order to support the new word boundary assertions. For
-example, the `Look` enum has new variants and the `LookSet` type now uses `u32`
-instead of `u16` to represent a bitset of look-around assertions. These are
-overall very minor changes, and most users of `regex-automata` should be able
-to move to `0.4` from `0.3` without any changes at all.
-
-`regex-syntax` breaking change release:
-
-This release also includes a `regex-syntax 0.8.0` breaking change release,
-which, like `regex-automata`, was necessary in order to support the new word
-boundary assertions. This release also includes some changes to the `Ast`
-type to reduce heap usage in some cases. If you are using the `Ast` type
-directly, your code may require some minor modifications. Otherwise, users of
-`regex-syntax 0.7` should be able to migrate to `0.8` without any code changes.
-
-`regex-lite` release:
-
-The `regex-lite 0.1.1` release contains support for the new word boundary
-assertions. There are no breaking changes.
-
-
-1.9.6 (2023-09-30)
-==================
-This is a patch release that fixes a panic that can occur when the default
-regex size limit is increased to a large number.
-
-* [BUG aa4e4c71](https://github.com/rust-lang/regex/commit/aa4e4c7120b0090ce0624e3c42a2ed06dd8b918a):
-Fix a bug where computing the maximum haystack length for the bounded
-backtracker could result underflow and thus provoke a panic later in a search
-due to a broken invariant.
-
-
-1.9.5 (2023-09-02)
-==================
-This is a patch release that hopefully mostly fixes a performance bug that
-occurs when sharing a regex across multiple threads.
-
-Issue [#934](https://github.com/rust-lang/regex/issues/934)
-explains this in more detail. It is [also noted in the crate
-documentation](https://docs.rs/regex/latest/regex/#sharing-a-regex-across-threads-can-result-in-contention).
-The bug can appear when sharing a regex across multiple threads simultaneously,
-as might be the case when using a regex from a `OnceLock`, `lazy_static` or
-similar primitive. Usually high contention only results when using many threads
-to execute searches on small haystacks.
-
-One can avoid the contention problem entirely through one of two methods.
-The first is to use lower level APIs from `regex-automata` that require passing
-state explicitly, such as [`meta::Regex::search_with`](https://docs.rs/regex-automata/latest/regex_automata/meta/struct.Regex.html#method.search_with).
-The second is to clone a regex and send it to other threads explicitly. This
-will not use any additional memory usage compared to sharing the regex. The
-only downside of this approach is that it may be less convenient, for example,
-it won't work with things like `OnceLock` or `lazy_static` or `once_cell`.
-
-With that said, as of this release, the contention performance problems have
-been greatly reduced. This was achieved by changing the free-list so that it
-was sharded across threads, and that ensuring each sharded mutex occupies a
-single cache line to mitigate false sharing. So while contention may still
-impact performance in some cases, it should be a lot better now.
-
-Because of the changes to how the free-list works, please report any issues you
-find with this release. That not only includes search time regressions but also
-significant regressions in memory usage. Reporting improvements is also welcome
-as well! If possible, provide a reproduction.
-
-Bug fixes:
-
-* [BUG #934](https://github.com/rust-lang/regex/issues/934):
-Fix a performance bug where high contention on a single regex led to massive
-slow downs.
-
-
-1.9.4 (2023-08-26)
-==================
-This is a patch release that fixes a bug where `RegexSet::is_match(..)` could
-incorrectly return false (even when `RegexSet::matches(..).matched_any()`
-returns true).
-
-Bug fixes:
-
-* [BUG #1070](https://github.com/rust-lang/regex/issues/1070):
-Fix a bug where a prefilter was incorrectly configured for a `RegexSet`.
-
-
-1.9.3 (2023-08-05)
-==================
-This is a patch release that fixes a bug where some searches could result in
-incorrect match offsets being reported. It is difficult to characterize the
-types of regexes susceptible to this bug. They generally involve patterns
-that contain no prefix or suffix literals, but have an inner literal along with
-a regex prefix that can conditionally match.
-
-Bug fixes:
-
-* [BUG #1060](https://github.com/rust-lang/regex/issues/1060):
-Fix a bug with the reverse inner literal optimization reporting incorrect match
-offsets.
-
-
-1.9.2 (2023-08-05)
-==================
-This is a patch release that fixes another memory usage regression. This
-particular regression occurred only when using a `RegexSet`. In some cases,
-much more heap memory (by one or two orders of magnitude) was allocated than in
-versions prior to 1.9.0.
-
-Bug fixes:
-
-* [BUG #1059](https://github.com/rust-lang/regex/issues/1059):
-Fix a memory usage regression when using a `RegexSet`.
-
-
-1.9.1 (2023-07-07)
-==================
-This is a patch release which fixes a memory usage regression. In the regex
-1.9 release, one of the internal engines used a more aggressive allocation
-strategy than what was done previously. This patch release reverts to the
-prior on-demand strategy.
-
-Bug fixes:
-
-* [BUG #1027](https://github.com/rust-lang/regex/issues/1027):
-Change the allocation strategy for the backtracker to be less aggressive.
-
-
-1.9.0 (2023-07-05)
-==================
-This release marks the end of a [years long rewrite of the regex crate
-internals](https://github.com/rust-lang/regex/issues/656). Since this is
-such a big release, please report any issues or regressions you find. We would
-also love to hear about improvements as well.
-
-In addition to many internal improvements that should hopefully result in
-"my regex searches are faster," there have also been a few API additions:
-
-* A new `Captures::extract` method for quickly accessing the substrings
-that match each capture group in a regex.
-* A new inline flag, `R`, which enables CRLF mode. This makes `.` match any
-Unicode scalar value except for `\r` and `\n`, and also makes `(?m:^)` and
-`(?m:$)` match after and before both `\r` and `\n`, respectively, but never
-between a `\r` and `\n`.
-* `RegexBuilder::line_terminator` was added to further customize the line
-terminator used by `(?m:^)` and `(?m:$)` to be any arbitrary byte.
-* The `std` Cargo feature is now actually optional. That is, the `regex` crate
-can be used without the standard library.
-* Because `regex 1.9` may make binary size and compile times even worse, a
-new experimental crate called `regex-lite` has been published. It prioritizes
-binary size and compile times over functionality (like Unicode) and
-performance. It shares no code with the `regex` crate.
-
-New features:
-
-* [FEATURE #244](https://github.com/rust-lang/regex/issues/244):
-One can opt into CRLF mode via the `R` flag.
-e.g., `(?mR:$)` matches just before `\r\n`.
-* [FEATURE #259](https://github.com/rust-lang/regex/issues/259):
-Multi-pattern searches with offsets can be done with `regex-automata 0.3`.
-* [FEATURE #476](https://github.com/rust-lang/regex/issues/476):
-`std` is now an optional feature. `regex` may be used with only `alloc`.
-* [FEATURE #644](https://github.com/rust-lang/regex/issues/644):
-`RegexBuilder::line_terminator` configures how `(?m:^)` and `(?m:$)` behave.
-* [FEATURE #675](https://github.com/rust-lang/regex/issues/675):
-Anchored search APIs are now available in `regex-automata 0.3`.
-* [FEATURE #824](https://github.com/rust-lang/regex/issues/824):
-Add new `Captures::extract` method for easier capture group access.
-* [FEATURE #961](https://github.com/rust-lang/regex/issues/961):
-Add `regex-lite` crate with smaller binary sizes and faster compile times.
-* [FEATURE #1022](https://github.com/rust-lang/regex/pull/1022):
-Add `TryFrom` implementations for the `Regex` type.
-
-Performance improvements:
-
-* [PERF #68](https://github.com/rust-lang/regex/issues/68):
-Added a one-pass DFA engine for faster capture group matching.
-* [PERF #510](https://github.com/rust-lang/regex/issues/510):
-Inner literals are now used to accelerate searches, e.g., `\w+@\w+` will scan
-for `@`.
-* [PERF #787](https://github.com/rust-lang/regex/issues/787),
-[PERF #891](https://github.com/rust-lang/regex/issues/891):
-Makes literal optimizations apply to regexes of the form `\b(foo|bar|quux)\b`.
-
-(There are many more performance improvements as well, but not all of them have
-specific issues devoted to them.)
-
-Bug fixes:
-
-* [BUG #429](https://github.com/rust-lang/regex/issues/429):
-Fix matching bugs related to `\B` and inconsistencies across internal engines.
-* [BUG #517](https://github.com/rust-lang/regex/issues/517):
-Fix matching bug with capture groups.
-* [BUG #579](https://github.com/rust-lang/regex/issues/579):
-Fix matching bug with word boundaries.
-* [BUG #779](https://github.com/rust-lang/regex/issues/779):
-Fix bug where some regexes like `(re)+` were not equivalent to `(re)(re)*`.
-* [BUG #850](https://github.com/rust-lang/regex/issues/850):
-Fix matching bug inconsistency between NFA and DFA engines.
-* [BUG #921](https://github.com/rust-lang/regex/issues/921):
-Fix matching bug where literal extraction got confused by `$`.
-* [BUG #976](https://github.com/rust-lang/regex/issues/976):
-Add documentation to replacement routines about dealing with fallibility.
-* [BUG #1002](https://github.com/rust-lang/regex/issues/1002):
-Use corpus rejection in fuzz testing.
-
-
-1.8.4 (2023-06-05)
-==================
-This is a patch release that fixes a bug where `(?-u:\B)` was allowed in
-Unicode regexes, despite the fact that the current matching engines can report
-match offsets between the code units of a single UTF-8 encoded codepoint. That
-in turn means that match offsets that split a codepoint could be reported,
-which in turn results in panicking when one uses them to slice a `&str`.
-
-This bug occurred in the transition to `regex 1.8` because the underlying
-syntactical error that prevented this regex from compiling was intentionally
-removed. That's because `(?-u:\B)` will be permitted in Unicode regexes in
-`regex 1.9`, but the matching engines will guarantee to never report match
-offsets that split a codepoint. When the underlying syntactical error was
-removed, no code was added to ensure that `(?-u:\B)` didn't compile in the
-`regex 1.8` transition release. This release, `regex 1.8.4`, adds that code
-such that `Regex::new(r"(?-u:\B)")` returns to the `regex <1.8` behavior of
-not compiling. (A `bytes::Regex` can still of course compile it.)
-
-Bug fixes:
-
-* [BUG #1006](https://github.com/rust-lang/regex/issues/1006):
-Fix a bug where `(?-u:\B)` was allowed in Unicode regexes, and in turn could
-lead to match offsets that split a codepoint in `&str`.
-
-
-1.8.3 (2023-05-25)
-==================
-This is a patch release that fixes a bug where the regex would report a
-match at every position even when it shouldn't. This could occur in a very
-small subset of regexes, usually an alternation of simple literals that
-have particular properties. (See the issue linked below for a more precise
-description.)
-
-Bug fixes:
-
-* [BUG #999](https://github.com/rust-lang/regex/issues/999):
-Fix a bug where a match at every position is erroneously reported.
-
-
-1.8.2 (2023-05-22)
-==================
-This is a patch release that fixes a bug where regex compilation could panic
-in debug mode for regexes with large counted repetitions. For example,
-`a{2147483516}{2147483416}{5}` resulted in an integer overflow that wrapped
-in release mode but panicking in debug mode. Despite the unintended wrapping
-arithmetic in release mode, it didn't cause any other logical bugs since the
-errant code was for new analysis that wasn't used yet.
-
-Bug fixes:
-
-* [BUG #995](https://github.com/rust-lang/regex/issues/995):
-Fix a bug where regex compilation with large counted repetitions could panic.
-
-
-1.8.1 (2023-04-21)
-==================
-This is a patch release that fixes a bug where a regex match could be reported
-where none was found. Specifically, the bug occurs when a pattern contains some
-literal prefixes that could be extracted _and_ an optional word boundary in the
-prefix.
-
-Bug fixes:
-
-* [BUG #981](https://github.com/rust-lang/regex/issues/981):
-Fix a bug where a word boundary could interact with prefix literal
-optimizations and lead to a false positive match.
-
-
-1.8.0 (2023-04-20)
-==================
-This is a sizeable release that will be soon followed by another sizeable
-release. Both of them will combined close over 40 existing issues and PRs.
-
-This first release, despite its size, essentially represents preparatory work
-for the second release, which will be even bigger. Namely, this release:
-
-* Increases the MSRV to Rust 1.60.0, which was released about 1 year ago.
-* Upgrades its dependency on `aho-corasick` to the recently released 1.0
-version.
-* Upgrades its dependency on `regex-syntax` to the simultaneously released
-`0.7` version. The changes to `regex-syntax` principally revolve around a
-rewrite of its literal extraction code and a number of simplifications and
-optimizations to its high-level intermediate representation (HIR).
-
-The second release, which will follow ~shortly after the release above, will
-contain a soup-to-nuts rewrite of every regex engine. This will be done by
-bringing [`regex-automata`](https://github.com/BurntSushi/regex-automata) into
-this repository, and then changing the `regex` crate to be nothing but an API
-shim layer on top of `regex-automata`'s API.
-
-These tandem releases are the culmination of about 3
-years of on-and-off work that [began in earnest in March
-2020](https://github.com/rust-lang/regex/issues/656).
-
-Because of the scale of changes involved in these releases, I would love to
-hear about your experience. Especially if you notice undocumented changes in
-behavior or performance changes (positive *or* negative).
-
-Most changes in the first release are listed below. For more details, please
-see the commit log, which reflects a linear and decently documented history
-of all changes.
-
-New features:
-
-* [FEATURE #501](https://github.com/rust-lang/regex/issues/501):
-Permit many more characters to be escaped, even if they have no significance.
-More specifically, any ASCII character except for `[0-9A-Za-z<>]` can now be
-escaped. Also, a new routine, `is_escapeable_character`, has been added to
-`regex-syntax` to query whether a character is escapeable or not.
-* [FEATURE #547](https://github.com/rust-lang/regex/issues/547):
-Add `Regex::captures_at`. This fills a hole in the API, but doesn't otherwise
-introduce any new expressive power.
-* [FEATURE #595](https://github.com/rust-lang/regex/issues/595):
-Capture group names are now Unicode-aware. They can now begin with either a `_`
-or any "alphabetic" codepoint. After the first codepoint, subsequent codepoints
-can be any sequence of alpha-numeric codepoints, along with `_`, `.`, `[` and
-`]`. Note that replacement syntax has not changed.
-* [FEATURE #810](https://github.com/rust-lang/regex/issues/810):
-Add `Match::is_empty` and `Match::len` APIs.
-* [FEATURE #905](https://github.com/rust-lang/regex/issues/905):
-Add an `impl Default for RegexSet`, with the default being the empty set.
-* [FEATURE #908](https://github.com/rust-lang/regex/issues/908):
-A new method, `Regex::static_captures_len`, has been added which returns the
-number of capture groups in the pattern if and only if every possible match
-always contains the same number of matching groups.
-* [FEATURE #955](https://github.com/rust-lang/regex/issues/955):
-Named captures can now be written as `(?<name>re)` in addition to
-`(?P<name>re)`.
-* FEATURE: `regex-syntax` now supports empty character classes.
-* FEATURE: `regex-syntax` now has an optional `std` feature. (This will come
-to `regex` in the second release.)
-* FEATURE: The `Hir` type in `regex-syntax` has had a number of simplifications
-made to it.
-* FEATURE: `regex-syntax` has support for a new `R` flag for enabling CRLF
-mode. This will be supported in `regex` proper in the second release.
-* FEATURE: `regex-syntax` now has proper support for "regex that never
-matches" via `Hir::fail()`.
-* FEATURE: The `hir::literal` module of `regex-syntax` has been completely
-re-worked. It now has more documentation, examples and advice.
-* FEATURE: The `allow_invalid_utf8` option in `regex-syntax` has been renamed
-to `utf8`, and the meaning of the boolean has been flipped.
-
-Performance improvements:
-
-* PERF: The upgrade to `aho-corasick 1.0` may improve performance in some
-cases. It's difficult to characterize exactly which patterns this might impact,
-but if there are a small number of longish (>= 4 bytes) prefix literals, then
-it might be faster than before.
-
-Bug fixes:
-
-* [BUG #514](https://github.com/rust-lang/regex/issues/514):
-Improve `Debug` impl for `Match` so that it doesn't show the entire haystack.
-* BUGS [#516](https://github.com/rust-lang/regex/issues/516),
-[#731](https://github.com/rust-lang/regex/issues/731):
-Fix a number of issues with printing `Hir` values as regex patterns.
-* [BUG #610](https://github.com/rust-lang/regex/issues/610):
-Add explicit example of `foo|bar` in the regex syntax docs.
-* [BUG #625](https://github.com/rust-lang/regex/issues/625):
-Clarify that `SetMatches::len` does not (regretably) refer to the number of
-matches in the set.
-* [BUG #660](https://github.com/rust-lang/regex/issues/660):
-Clarify "verbose mode" in regex syntax documentation.
-* BUG [#738](https://github.com/rust-lang/regex/issues/738),
-[#950](https://github.com/rust-lang/regex/issues/950):
-Fix `CaptureLocations::get` so that it never panics.
-* [BUG #747](https://github.com/rust-lang/regex/issues/747):
-Clarify documentation for `Regex::shortest_match`.
-* [BUG #835](https://github.com/rust-lang/regex/issues/835):
-Fix `\p{Sc}` so that it is equivalent to `\p{Currency_Symbol}`.
-* [BUG #846](https://github.com/rust-lang/regex/issues/846):
-Add more clarifying documentation to the `CompiledTooBig` error variant.
-* [BUG #854](https://github.com/rust-lang/regex/issues/854):
-Clarify that `regex::Regex` searches as if the haystack is a sequence of
-Unicode scalar values.
-* [BUG #884](https://github.com/rust-lang/regex/issues/884):
-Replace `__Nonexhaustive` variants with `#[non_exhaustive]` attribute.
-* [BUG #893](https://github.com/rust-lang/regex/pull/893):
-Optimize case folding since it can get quite slow in some pathological cases.
-* [BUG #895](https://github.com/rust-lang/regex/issues/895):
-Reject `(?-u:\W)` in `regex::Regex` APIs.
-* [BUG #942](https://github.com/rust-lang/regex/issues/942):
-Add a missing `void` keyword to indicate "no parameters" in C API.
-* [BUG #965](https://github.com/rust-lang/regex/issues/965):
-Fix `\p{Lc}` so that it is equivalent to `\p{Cased_Letter}`.
-* [BUG #975](https://github.com/rust-lang/regex/issues/975):
-Clarify documentation for `\pX` syntax.
-
-
-1.7.3 (2023-03-24)
-==================
-This is a small release that fixes a bug in `Regex::shortest_match_at` that
-could cause it to panic, even when the offset given is valid.
-
-Bug fixes:
-
-* [BUG #969](https://github.com/rust-lang/regex/issues/969):
-  Fix a bug in how the reverse DFA was called for `Regex::shortest_match_at`.
-
-
-1.7.2 (2023-03-21)
-==================
-This is a small release that fixes a failing test on FreeBSD.
-
-Bug fixes:
-
-* [BUG #967](https://github.com/rust-lang/regex/issues/967):
-  Fix "no stack overflow" test which can fail due to the small stack size.
-
-
-1.7.1 (2023-01-09)
-==================
-This release was done principally to try and fix the doc.rs rendering for the
-regex crate.
-
-Performance improvements:
-
-* [PERF #930](https://github.com/rust-lang/regex/pull/930):
-  Optimize `replacen`. This also applies to `replace`, but not `replace_all`.
-
-Bug fixes:
-
-* [BUG #945](https://github.com/rust-lang/regex/issues/945):
-  Maybe fix rustdoc rendering by just bumping a new release?
-
-
-1.7.0 (2022-11-05)
-==================
-This release principally includes an upgrade to Unicode 15.
-
-New features:
-
-* [FEATURE #832](https://github.com/rust-lang/regex/issues/916):
-  Upgrade to Unicode 15.
-
-
-1.6.0 (2022-07-05)
-==================
-This release principally includes an upgrade to Unicode 14.
-
-New features:
-
-* [FEATURE #832](https://github.com/rust-lang/regex/pull/832):
-  Clarify that `Captures::len` includes all groups, not just matching groups.
-* [FEATURE #857](https://github.com/rust-lang/regex/pull/857):
-  Add an `ExactSizeIterator` impl for `SubCaptureMatches`.
-* [FEATURE #861](https://github.com/rust-lang/regex/pull/861):
-  Improve `RegexSet` documentation examples.
-* [FEATURE #877](https://github.com/rust-lang/regex/issues/877):
-  Upgrade to Unicode 14.
-
-Bug fixes:
-
-* [BUG #792](https://github.com/rust-lang/regex/issues/792):
-  Fix error message rendering bug.
-
-
-1.5.6 (2022-05-20)
-==================
-This release includes a few bug fixes, including a bug that produced incorrect
-matches when a non-greedy `?` operator was used.
-
-* [BUG #680](https://github.com/rust-lang/regex/issues/680):
-  Fixes a bug where `[[:alnum:][:^ascii:]]` dropped `[:alnum:]` from the class.
-* [BUG #859](https://github.com/rust-lang/regex/issues/859):
-  Fixes a bug where `Hir::is_match_empty` returned `false` for `\b`.
-* [BUG #862](https://github.com/rust-lang/regex/issues/862):
-  Fixes a bug where 'ab??' matches 'ab' instead of 'a' in 'ab'.
-
-
-1.5.5 (2022-03-08)
-==================
-This releases fixes a security bug in the regex compiler. This bug permits a
-vector for a denial-of-service attack in cases where the regex being compiled
-is untrusted. There are no known problems where the regex is itself trusted,
-including in cases of untrusted haystacks.
-
-* [SECURITY #GHSA-m5pq-gvj9-9vr8](https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8):
-  Fixes a bug in the regex compiler where empty sub-expressions subverted the
-  existing mitigations in place to enforce a size limit on compiled regexes.
-  The Rust Security Response WG published an advisory about this:
-  https://groups.google.com/g/rustlang-security-announcements/c/NcNNL1Jq7Yw
-
-
-1.5.4 (2021-05-06)
-==================
-This release fixes another compilation failure when building regex. This time,
-the fix is for when the `pattern` feature is enabled, which only works on
-nightly Rust. CI has been updated to test this case.
-
-* [BUG #772](https://github.com/rust-lang/regex/pull/772):
-  Fix build when `pattern` feature is enabled.
-
-
-1.5.3 (2021-05-01)
-==================
-This release fixes a bug when building regex with only the `unicode-perl`
-feature. It turns out that while CI was building this configuration, it wasn't
-actually failing the overall build on a failed compilation.
-
-* [BUG #769](https://github.com/rust-lang/regex/issues/769):
-  Fix build in `regex-syntax` when only the `unicode-perl` feature is enabled.
-
-
-1.5.2 (2021-05-01)
-==================
-This release fixes a performance bug when Unicode word boundaries are used.
-Namely, for certain regexes on certain inputs, it's possible for the lazy DFA
-to stop searching (causing a fallback to a slower engine) when it doesn't
-actually need to.
-
-[PR #768](https://github.com/rust-lang/regex/pull/768) fixes the bug, which was
-originally reported in
-[ripgrep#1860](https://github.com/BurntSushi/ripgrep/issues/1860).
-
-
-1.5.1 (2021-04-30)
-==================
-This is a patch release that fixes a compilation error when the `perf-literal`
-feature is not enabled.
-
-
-1.5.0 (2021-04-30)
-==================
-This release primarily updates to Rust 2018 (finally) and bumps the MSRV to
-Rust 1.41 (from Rust 1.28). Rust 1.41 was chosen because it's still reasonably
-old, and is what's in Debian stable at the time of writing.
-
-This release also drops this crate's own bespoke substring search algorithms
-in favor of a new
-[`memmem` implementation provided by the `memchr` crate](https://docs.rs/memchr/2.4.0/memchr/memmem/index.html).
-This will change the performance profile of some regexes, sometimes getting a
-little worse, and hopefully more frequently, getting a lot better. Please
-report any serious performance regressions if you find them.
-
-
-1.4.6 (2021-04-22)
-==================
-This is a small patch release that fixes the compiler's size check on how much
-heap memory a regex uses. Previously, the compiler did not account for the
-heap usage of Unicode character classes. Now it does. It's possible that this
-may make some regexes fail to compile that previously did compile. If that
-happens, please file an issue.
-
-* [BUG OSS-fuzz#33579](https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579):
-  Some regexes can use more heap memory than one would expect.
-
-
-1.4.5 (2021-03-14)
-==================
-This is a small patch release that fixes a regression in the size of a `Regex`
-in the 1.4.4 release. Prior to 1.4.4, a `Regex` was 552 bytes. In the 1.4.4
-release, it was 856 bytes due to internal changes. In this release, a `Regex`
-is now 16 bytes. In general, the size of a `Regex` was never something that was
-on my radar, but this increased size in the 1.4.4 release seems to have crossed
-a threshold and resulted in stack overflows in some programs.
-
-* [BUG #750](https://github.com/rust-lang/regex/pull/750):
-  Fixes stack overflows seemingly caused by a large `Regex` size by decreasing
-  its size.
-
-
-1.4.4 (2021-03-11)
-==================
-This is a small patch release that contains some bug fixes. Notably, it also
-drops the `thread_local` (and `lazy_static`, via transitivity) dependencies.
-
-Bug fixes:
-
-* [BUG #362](https://github.com/rust-lang/regex/pull/362):
-  Memory leaks caused by an internal caching strategy should now be fixed.
-* [BUG #576](https://github.com/rust-lang/regex/pull/576):
-  All regex types now implement `UnwindSafe` and `RefUnwindSafe`.
-* [BUG #728](https://github.com/rust-lang/regex/pull/749):
-  Add missing `Replacer` impls for `Vec<u8>`, `String`, `Cow`, etc.
-
-
-1.4.3 (2021-01-08)
-==================
-This is a small patch release that adds some missing standard trait
-implementations for some types in the public API.
-
-Bug fixes:
-
-* [BUG #734](https://github.com/rust-lang/regex/pull/734):
-  Add `FusedIterator` and `ExactSizeIterator` impls to iterator types.
-* [BUG #735](https://github.com/rust-lang/regex/pull/735):
-  Add missing `Debug` impls to public API types.
-
-
-1.4.2 (2020-11-01)
-==================
-This is a small bug fix release that bans `\P{any}`. We previously banned empty
-classes like `[^\w\W]`, but missed the `\P{any}` case. In the future, we hope
-to permit empty classes.
-
-* [BUG #722](https://github.com/rust-lang/regex/issues/722):
-  Ban `\P{any}` to avoid a panic in the regex compiler. Found by OSS-Fuzz.
-
-
-1.4.1 (2020-10-13)
-==================
-This is a small bug fix release that makes `\p{cf}` work. Previously, it would
-report "property not found" even though `cf` is a valid abbreviation for the
-`Format` general category.
-
-* [BUG #719](https://github.com/rust-lang/regex/issues/719):
-  Fixes bug that prevented `\p{cf}` from working.
-
-
-1.4.0 (2020-10-11)
-==================
-This release has a few minor documentation fixes as well as some very minor
-API additions. The MSRV remains at Rust 1.28 for now, but this is intended to
-increase to at least Rust 1.41.1 soon.
-
-This release also adds support for OSS-Fuzz. Kudos to
-[@DavidKorczynski](https://github.com/DavidKorczynski)
-for doing the heavy lifting for that!
-
-New features:
-
-* [FEATURE #649](https://github.com/rust-lang/regex/issues/649):
-  Support `[`, `]` and `.` in capture group names.
-* [FEATURE #687](https://github.com/rust-lang/regex/issues/687):
-  Add `is_empty` predicate to `RegexSet`.
-* [FEATURE #689](https://github.com/rust-lang/regex/issues/689):
-  Implement `Clone` for `SubCaptureMatches`.
-* [FEATURE #715](https://github.com/rust-lang/regex/issues/715):
-  Add `empty` constructor to `RegexSet` for convenience.
-
-Bug fixes:
-
-* [BUG #694](https://github.com/rust-lang/regex/issues/694):
-  Fix doc example for `Replacer::replace_append`.
-* [BUG #698](https://github.com/rust-lang/regex/issues/698):
-  Clarify docs for `s` flag when using a `bytes::Regex`.
-* [BUG #711](https://github.com/rust-lang/regex/issues/711):
-  Clarify `is_match` docs to indicate that it can match anywhere in string.
-
-
-1.3.9 (2020-05-28)
-==================
-This release fixes a MSRV (Minimum Support Rust Version) regression in the
-1.3.8 release. Namely, while 1.3.8 compiles on Rust 1.28, it actually does not
-compile on other Rust versions, such as Rust 1.39.
-
-Bug fixes:
-
-* [BUG #685](https://github.com/rust-lang/regex/issues/685):
-  Remove use of `doc_comment` crate, which cannot be used before Rust 1.43.
-
-
-1.3.8 (2020-05-28)
-==================
-This release contains a couple of important bug fixes driven
-by better support for empty-subexpressions in regexes. For
-example, regexes like `b|` are now allowed. Major thanks to
-[@sliquister](https://github.com/sliquister) for implementing support for this
-in [#677](https://github.com/rust-lang/regex/pull/677).
-
-Bug fixes:
-
-* [BUG #523](https://github.com/rust-lang/regex/pull/523):
-  Add note to documentation that spaces can be escaped in `x` mode.
-* [BUG #524](https://github.com/rust-lang/regex/issues/524):
-  Add support for empty sub-expressions, including empty alternations.
-* [BUG #659](https://github.com/rust-lang/regex/issues/659):
-  Fix match bug caused by an empty sub-expression miscompilation.
-
-
-1.3.7 (2020-04-17)
-==================
-This release contains a small bug fix that fixes how `regex` forwards crate
-features to `regex-syntax`. In particular, this will reduce recompilations in
-some cases.
-
-Bug fixes:
-
-* [BUG #665](https://github.com/rust-lang/regex/pull/665):
-  Fix feature forwarding to `regex-syntax`.
-
-
-1.3.6 (2020-03-24)
-==================
-This release contains a sizable (~30%) performance improvement when compiling
-some kinds of large regular expressions.
-
-Performance improvements:
-
-* [PERF #657](https://github.com/rust-lang/regex/pull/657):
-  Improve performance of compiling large regular expressions.
-
-
-1.3.5 (2020-03-12)
-==================
-This release updates this crate to Unicode 13.
-
-New features:
-
-* [FEATURE #653](https://github.com/rust-lang/regex/pull/653):
-  Update `regex-syntax` to Unicode 13.
-
-
-1.3.4 (2020-01-30)
-==================
-This is a small bug fix release that fixes a bug related to the scoping of
-flags in a regex. Namely, before this fix, a regex like `((?i)a)b)` would
-match `aB` despite the fact that `b` should not be matched case insensitively.
-
-Bug fixes:
-
-* [BUG #640](https://github.com/rust-lang/regex/issues/640):
-  Fix bug related to the scoping of flags in a regex.
-
-
-1.3.3 (2020-01-09)
-==================
-This is a small maintenance release that upgrades the dependency on
-`thread_local` from `0.3` to `1.0`. The minimum supported Rust version remains
-at Rust 1.28.
-
-
-1.3.2 (2020-01-09)
-==================
-This is a small maintenance release with some house cleaning and bug fixes.
-
-New features:
-
-* [FEATURE #631](https://github.com/rust-lang/regex/issues/631):
-  Add a `Match::range` method and a `From<Match> for Range` impl.
-
-Bug fixes:
-
-* [BUG #521](https://github.com/rust-lang/regex/issues/521):
-  Corrects `/-/.splitn("a", 2)` to return `["a"]` instead of `["a", ""]`.
-* [BUG #594](https://github.com/rust-lang/regex/pull/594):
-  Improve error reporting when writing `\p\`.
-* [BUG #627](https://github.com/rust-lang/regex/issues/627):
-  Corrects `/-/.split("a-")` to return `["a", ""]` instead of `["a"]`.
-* [BUG #633](https://github.com/rust-lang/regex/pull/633):
-  Squash deprecation warnings for the `std::error::Error::description` method.
-
-
-1.3.1 (2019-09-04)
-==================
-This is a maintenance release with no changes in order to try to work-around
-a [docs.rs/Cargo issue](https://github.com/rust-lang/docs.rs/issues/400).
-
-
-1.3.0 (2019-09-03)
-==================
-This release adds a plethora of new crate features that permit users of regex
-to shrink its size considerably, in exchange for giving up either functionality
-(such as Unicode support) or runtime performance. When all such features are
-disabled, the dependency tree for `regex` shrinks to exactly 1 crate
-(`regex-syntax`). More information about the new crate features can be
-[found in the docs](https://docs.rs/regex/*/#crate-features).
-
-Note that while this is a new minor version release, the minimum supported
-Rust version for this crate remains at `1.28.0`.
-
-New features:
-
-* [FEATURE #474](https://github.com/rust-lang/regex/issues/474):
-  The `use_std` feature has been deprecated in favor of the `std` feature.
-  The `use_std` feature will be removed in regex 2. Until then, `use_std` will
-  remain as an alias for the `std` feature.
-* [FEATURE #583](https://github.com/rust-lang/regex/issues/583):
-  Add a substantial number of crate features shrinking `regex`.
-
-
-1.2.1 (2019-08-03)
-==================
-This release does a bit of house cleaning. Namely:
-
-* This repository is now using rustfmt.
-* License headers have been removed from all files, in following suit with the
-  Rust project.
-* Teddy has been removed from the `regex` crate, and is now part of the
-  `aho-corasick` crate.
-  [See `aho-corasick`'s new `packed` sub-module for details](https://docs.rs/aho-corasick/0.7.6/aho_corasick/packed/index.html).
-* The `utf8-ranges` crate has been deprecated, with its functionality moving
-  into the
-  [`utf8` sub-module of `regex-syntax`](https://docs.rs/regex-syntax/0.6.11/regex_syntax/utf8/index.html).
-* The `ucd-util` dependency has been dropped, in favor of implementing what
-  little we need inside of `regex-syntax` itself.
-
-In general, this is part of an ongoing (long term) effort to make optimizations
-in the regex engine easier to reason about. The current code is too convoluted
-and thus it is very easy to introduce new bugs. This simplification effort is
-the primary motivation behind re-working the `aho-corasick` crate to not only
-bundle algorithms like Teddy, but to also provide regex-like match semantics
-automatically.
-
-Moving forward, the plan is to join up with the `bstr` and `regex-automata`
-crates, with the former providing more sophisticated substring search
-algorithms (thereby deleting existing code in `regex`) and the latter providing
-ahead-of-time compiled DFAs for cases where they are inexpensive to compute.
-
-
-1.2.0 (2019-07-20)
-==================
-This release updates regex's minimum supported Rust version to 1.28, which was
-released almost 1 year ago. This release also updates regex's Unicode data
-tables to 12.1.0.
-
-
-1.1.9 (2019-07-06)
-==================
-This release contains a bug fix that caused regex's tests to fail, due to a
-dependency on an unreleased behavior in regex-syntax.
-
-* [BUG #593](https://github.com/rust-lang/regex/issues/593):
-  Move an integration-style test on error messages into regex-syntax.
-
-
-1.1.8 (2019-07-04)
-==================
-This release contains a few small internal refactorings. One of which fixes
-an instance of undefined behavior in a part of the SIMD code.
-
-Bug fixes:
-
-* [BUG #545](https://github.com/rust-lang/regex/issues/545):
-  Improves error messages when a repetition operator is used without a number.
-* [BUG #588](https://github.com/rust-lang/regex/issues/588):
-  Removes use of a repr(Rust) union used for type punning in the Teddy matcher.
-* [BUG #591](https://github.com/rust-lang/regex/issues/591):
-  Update docs for running benchmarks and improve failure modes.
-
-
-1.1.7 (2019-06-09)
-==================
-This release fixes up a few warnings as a result of recent deprecations.
-
-
-1.1.6 (2019-04-16)
-==================
-This release fixes a regression introduced by a bug fix (for
-[BUG #557](https://github.com/rust-lang/regex/issues/557)) which could cause
-the regex engine to enter an infinite loop. This bug was originally
-[reported against ripgrep](https://github.com/BurntSushi/ripgrep/issues/1247).
-
-
-1.1.5 (2019-04-01)
-==================
-This release fixes a bug in regex's dependency specification where it requires
-a newer version of regex-syntax, but this wasn't communicated correctly in the
-Cargo.toml. This would have been caught by a minimal version check, but this
-check was disabled because the `rand` crate itself advertises incorrect
-dependency specifications.
-
-Bug fixes:
-
-* [BUG #570](https://github.com/rust-lang/regex/pull/570):
-  Fix regex-syntax minimal version.
-
-
-1.1.4 (2019-03-31)
-==================
-This release fixes a backwards compatibility regression where Regex was no
-longer UnwindSafe. This was caused by the upgrade to aho-corasick 0.7, whose
-AhoCorasick type was itself not UnwindSafe. This has been fixed in aho-corasick
-0.7.4, which we now require.
-
-Bug fixes:
-
-* [BUG #568](https://github.com/rust-lang/regex/pull/568):
-  Fix an API regression where Regex was no longer UnwindSafe.
-
-
-1.1.3 (2019-03-30)
-==================
-This release fixes a few bugs and adds a performance improvement when a regex
-is a simple alternation of literals.
-
-Performance improvements:
-
-* [OPT #566](https://github.com/rust-lang/regex/pull/566):
-  Upgrades `aho-corasick` to 0.7 and uses it for `foo|bar|...|quux` regexes.
-
-Bug fixes:
-
-* [BUG #527](https://github.com/rust-lang/regex/issues/527):
-  Fix a bug where the parser would panic on patterns like `((?x))`.
-* [BUG #555](https://github.com/rust-lang/regex/issues/555):
-  Fix a bug where the parser would panic on patterns like `(?m){1,1}`.
-* [BUG #557](https://github.com/rust-lang/regex/issues/557):
-  Fix a bug where captures could lead to an incorrect match.
-
-
-1.1.2 (2019-02-27)
-==================
-This release fixes a bug found in the fix introduced in 1.1.1.
-
-Bug fixes:
-
-* [BUG edf45e6f](https://github.com/rust-lang/regex/commit/edf45e6f):
-  Fix bug introduced in reverse suffix literal matcher in the 1.1.1 release.
-
-
-1.1.1 (2019-02-27)
-==================
-This is a small release with one fix for a bug caused by literal optimizations.
-
-Bug fixes:
-
-* [BUG 661bf53d](https://github.com/rust-lang/regex/commit/661bf53d):
-  Fixes a bug in the reverse suffix literal optimization. This was originally
-  reported
-  [against ripgrep](https://github.com/BurntSushi/ripgrep/issues/1203).
-
-
-1.1.0 (2018-11-30)
-==================
-This is a small release with a couple small enhancements. This release also
-increases the minimal supported Rust version (MSRV) to 1.24.1 (from 1.20.0). In
-accordance with this crate's MSRV policy, this release bumps the minor version
-number.
-
-Performance improvements:
-
-* [OPT #511](https://github.com/rust-lang/regex/pull/511),
-  [OPT #540](https://github.com/rust-lang/regex/pull/540):
-  Improve lazy DFA construction for large regex sets.
-
-New features:
-
-* [FEATURE #538](https://github.com/rust-lang/regex/pull/538):
-  Add Emoji and "break" Unicode properties. See [UNICODE.md](UNICODE.md).
-
-Bug fixes:
-
-* [BUG #530](https://github.com/rust-lang/regex/pull/530):
-  Add Unicode license (for data tables).
-* Various typo/doc fixups.
-
-
-1.0.6 (2018-11-06)
-==================
-This is a small release.
-
-Performance improvements:
-
-* [OPT #513](https://github.com/rust-lang/regex/pull/513):
-  Improve performance of compiling large Unicode classes by 8-10%.
-
-Bug fixes:
-
-* [BUG #533](https://github.com/rust-lang/regex/issues/533):
-  Fix definition of `[[:blank:]]` class that regressed in `regex-syntax 0.5`.
-
-
-1.0.5 (2018-09-06)
-==================
-This is a small release with an API enhancement.
-
-New features:
-
-* [FEATURE #509](https://github.com/rust-lang/regex/pull/509):
-  Generalize impls of the `Replacer` trait.
-
-
-1.0.4 (2018-08-25)
-==================
-This is a small release that bumps the quickcheck dependency.
-
-
-1.0.3 (2018-08-24)
-==================
-This is a small bug fix release.
-
-Bug fixes:
-
-* [BUG #504](https://github.com/rust-lang/regex/pull/504):
-  Fix for Cargo's "minimal version" support.
-* [BUG 1e39165f](https://github.com/rust-lang/regex/commit/1e39165f):
-  Fix doc examples for byte regexes.
-
-
-1.0.2 (2018-07-18)
-==================
-This release exposes some new lower level APIs on `Regex` that permit
-amortizing allocation and controlling the location at which a search is
-performed in a more granular way. Most users of the regex crate will not
-need or want to use these APIs.
-
-New features:
-
-* [FEATURE #493](https://github.com/rust-lang/regex/pull/493):
-  Add a few lower level APIs for amortizing allocation and more fine grained
-  searching.
-
-Bug fixes:
-
-* [BUG 3981d2ad](https://github.com/rust-lang/regex/commit/3981d2ad):
-  Correct outdated documentation on `RegexBuilder::dot_matches_new_line`.
-* [BUG 7ebe4ae0](https://github.com/rust-lang/regex/commit/7ebe4ae0):
-  Correct outdated documentation on `Parser::allow_invalid_utf8` in the
-  `regex-syntax` crate.
-* [BUG 24c7770b](https://github.com/rust-lang/regex/commit/24c7770b):
-  Fix a bug in the HIR printer where it wouldn't correctly escape meta
-  characters in character classes.
-
-
-1.0.1 (2018-06-19)
-==================
-This release upgrades regex's Unicode tables to Unicode 11, and enables SIMD
-optimizations automatically on Rust stable (1.27 or newer).
-
-New features:
-
-* [FEATURE #486](https://github.com/rust-lang/regex/pull/486):
-  Implement `size_hint` on `RegexSet` match iterators.
-* [FEATURE #488](https://github.com/rust-lang/regex/pull/488):
-  Update Unicode tables for Unicode 11.
-* [FEATURE #490](https://github.com/rust-lang/regex/pull/490):
-  SIMD optimizations are now enabled automatically in Rust stable, for versions
-  1.27 and up. No compilation flags or features need to be set. CPU support
-  for SIMD is detected automatically at runtime.
-
-Bug fixes:
-
-* [BUG #482](https://github.com/rust-lang/regex/pull/482):
-  Present a better compilation error when the `use_std` feature isn't used.
-
-
-1.0.0 (2018-05-01)
-==================
-This release marks the 1.0 release of regex.
-
-While this release includes some breaking changes, most users of older versions
-of the regex library should be able to migrate to 1.0 by simply bumping the
-version number. The important changes are as follows:
-
-* We adopt Rust 1.20 as the new minimum supported version of Rust for regex.
-  We also tentatively adopt a policy that permits bumping the minimum supported
-  version of Rust in minor version releases of regex, but no patch releases.
-  That is, with respect to semver, we do not strictly consider bumping the
-  minimum version of Rust to be a breaking change, but adopt a conservative
-  stance as a compromise.
-* Octal syntax in regular expressions has been disabled by default. This
-  permits better error messages that inform users that backreferences aren't
-  available. Octal syntax can be re-enabled via the corresponding option on
-  `RegexBuilder`.
-* `(?-u:\B)` is no longer allowed in Unicode regexes since it can match at
-  invalid UTF-8 code unit boundaries. `(?-u:\b)` is still allowed in Unicode
-  regexes.
-* The `From<regex_syntax::Error>` impl has been removed. This formally removes
-  the public dependency on `regex-syntax`.
-* A new feature, `use_std`, has been added and enabled by default. Disabling
-  the feature will result in a compilation error. In the future, this may
-  permit us to support `no_std` environments (w/ `alloc`) in a backwards
-  compatible way.
-
-For more information and discussion, please see
-[1.0 release tracking issue](https://github.com/rust-lang/regex/issues/457).
-
-
-0.2.11 (2018-05-01)
-===================
-This release primarily contains bug fixes. Some of them resolve bugs where
-the parser could panic.
-
-New features:
-
-* [FEATURE #459](https://github.com/rust-lang/regex/pull/459):
-  Include C++'s standard regex library and Boost's regex library in the
-  benchmark harness. We now include D/libphobos, C++/std, C++/boost, Oniguruma,
-  PCRE1, PCRE2, RE2 and Tcl in the harness.
-
-Bug fixes:
-
-* [BUG #445](https://github.com/rust-lang/regex/issues/445):
-  Clarify order of indices returned by RegexSet match iterator.
-* [BUG #461](https://github.com/rust-lang/regex/issues/461):
-  Improve error messages for invalid regexes like `[\d-a]`.
-* [BUG #464](https://github.com/rust-lang/regex/issues/464):
-  Fix a bug in the error message pretty printer that could cause a panic when
-  a regex contained a literal `\n` character.
-* [BUG #465](https://github.com/rust-lang/regex/issues/465):
-  Fix a panic in the parser that was caused by applying a repetition operator
-  to `(?flags)`.
-* [BUG #466](https://github.com/rust-lang/regex/issues/466):
-  Fix a bug where `\pC` was not recognized as an alias for `\p{Other}`.
-* [BUG #470](https://github.com/rust-lang/regex/pull/470):
-  Fix a bug where literal searches did more work than necessary for anchored
-  regexes.
-
-
-0.2.10 (2018-03-16)
-===================
-This release primarily updates the regex crate to changes made in `std::arch`
-on nightly Rust.
-
-New features:
-
-* [FEATURE #458](https://github.com/rust-lang/regex/pull/458):
-  The `Hir` type in `regex-syntax` now has a printer.
-
-
-0.2.9 (2018-03-12)
-==================
-This release introduces a new nightly only feature, `unstable`, which enables
-SIMD optimizations for certain types of regexes. No additional compile time
-options are necessary, and the regex crate will automatically choose the
-best CPU features at run time. As a result, the `simd` (nightly only) crate
-dependency has been dropped.
-
-New features:
-
-* [FEATURE #456](https://github.com/rust-lang/regex/pull/456):
-  The regex crate now includes AVX2 optimizations in addition to the extant
-  SSSE3 optimization.
-
-Bug fixes:
-
-* [BUG #455](https://github.com/rust-lang/regex/pull/455):
-  Fix a bug where `(?x)[ / - ]` failed to parse.
-
-
-0.2.8 (2018-03-12)
-==================
-Bug fixes:
-
-* [BUG #454](https://github.com/rust-lang/regex/pull/454):
-  Fix a bug in the nest limit checker being too aggressive.
-
-
-0.2.7 (2018-03-07)
-==================
-This release includes a ground-up rewrite of the regex-syntax crate, which has
-been in development for over a year.
-New features:
-
-* Error messages for invalid regexes have been greatly improved. You get these
-  automatically; you don't need to do anything. In addition to better
-  formatting, error messages will now explicitly call out the use of look
-  around. When regex 1.0 is released, this will happen for backreferences as
-  well.
-* Full support for intersection, difference and symmetric difference of
-  character classes. These can be used via the `&&`, `--` and `~~` binary
-  operators within classes.
-* A Unicode Level 1 conformant implementation of `\p{..}` character classes.
-  Things like `\p{scx:Hira}`, `\p{age:3.2}` or `\p{Changes_When_Casefolded}`
-  now work. All property name and value aliases are supported, and properties
-  are selected via loose matching. e.g., `\p{Greek}` is the same as
-  `\p{G r E e K}`.
-* A new `UNICODE.md` document has been added to this repository that
-  exhaustively documents support for UTS#18.
-* Empty sub-expressions are now permitted in most places. That is, `()+` is
-  now a valid regex.
-* Almost everything in regex-syntax now uses constant stack space, even when
-  performing analysis that requires structural induction. This reduces the risk
-  of a user provided regular expression causing a stack overflow.
-* [FEATURE #174](https://github.com/rust-lang/regex/issues/174):
-  The `Ast` type in `regex-syntax` now contains span information.
-* [FEATURE #424](https://github.com/rust-lang/regex/issues/424):
-  Support `\u`, `\u{...}`, `\U` and `\U{...}` syntax for specifying code points
-  in a regular expression.
-* [FEATURE #449](https://github.com/rust-lang/regex/pull/449):
-  Add a `Replace::by_ref` adapter for use of a replacer without consuming it.
-
-Bug fixes:
-
-* [BUG #446](https://github.com/rust-lang/regex/issues/446):
-  We re-enable the Boyer-Moore literal matcher.
-
-
-0.2.6 (2018-02-08)
-==================
-Bug fixes:
-
-* [BUG #446](https://github.com/rust-lang/regex/issues/446):
-  Fixes a bug in the new Boyer-Moore searcher that results in a match failure.
-  We fix this bug by temporarily disabling Boyer-Moore.
-
-
-0.2.5 (2017-12-30)
-==================
-Bug fixes:
-
-* [BUG #437](https://github.com/rust-lang/regex/issues/437):
-  Fixes a bug in the new Boyer-Moore searcher that results in a panic.
-
-
-0.2.4 (2017-12-30)
-==================
-New features:
-
-* [FEATURE #348](https://github.com/rust-lang/regex/pull/348):
-  Improve performance for capture searches on anchored regex.
-  (Contributed by @ethanpailes. Nice work!)
-* [FEATURE #419](https://github.com/rust-lang/regex/pull/419):
-  Expand literal searching to include Tuned Boyer-Moore in some cases.
-  (Contributed by @ethanpailes. Nice work!)
-
-Bug fixes:
-
-* [BUG](https://github.com/rust-lang/regex/pull/436):
-  The regex compiler plugin has been removed.
-* [BUG](https://github.com/rust-lang/regex/pull/436):
-  `simd` has been bumped to `0.2.1`, which fixes a Rust nightly build error.
-* [BUG](https://github.com/rust-lang/regex/pull/436):
-  Bring the benchmark harness up to date.
-
-
-0.2.3 (2017-11-30)
-==================
-New features:
-
-* [FEATURE #374](https://github.com/rust-lang/regex/pull/374):
-  Add `impl From<Match> for &str`.
-* [FEATURE #380](https://github.com/rust-lang/regex/pull/380):
-  Derive `Clone` and `PartialEq` on `Error`.
-* [FEATURE #400](https://github.com/rust-lang/regex/pull/400):
-  Update to Unicode 10.
-
-Bug fixes:
-
-* [BUG #375](https://github.com/rust-lang/regex/issues/375):
-  Fix a bug that prevented the bounded backtracker from terminating.
-* [BUG #393](https://github.com/rust-lang/regex/issues/393),
-  [BUG #394](https://github.com/rust-lang/regex/issues/394):
-  Fix bug with `replace` methods for empty matches.
-
-
-0.2.2 (2017-05-21)
-==================
-New features:
-
-* [FEATURE #341](https://github.com/rust-lang/regex/issues/341):
-  Support nested character classes and intersection operation.
-  For example, `[\p{Greek}&&\pL]` matches greek letters and
-  `[[0-9]&&[^4]]` matches every decimal digit except `4`.
-  (Much thanks to @robinst, who contributed this awesome feature.)
-
-Bug fixes:
-
-* [BUG #321](https://github.com/rust-lang/regex/issues/321):
-  Fix bug in literal extraction and UTF-8 decoding.
-* [BUG #326](https://github.com/rust-lang/regex/issues/326):
-  Add documentation tip about the `(?x)` flag.
-* [BUG #333](https://github.com/rust-lang/regex/issues/333):
-  Show additional replacement example using curly braces.
-* [BUG #334](https://github.com/rust-lang/regex/issues/334):
-  Fix bug when resolving captures after a match.
-* [BUG #338](https://github.com/rust-lang/regex/issues/338):
-  Add example that uses `Captures::get` to API documentation.
-* [BUG #353](https://github.com/rust-lang/regex/issues/353):
-  Fix RegexSet bug that caused match failure in some cases.
-* [BUG #354](https://github.com/rust-lang/regex/pull/354):
-  Fix panic in parser when `(?x)` is used.
-* [BUG #358](https://github.com/rust-lang/regex/issues/358):
-  Fix literal optimization bug with RegexSet.
-* [BUG #359](https://github.com/rust-lang/regex/issues/359):
-  Fix example code in README.
-* [BUG #365](https://github.com/rust-lang/regex/pull/365):
-  Fix bug in `rure_captures_len` in the C binding.
-* [BUG #367](https://github.com/rust-lang/regex/issues/367):
-  Fix byte class bug that caused a panic.
-
-
-0.2.1
-=====
-One major bug with `replace_all` has been fixed along with a couple of other
-touchups.
-
-* [BUG #312](https://github.com/rust-lang/regex/issues/312):
-  Fix documentation for `NoExpand` to reference correct lifetime parameter.
-* [BUG #314](https://github.com/rust-lang/regex/issues/314):
-  Fix a bug with `replace_all` when replacing a match with the empty string.
-* [BUG #316](https://github.com/rust-lang/regex/issues/316):
-  Note a missing breaking change from the `0.2.0` CHANGELOG entry.
-  (`RegexBuilder::compile` was renamed to `RegexBuilder::build`.)
-* [BUG #324](https://github.com/rust-lang/regex/issues/324):
-  Compiling `regex` should only require one version of `memchr` crate.
-
-
-0.2.0
-=====
-This is a new major release of the regex crate, and is an implementation of the
-[regex 1.0 RFC](https://github.com/rust-lang/rfcs/blob/master/text/1620-regex-1.0.md).
-We are releasing a `0.2` first, and if there are no major problems, we will
-release a `1.0` shortly. For `0.2`, the minimum *supported* Rust version is
-1.12.
-
-There are a number of **breaking changes** in `0.2`. They are split into two
-types. The first type corresponds to breaking changes in regular expression
-syntax. The second type corresponds to breaking changes in the API.
-
-Breaking changes for regex syntax:
-
-* POSIX character classes now require double bracketing. Previously, the regex
-  `[:upper:]` would parse as the `upper` POSIX character class. Now it parses
-  as the character class containing the characters `:upper:`. The fix to this
-  change is to use `[[:upper:]]` instead. Note that variants like
-  `[[:upper:][:blank:]]` continue to work.
-* The character `[` must always be escaped inside a character class.
-* The characters `&`, `-` and `~` must be escaped if any one of them are
-  repeated consecutively. For example, `[&]`, `[\&]`, `[\&\&]`, `[&-&]` are all
-  equivalent while `[&&]` is illegal. (The motivation for this and the prior
-  change is to provide a backwards compatible path for adding character class
-  set notation.)
-* A `bytes::Regex` now has Unicode mode enabled by default (like the main
-  `Regex` type). This means regexes compiled with `bytes::Regex::new` that
-  don't have the Unicode flag set should add `(?-u)` to recover the original
-  behavior.
-
-Breaking changes for the regex API:
-
-* `find` and `find_iter` now **return `Match` values instead of
-  `(usize, usize)`.** `Match` values have `start` and `end` methods, which
-  return the match offsets. `Match` values also have an `as_str` method,
-  which returns the text of the match itself.
-* The `Captures` type now only provides a single iterator over all capturing
-  matches, which should replace uses of `iter` and `iter_pos`. Uses of
-  `iter_named` should use the `capture_names` method on `Regex`.
-* The `at` method on the `Captures` type has been renamed to `get`, and it
-  now returns a `Match`. Similarly, the `name` method on `Captures` now returns
-  a `Match`.
-* The `replace` methods now return `Cow` values. The `Cow::Borrowed` variant
-  is returned when no replacements are made.
-* The `Replacer` trait has been completely overhauled. This should only
-  impact clients that implement this trait explicitly. Standard uses of
-  the `replace` methods should continue to work unchanged. If you implement
-  the `Replacer` trait, please consult the new documentation.
-* The `quote` free function has been renamed to `escape`.
-* The `Regex::with_size_limit` method has been removed. It is replaced by
-  `RegexBuilder::size_limit`.
-* The `RegexBuilder` type has switched from owned `self` method receivers to
-  `&mut self` method receivers. Most uses will continue to work unchanged, but
-  some code may require naming an intermediate variable to hold the builder.
-* The `compile` method on `RegexBuilder` has been renamed to `build`.
-* The free `is_match` function has been removed. It is replaced by compiling
-  a `Regex` and calling its `is_match` method.
-* The `PartialEq` and `Eq` impls on `Regex` have been dropped. If you relied
-  on these impls, the fix is to define a wrapper type around `Regex`, impl
-  `Deref` on it and provide the necessary impls.
-* The `is_empty` method on `Captures` has been removed. This always returns
-  `false`, so its use is superfluous.
-* The `Syntax` variant of the `Error` type now contains a string instead of
-  a `regex_syntax::Error`. If you were examining syntax errors more closely,
-  you'll need to explicitly use the `regex_syntax` crate to re-parse the regex.
-* The `InvalidSet` variant of the `Error` type has been removed since it is
-  no longer used.
-* Most of the iterator types have been renamed to match conventions. If you
-  were using these iterator types explicitly, please consult the documentation
-  for its new name. For example, `RegexSplits` has been renamed to `Split`.
-
-A number of bugs have been fixed:
-
-* [BUG #151](https://github.com/rust-lang/regex/issues/151):
-  The `Replacer` trait has been changed to permit the caller to control
-  allocation.
-* [BUG #165](https://github.com/rust-lang/regex/issues/165):
-  Remove the free `is_match` function.
-* [BUG #166](https://github.com/rust-lang/regex/issues/166):
-  Expose more knobs (available in `0.1`) and remove `with_size_limit`.
-* [BUG #168](https://github.com/rust-lang/regex/issues/168):
-  Iterators produced by `Captures` now have the correct lifetime parameters.
-* [BUG #175](https://github.com/rust-lang/regex/issues/175):
-  Fix a corner case in the parsing of POSIX character classes.
-* [BUG #178](https://github.com/rust-lang/regex/issues/178):
-  Drop the `PartialEq` and `Eq` impls on `Regex`.
-* [BUG #179](https://github.com/rust-lang/regex/issues/179):
-  Remove `is_empty` from `Captures` since it always returns false.
-* [BUG #276](https://github.com/rust-lang/regex/issues/276):
-  Position of named capture can now be retrieved from a `Captures`.
-* [BUG #296](https://github.com/rust-lang/regex/issues/296):
-  Remove winapi/kernel32-sys dependency on UNIX.
-* [BUG #307](https://github.com/rust-lang/regex/issues/307):
-  Fix error on emscripten.
-
-
-0.1.80
-======
-* [PR #292](https://github.com/rust-lang/regex/pull/292):
-  Fixes bug #291, which was introduced by PR #290.
-
-0.1.79
-======
-* Require regex-syntax 0.3.8.
-
-0.1.78
-======
-* [PR #290](https://github.com/rust-lang/regex/pull/290):
-  Fixes bug #289, which caused some regexes with a certain combination
-  of literals to match incorrectly.
-
-0.1.77
-======
-* [PR #281](https://github.com/rust-lang/regex/pull/281):
-  Fixes bug #280 by disabling all literal optimizations when a pattern
-  is partially anchored.
-
-0.1.76
-======
-* Tweak criteria for using the Teddy literal matcher.
-
-0.1.75
-======
-* [PR #275](https://github.com/rust-lang/regex/pull/275):
-  Improves match verification performance in the Teddy SIMD searcher.
-* [PR #278](https://github.com/rust-lang/regex/pull/278):
-  Replaces slow substring loop in the Teddy SIMD searcher with Aho-Corasick.
-* Implemented DoubleEndedIterator on regex set match iterators.
-
-0.1.74
-======
-* Release regex-syntax 0.3.5 with a minor bug fix.
-* Fix bug #272.
-* Fix bug #277.
-* [PR #270](https://github.com/rust-lang/regex/pull/270):
-  Fixes bugs #264, #268 and an unreported where the DFA cache size could be
-  drastically under estimated in some cases (leading to high unexpected memory
-  usage).
-
-0.1.73
-======
-* Release `regex-syntax 0.3.4`.
-* Bump `regex-syntax` dependency version for `regex` to `0.3.4`.
-
-0.1.72
-======
-* [PR #262](https://github.com/rust-lang/regex/pull/262):
-  Fixes a number of small bugs caught by fuzz testing (AFL).
-
-0.1.71
-======
-* [PR #236](https://github.com/rust-lang/regex/pull/236):
-  Fix a bug in how suffix literals were extracted, which could lead
-  to invalid match behavior in some cases.
-
-0.1.70
-======
-* [PR #231](https://github.com/rust-lang/regex/pull/231):
-  Add SIMD accelerated multiple pattern search.
-* [PR #228](https://github.com/rust-lang/regex/pull/228):
-  Reintroduce the reverse suffix literal optimization.
-* [PR #226](https://github.com/rust-lang/regex/pull/226):
-  Implements NFA state compression in the lazy DFA.
-* [PR #223](https://github.com/rust-lang/regex/pull/223):
-  A fully anchored RegexSet can now short-circuit.
-
-0.1.69
-======
-* [PR #216](https://github.com/rust-lang/regex/pull/216):
-  Tweak the threshold for running backtracking.
-* [PR #217](https://github.com/rust-lang/regex/pull/217):
-  Add upper limit (from the DFA) to capture search (for the NFA).
-* [PR #218](https://github.com/rust-lang/regex/pull/218):
-  Add rure, a C API.
-
-0.1.68
-======
-* [PR #210](https://github.com/rust-lang/regex/pull/210):
-  Fixed a performance bug in `bytes::Regex::replace` where `extend` was used
-  instead of `extend_from_slice`.
-* [PR #211](https://github.com/rust-lang/regex/pull/211):
-  Fixed a bug in the handling of word boundaries in the DFA.
-* [PR #213](https://github.com/rust-lang/pull/213):
-  Added RE2 and Tcl to the benchmark harness. Also added a CLI utility from
-  running regexes using any of the following regex engines: PCRE1, PCRE2,
-  Oniguruma, RE2, Tcl and of course Rust's own regexes.
-
-0.1.67
-======
-* [PR #201](https://github.com/rust-lang/regex/pull/201):
-  Fix undefined behavior in the `regex!` compiler plugin macro.
-* [PR #205](https://github.com/rust-lang/regex/pull/205):
-  More improvements to DFA performance. Competitive with RE2. See PR for
-  benchmarks.
-* [PR #209](https://github.com/rust-lang/regex/pull/209):
-  Release 0.1.66 was semver incompatible since it required a newer version
-  of Rust than previous releases. This PR fixes that. (And `0.1.66` was
-  yanked.)
-
-0.1.66
-======
-* Speculative support for Unicode word boundaries was added to the DFA. This
-  should remove the last common case that disqualified use of the DFA.
-* An optimization that scanned for suffix literals and then matched the regular
-  expression in reverse was removed because it had worst case quadratic time
-  complexity. It was replaced with a more limited optimization where, given any
-  regex of the form `re$`, it will be matched in reverse from the end of the
-  haystack.
-* [PR #202](https://github.com/rust-lang/regex/pull/202):
-  The inner loop of the DFA was heavily optimized to improve cache locality
-  and reduce the overall number of instructions run on each iteration. This
-  represents the first use of `unsafe` in `regex` (to elide bounds checks).
-* [PR #200](https://github.com/rust-lang/regex/pull/200):
-  Use of the `mempool` crate (which used thread local storage) was replaced
-  with a faster version of a similar API in @Amanieu's `thread_local` crate.
-  It should reduce contention when using a regex from multiple threads
-  simultaneously.
-* PCRE2 JIT benchmarks were added. A benchmark comparison can be found
-  [here](https://gist.github.com/anonymous/14683c01993e91689f7206a18675901b).
-  (Includes a comparison with PCRE1's JIT and Oniguruma.)
-* A bug where word boundaries weren't being matched correctly in the DFA was
-  fixed. This only affected use of `bytes::Regex`.
-* [#160](https://github.com/rust-lang/regex/issues/160):
-  `Captures` now has a `Debug` impl.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cargo.toml
deleted file mode 100644
index 8aa455b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cargo.toml
+++ /dev/null
@@ -1,202 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.65"
-name = "regex"
-version = "1.11.1"
-authors = [
-    "The Rust Project Developers",
-    "Andrew Gallant <jamslam@gmail.com>",
-]
-build = false
-exclude = [
-    "/scripts/*",
-    "/.github/*",
-]
-autolib = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = """
-An implementation of regular expressions for Rust. This implementation uses
-finite automata and guarantees linear time matching on all inputs.
-"""
-homepage = "https://github.com/rust-lang/regex"
-documentation = "https://docs.rs/regex"
-readme = "README.md"
-categories = ["text-processing"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/rust-lang/regex"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = [
-    "--cfg",
-    "docsrs",
-]
-
-[profile.bench]
-debug = 2
-
-[profile.dev]
-opt-level = 3
-debug = 2
-
-[profile.release]
-debug = 2
-
-[profile.test]
-opt-level = 3
-debug = 2
-
-[lib]
-name = "regex"
-path = "src/lib.rs"
-
-[[test]]
-name = "integration"
-path = "tests/lib.rs"
-
-[dependencies.aho-corasick]
-version = "1.0.0"
-optional = true
-default-features = false
-
-[dependencies.memchr]
-version = "2.6.0"
-optional = true
-default-features = false
-
-[dependencies.regex-automata]
-version = "0.4.8"
-features = [
-    "alloc",
-    "syntax",
-    "meta",
-    "nfa-pikevm",
-]
-default-features = false
-
-[dependencies.regex-syntax]
-version = "0.8.5"
-default-features = false
-
-[dev-dependencies.anyhow]
-version = "1.0.69"
-
-[dev-dependencies.doc-comment]
-version = "0.3"
-
-[dev-dependencies.env_logger]
-version = "0.9.3"
-features = [
-    "atty",
-    "humantime",
-    "termcolor",
-]
-default-features = false
-
-[dev-dependencies.once_cell]
-version = "1.17.1"
-
-[dev-dependencies.quickcheck]
-version = "1.0.3"
-default-features = false
-
-[dev-dependencies.regex-test]
-version = "0.1.0"
-
-[features]
-default = [
-    "std",
-    "perf",
-    "unicode",
-    "regex-syntax/default",
-]
-logging = [
-    "aho-corasick?/logging",
-    "memchr?/logging",
-    "regex-automata/logging",
-]
-pattern = []
-perf = [
-    "perf-cache",
-    "perf-dfa",
-    "perf-onepass",
-    "perf-backtrack",
-    "perf-inline",
-    "perf-literal",
-]
-perf-backtrack = ["regex-automata/nfa-backtrack"]
-perf-cache = []
-perf-dfa = ["regex-automata/hybrid"]
-perf-dfa-full = [
-    "regex-automata/dfa-build",
-    "regex-automata/dfa-search",
-]
-perf-inline = ["regex-automata/perf-inline"]
-perf-literal = [
-    "dep:aho-corasick",
-    "dep:memchr",
-    "regex-automata/perf-literal",
-]
-perf-onepass = ["regex-automata/dfa-onepass"]
-std = [
-    "aho-corasick?/std",
-    "memchr?/std",
-    "regex-automata/std",
-    "regex-syntax/std",
-]
-unicode = [
-    "unicode-age",
-    "unicode-bool",
-    "unicode-case",
-    "unicode-gencat",
-    "unicode-perl",
-    "unicode-script",
-    "unicode-segment",
-    "regex-automata/unicode",
-    "regex-syntax/unicode",
-]
-unicode-age = [
-    "regex-automata/unicode-age",
-    "regex-syntax/unicode-age",
-]
-unicode-bool = [
-    "regex-automata/unicode-bool",
-    "regex-syntax/unicode-bool",
-]
-unicode-case = [
-    "regex-automata/unicode-case",
-    "regex-syntax/unicode-case",
-]
-unicode-gencat = [
-    "regex-automata/unicode-gencat",
-    "regex-syntax/unicode-gencat",
-]
-unicode-perl = [
-    "regex-automata/unicode-perl",
-    "regex-automata/unicode-word-boundary",
-    "regex-syntax/unicode-perl",
-]
-unicode-script = [
-    "regex-automata/unicode-script",
-    "regex-syntax/unicode-script",
-]
-unicode-segment = [
-    "regex-automata/unicode-segment",
-    "regex-syntax/unicode-segment",
-]
-unstable = ["pattern"]
-use_std = ["std"]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cargo.toml.orig
deleted file mode 100644
index 60be5b9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cargo.toml.orig
+++ /dev/null
@@ -1,261 +0,0 @@
-[package]
-name = "regex"
-version = "1.11.1"  #:version
-authors = ["The Rust Project Developers", "Andrew Gallant <jamslam@gmail.com>"]
-license = "MIT OR Apache-2.0"
-readme = "README.md"
-repository = "https://github.com/rust-lang/regex"
-documentation = "https://docs.rs/regex"
-homepage = "https://github.com/rust-lang/regex"
-description = """
-An implementation of regular expressions for Rust. This implementation uses
-finite automata and guarantees linear time matching on all inputs.
-"""
-categories = ["text-processing"]
-autotests = false
-exclude = ["/scripts/*", "/.github/*"]
-edition = "2021"
-rust-version = "1.65"
-
-[workspace]
-members = [
-  "regex-automata",
-  "regex-capi",
-  "regex-cli",
-  "regex-lite",
-  "regex-syntax",
-  "regex-test",
-]
-
-# Features are documented in the "Crate features" section of the crate docs:
-# https://docs.rs/regex/*/#crate-features
-[features]
-default = ["std", "perf", "unicode", "regex-syntax/default"]
-
-# ECOSYSTEM FEATURES
-
-# The 'std' feature permits the regex crate to use the standard library. This
-# is intended to support future use cases where the regex crate may be able
-# to compile without std, and instead just rely on 'core' and 'alloc' (for
-# example). Currently, this isn't supported, and removing the 'std' feature
-# will prevent regex from compiling.
-std = [
-  "aho-corasick?/std",
-  "memchr?/std",
-  "regex-automata/std",
-  "regex-syntax/std",
-]
-# This feature enables the 'log' crate to emit messages. This is usually
-# only useful for folks working on the regex crate itself, but can be useful
-# if you're trying hard to do some performance hacking on regex patterns
-# themselves. Note that you'll need to pair this with a crate like 'env_logger'
-# to actually emit the log messages somewhere.
-logging = [
-  "aho-corasick?/logging",
-  "memchr?/logging",
-  "regex-automata/logging",
-]
-# The 'use_std' feature is DEPRECATED. It will be removed in regex 2. Until
-# then, it is an alias for the 'std' feature.
-use_std = ["std"]
-
-
-# PERFORMANCE FEATURES
-
-# Enables all default performance features. Note that this specifically does
-# not include perf-dfa-full, because it leads to higher compile times and
-# bigger binaries, and the runtime performance improvement is not obviously
-# worth it.
-perf = [
-  "perf-cache",
-  "perf-dfa",
-  "perf-onepass",
-  "perf-backtrack",
-  "perf-inline",
-  "perf-literal",
-]
-# Enables use of a lazy DFA when possible.
-perf-dfa = ["regex-automata/hybrid"]
-# Enables use of a fully compiled DFA when possible.
-perf-dfa-full = ["regex-automata/dfa-build", "regex-automata/dfa-search"]
-# Enables use of the one-pass regex matcher, which speeds up capture searches
-# even beyond the backtracker.
-perf-onepass = ["regex-automata/dfa-onepass"]
-# Enables use of a bounded backtracker, which speeds up capture searches.
-perf-backtrack = ["regex-automata/nfa-backtrack"]
-# Enables aggressive use of inlining.
-perf-inline = ["regex-automata/perf-inline"]
-# Enables literal optimizations.
-perf-literal = [
-  "dep:aho-corasick",
-  "dep:memchr",
-  "regex-automata/perf-literal",
-]
-# Enables fast caching. (If disabled, caching is still used, but is slower.)
-# Currently, this feature has no effect. It used to remove the thread_local
-# dependency and use a slower internal cache, but now the default cache has
-# been improved and thread_local is no longer a dependency at all.
-perf-cache = []
-
-
-# UNICODE DATA FEATURES
-
-# Enables all Unicode features. This expands if new Unicode features are added.
-unicode = [
-  "unicode-age",
-  "unicode-bool",
-  "unicode-case",
-  "unicode-gencat",
-  "unicode-perl",
-  "unicode-script",
-  "unicode-segment",
-  "regex-automata/unicode",
-  "regex-syntax/unicode",
-]
-# Enables use of the `Age` property, e.g., `\p{Age:3.0}`.
-unicode-age = [
-  "regex-automata/unicode-age",
-  "regex-syntax/unicode-age",
-]
-# Enables use of a smattering of boolean properties, e.g., `\p{Emoji}`.
-unicode-bool = [
-  "regex-automata/unicode-bool",
-  "regex-syntax/unicode-bool",
-]
-# Enables Unicode-aware case insensitive matching, e.g., `(?i)β`.
-unicode-case = [
-  "regex-automata/unicode-case",
-  "regex-syntax/unicode-case",
-]
-# Enables Unicode general categories, e.g., `\p{Letter}` or `\pL`.
-unicode-gencat = [
-  "regex-automata/unicode-gencat",
-  "regex-syntax/unicode-gencat",
-]
-# Enables Unicode-aware Perl classes corresponding to `\w`, `\s` and `\d`.
-unicode-perl = [
-  "regex-automata/unicode-perl",
-  "regex-automata/unicode-word-boundary",
-  "regex-syntax/unicode-perl",
-]
-# Enables Unicode scripts and script extensions, e.g., `\p{Greek}`.
-unicode-script = [
-  "regex-automata/unicode-script",
-  "regex-syntax/unicode-script",
-]
-# Enables Unicode segmentation properties, e.g., `\p{gcb=Extend}`.
-unicode-segment = [
-  "regex-automata/unicode-segment",
-  "regex-syntax/unicode-segment",
-]
-
-
-# UNSTABLE FEATURES (requires Rust nightly)
-
-# A blanket feature that governs whether unstable features are enabled or not.
-# Unstable features are disabled by default, and typically rely on unstable
-# features in rustc itself.
-unstable = ["pattern"]
-
-# Enable to use the unstable pattern traits defined in std. This is enabled
-# by default if the unstable feature is enabled.
-pattern = []
-
-# For very fast multi-prefix literal matching.
-[dependencies.aho-corasick]
-version = "1.0.0"
-optional = true
-default-features = false
-
-# For skipping along search text quickly when a leading byte is known.
-[dependencies.memchr]
-version = "2.6.0"
-optional = true
-default-features = false
-
-# For the actual regex engines.
-[dependencies.regex-automata]
-path = "regex-automata"
-version = "0.4.8"
-default-features = false
-features = ["alloc", "syntax", "meta", "nfa-pikevm"]
-
-# For parsing regular expressions.
-[dependencies.regex-syntax]
-path = "regex-syntax"
-version = "0.8.5"
-default-features = false
-
-[dev-dependencies]
-# For examples.
-once_cell = "1.17.1"
-# For property based tests.
-quickcheck = { version = "1.0.3", default-features = false }
-# To check README's example
-doc-comment = "0.3"
-# For easy error handling in integration tests.
-anyhow = "1.0.69"
-# A library for testing regex engines.
-regex-test = { path = "regex-test", version = "0.1.0" }
-
-[dev-dependencies.env_logger]
-# Note that this is currently using an older version because of the dependency
-# tree explosion that happened in 0.10.
-version = "0.9.3"
-default-features = false
-features = ["atty", "humantime", "termcolor"]
-
-# This test suite reads a whole boatload of tests from the top-level testdata
-# directory, and then runs them against the regex crate API.
-#
-# regex-automata has its own version of them, and runs them against each
-# internal regex engine individually.
-#
-# This means that if you're seeing a failure in this test suite, you should
-# try running regex-automata's tests:
-#
-#     cargo test --manifest-path regex-automata/Cargo.toml --test integration
-#
-# That *might* give you a more targeted test failure. i.e., "only the
-# PikeVM fails this test." Which gives you a narrower place to search. If
-# regex-automata's test suite passes, then the bug might be in the integration
-# of the regex crate and regex-automata. But generally speaking, a failure
-# in this test suite *should* mean there is a corresponding failure in
-# regex-automata's test suite.
-[[test]]
-path = "tests/lib.rs"
-name = "integration"
-
-[package.metadata.docs.rs]
-# We want to document all features.
-all-features = true
-# Since this crate's feature setup is pretty complicated, it is worth opting
-# into a nightly unstable option to show the features that need to be enabled
-# for public API items. To do that, we set 'docsrs', and when that's enabled,
-# we enable the 'doc_auto_cfg' feature.
-#
-# To test this locally, run:
-#
-#     RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features
-rustdoc-args = ["--cfg", "docsrs"]
-
-[profile.release]
-debug = true
-
-[profile.bench]
-debug = true
-
-[profile.dev]
-# Running tests takes too long in debug mode, so we forcefully always build
-# with optimizations. Unfortunate, but, ¯\_(ツ)_/¯.
-#
-# It's counter-intuitive that this needs to be set on dev *and* test, but
-# it's because the tests that take a long time to run are run as integration
-# tests in a separate crate. The test.opt-level setting won't apply there, so
-# we need to set the opt-level across the entire build.
-opt-level = 3
-debug = true
-
-[profile.test]
-opt-level = 3
-debug = true
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cross.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cross.toml
deleted file mode 100644
index 5415e7a4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/Cross.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-[build.env]
-passthrough = [
-    "RUST_BACKTRACE",
-    "RUST_LOG",
-    "REGEX_TEST",
-    "REGEX_TEST_VERBOSE",
-]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/LICENSE-APACHE
deleted file mode 100644
index 16fe87b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-	http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/LICENSE-MIT
deleted file mode 100644
index 39d4bdb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/LICENSE-MIT
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2014 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/README.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/README.md
deleted file mode 100644
index f1e4c40..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/README.md
+++ /dev/null
@@ -1,332 +0,0 @@
-regex
-=====
-This crate provides routines for searching strings for matches of a [regular
-expression] (aka "regex"). The regex syntax supported by this crate is similar
-to other regex engines, but it lacks several features that are not known how to
-implement efficiently. This includes, but is not limited to, look-around and
-backreferences. In exchange, all regex searches in this crate have worst case
-`O(m * n)` time complexity, where `m` is proportional to the size of the regex
-and `n` is proportional to the size of the string being searched.
-
-[regular expression]: https://en.wikipedia.org/wiki/Regular_expression
-
-[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions)
-[![Crates.io](https://img.shields.io/crates/v/regex.svg)](https://crates.io/crates/regex)
-
-### Documentation
-
-[Module documentation with examples](https://docs.rs/regex).
-The module documentation also includes a comprehensive description of the
-syntax supported.
-
-Documentation with examples for the various matching functions and iterators
-can be found on the
-[`Regex` type](https://docs.rs/regex/*/regex/struct.Regex.html).
-
-### Usage
-
-To bring this crate into your repository, either add `regex` to your
-`Cargo.toml`, or run `cargo add regex`.
-
-Here's a simple example that matches a date in YYYY-MM-DD format and prints the
-year, month and day:
-
-```rust
-use regex::Regex;
-
-fn main() {
-    let re = Regex::new(r"(?x)
-(?P<year>\d{4})  # the year
--
-(?P<month>\d{2}) # the month
--
-(?P<day>\d{2})   # the day
-").unwrap();
-
-    let caps = re.captures("2010-03-14").unwrap();
-    assert_eq!("2010", &caps["year"]);
-    assert_eq!("03", &caps["month"]);
-    assert_eq!("14", &caps["day"]);
-}
-```
-
-If you have lots of dates in text that you'd like to iterate over, then it's
-easy to adapt the above example with an iterator:
-
-```rust
-use regex::Regex;
-
-fn main() {
-    let re = Regex::new(r"(\d{4})-(\d{2})-(\d{2})").unwrap();
-    let hay = "On 2010-03-14, foo happened. On 2014-10-14, bar happened.";
-
-    let mut dates = vec![];
-    for (_, [year, month, day]) in re.captures_iter(hay).map(|c| c.extract()) {
-        dates.push((year, month, day));
-    }
-    assert_eq!(dates, vec![
-      ("2010", "03", "14"),
-      ("2014", "10", "14"),
-    ]);
-}
-```
-
-### Usage: Avoid compiling the same regex in a loop
-
-It is an anti-pattern to compile the same regular expression in a loop since
-compilation is typically expensive. (It takes anywhere from a few microseconds
-to a few **milliseconds** depending on the size of the regex.) Not only is
-compilation itself expensive, but this also prevents optimizations that reuse
-allocations internally to the matching engines.
-
-In Rust, it can sometimes be a pain to pass regular expressions around if
-they're used from inside a helper function. Instead, we recommend using the
-[`once_cell`](https://crates.io/crates/once_cell) crate to ensure that
-regular expressions are compiled exactly once. For example:
-
-```rust
-use {
-    once_cell::sync::Lazy,
-    regex::Regex,
-};
-
-fn some_helper_function(haystack: &str) -> bool {
-    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"...").unwrap());
-    RE.is_match(haystack)
-}
-
-fn main() {
-    assert!(some_helper_function("abc"));
-    assert!(!some_helper_function("ac"));
-}
-```
-
-Specifically, in this example, the regex will be compiled when it is used for
-the first time. On subsequent uses, it will reuse the previous compilation.
-
-### Usage: match regular expressions on `&[u8]`
-
-The main API of this crate (`regex::Regex`) requires the caller to pass a
-`&str` for searching. In Rust, an `&str` is required to be valid UTF-8, which
-means the main API can't be used for searching arbitrary bytes.
-
-To match on arbitrary bytes, use the `regex::bytes::Regex` API. The API is
-identical to the main API, except that it takes an `&[u8]` to search on instead
-of an `&str`. The `&[u8]` APIs also permit disabling Unicode mode in the regex
-even when the pattern would match invalid UTF-8. For example, `(?-u:.)` is
-not allowed in `regex::Regex` but is allowed in `regex::bytes::Regex` since
-`(?-u:.)` matches any byte except for `\n`. Conversely, `.` will match the
-UTF-8 encoding of any Unicode scalar value except for `\n`.
-
-This example shows how to find all null-terminated strings in a slice of bytes:
-
-```rust
-use regex::bytes::Regex;
-
-let re = Regex::new(r"(?-u)(?<cstr>[^\x00]+)\x00").unwrap();
-let text = b"foo\xFFbar\x00baz\x00";
-
-// Extract all of the strings without the null terminator from each match.
-// The unwrap is OK here since a match requires the `cstr` capture to match.
-let cstrs: Vec<&[u8]> =
-    re.captures_iter(text)
-      .map(|c| c.name("cstr").unwrap().as_bytes())
-      .collect();
-assert_eq!(vec![&b"foo\xFFbar"[..], &b"baz"[..]], cstrs);
-```
-
-Notice here that the `[^\x00]+` will match any *byte* except for `NUL`,
-including bytes like `\xFF` which are not valid UTF-8. When using the main API,
-`[^\x00]+` would instead match any valid UTF-8 sequence except for `NUL`.
-
-### Usage: match multiple regular expressions simultaneously
-
-This demonstrates how to use a `RegexSet` to match multiple (possibly
-overlapping) regular expressions in a single scan of the search text:
-
-```rust
-use regex::RegexSet;
-
-let set = RegexSet::new(&[
-    r"\w+",
-    r"\d+",
-    r"\pL+",
-    r"foo",
-    r"bar",
-    r"barfoo",
-    r"foobar",
-]).unwrap();
-
-// Iterate over and collect all of the matches.
-let matches: Vec<_> = set.matches("foobar").into_iter().collect();
-assert_eq!(matches, vec![0, 2, 3, 4, 6]);
-
-// You can also test whether a particular regex matched:
-let matches = set.matches("foobar");
-assert!(!matches.matched(5));
-assert!(matches.matched(6));
-```
-
-
-### Usage: regex internals as a library
-
-The [`regex-automata` directory](./regex-automata/) contains a crate that
-exposes all of the internal matching engines used by the `regex` crate. The
-idea is that the `regex` crate exposes a simple API for 99% of use cases, but
-`regex-automata` exposes oodles of customizable behaviors.
-
-[Documentation for `regex-automata`.](https://docs.rs/regex-automata)
-
-
-### Usage: a regular expression parser
-
-This repository contains a crate that provides a well tested regular expression
-parser, abstract syntax and a high-level intermediate representation for
-convenient analysis. It provides no facilities for compilation or execution.
-This may be useful if you're implementing your own regex engine or otherwise
-need to do analysis on the syntax of a regular expression. It is otherwise not
-recommended for general use.
-
-[Documentation for `regex-syntax`.](https://docs.rs/regex-syntax)
-
-
-### Crate features
-
-This crate comes with several features that permit tweaking the trade off
-between binary size, compilation time and runtime performance. Users of this
-crate can selectively disable Unicode tables, or choose from a variety of
-optimizations performed by this crate to disable.
-
-When all of these features are disabled, runtime match performance may be much
-worse, but if you're matching on short strings, or if high performance isn't
-necessary, then such a configuration is perfectly serviceable. To disable
-all such features, use the following `Cargo.toml` dependency configuration:
-
-```toml
-[dependencies.regex]
-version = "1.3"
-default-features = false
-# Unless you have a specific reason not to, it's good sense to enable standard
-# library support. It enables several optimizations and avoids spin locks. It
-# also shouldn't meaningfully impact compile times or binary size.
-features = ["std"]
-```
-
-This will reduce the dependency tree of `regex` down to two crates:
-`regex-syntax` and `regex-automata`.
-
-The full set of features one can disable are
-[in the "Crate features" section of the documentation](https://docs.rs/regex/1.*/#crate-features).
-
-
-### Performance
-
-One of the goals of this crate is for the regex engine to be "fast." What that
-is a somewhat nebulous goal, it is usually interpreted in one of two ways.
-First, it means that all searches take worst case `O(m * n)` time, where
-`m` is proportional to `len(regex)` and `n` is proportional to `len(haystack)`.
-Second, it means that even aside from the time complexity constraint, regex
-searches are "fast" in practice.
-
-While the first interpretation is pretty unambiguous, the second one remains
-nebulous. While nebulous, it guides this crate's architecture and the sorts of
-the trade offs it makes. For example, here are some general architectural
-statements that follow as a result of the goal to be "fast":
-
-* When given the choice between faster regex searches and faster _Rust compile
-times_, this crate will generally choose faster regex searches.
-* When given the choice between faster regex searches and faster _regex compile
-times_, this crate will generally choose faster regex searches. That is, it is
-generally acceptable for `Regex::new` to get a little slower if it means that
-searches get faster. (This is a somewhat delicate balance to strike, because
-the speed of `Regex::new` needs to remain somewhat reasonable. But this is why
-one should avoid re-compiling the same regex over and over again.)
-* When given the choice between faster regex searches and simpler API
-design, this crate will generally choose faster regex searches. For example,
-if one didn't care about performance, we could like get rid of both of
-the `Regex::is_match` and `Regex::find` APIs and instead just rely on
-`Regex::captures`.
-
-There are perhaps more ways that being "fast" influences things.
-
-While this repository used to provide its own benchmark suite, it has since
-been moved to [rebar](https://github.com/BurntSushi/rebar). The benchmarks are
-quite extensive, and there are many more than what is shown in rebar's README
-(which is just limited to a "curated" set meant to compare performance between
-regex engines). To run all of this crate's benchmarks, first start by cloning
-and installing `rebar`:
-
-```text
-$ git clone https://github.com/BurntSushi/rebar
-$ cd rebar
-$ cargo install --path ./
-```
-
-Then build the benchmark harness for just this crate:
-
-```text
-$ rebar build -e '^rust/regex$'
-```
-
-Run all benchmarks for this crate as tests (each benchmark is executed once to
-ensure it works):
-
-```text
-$ rebar measure -e '^rust/regex$' -t
-```
-
-Record measurements for all benchmarks and save them to a CSV file:
-
-```text
-$ rebar measure -e '^rust/regex$' | tee results.csv
-```
-
-Explore benchmark timings:
-
-```text
-$ rebar cmp results.csv
-```
-
-See the `rebar` documentation for more details on how it works and how to
-compare results with other regex engines.
-
-
-### Hacking
-
-The `regex` crate is, for the most part, a pretty thin wrapper around the
-[`meta::Regex`](https://docs.rs/regex-automata/latest/regex_automata/meta/struct.Regex.html)
-from the
-[`regex-automata` crate](https://docs.rs/regex-automata/latest/regex_automata/).
-Therefore, if you're looking to work on the internals of this crate, you'll
-likely either want to look in `regex-syntax` (for parsing) or `regex-automata`
-(for construction of finite automata and the search routines).
-
-My [blog on regex internals](https://blog.burntsushi.net/regex-internals/)
-goes into more depth.
-
-
-### Minimum Rust version policy
-
-This crate's minimum supported `rustc` version is `1.65.0`.
-
-The policy is that the minimum Rust version required to use this crate can be
-increased in minor version updates. For example, if regex 1.0 requires Rust
-1.20.0, then regex 1.0.z for all values of `z` will also require Rust 1.20.0 or
-newer. However, regex 1.y for `y > 0` may require a newer minimum version of
-Rust.
-
-
-### License
-
-This project is licensed under either of
-
- * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
-   https://www.apache.org/licenses/LICENSE-2.0)
- * MIT license ([LICENSE-MIT](LICENSE-MIT) or
-   https://opensource.org/licenses/MIT)
-
-at your option.
-
-The data in `regex-syntax/src/unicode_tables/` is licensed under the Unicode
-License Agreement
-([LICENSE-UNICODE](https://www.unicode.org/copyright.html#License)).
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/UNICODE.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/UNICODE.md
deleted file mode 100644
index 60db0aa..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/UNICODE.md
+++ /dev/null
@@ -1,258 +0,0 @@
-# Unicode conformance
-
-This document describes the regex crate's conformance to Unicode's
-[UTS#18](https://unicode.org/reports/tr18/)
-report, which lays out 3 levels of support: Basic, Extended and Tailored.
-
-Full support for Level 1 ("Basic Unicode Support") is provided with two
-exceptions:
-
-1. Line boundaries are not Unicode aware. Namely, only the `\n`
-   (`END OF LINE`) character is recognized as a line boundary by default.
-   One can opt into `\r\n|\r|\n` being a line boundary via CRLF mode.
-2. The compatibility properties specified by
-   [RL1.2a](https://unicode.org/reports/tr18/#RL1.2a)
-   are ASCII-only definitions.
-
-Little to no support is provided for either Level 2 or Level 3. For the most
-part, this is because the features are either complex/hard to implement, or at
-the very least, very difficult to implement without sacrificing performance.
-For example, tackling canonical equivalence such that matching worked as one
-would expect regardless of normalization form would be a significant
-undertaking. This is at least partially a result of the fact that this regex
-engine is based on finite automata, which admits less flexibility normally
-associated with backtracking implementations.
-
-
-## RL1.1 Hex Notation
-
-[UTS#18 RL1.1](https://unicode.org/reports/tr18/#Hex_notation)
-
-Hex Notation refers to the ability to specify a Unicode code point in a regular
-expression via its hexadecimal code point representation. This is useful in
-environments that have poor Unicode font rendering or if you need to express a
-code point that is not normally displayable. All forms of hexadecimal notation
-are supported
-
-    \x7F        hex character code (exactly two digits)
-    \x{10FFFF}  any hex character code corresponding to a Unicode code point
-    \u007F      hex character code (exactly four digits)
-    \u{7F}      any hex character code corresponding to a Unicode code point
-    \U0000007F  hex character code (exactly eight digits)
-    \U{7F}      any hex character code corresponding to a Unicode code point
-
-Briefly, the `\x{...}`, `\u{...}` and `\U{...}` are all exactly equivalent ways
-of expressing hexadecimal code points. Any number of digits can be written
-within the brackets. In contrast, `\xNN`, `\uNNNN`, `\UNNNNNNNN` are all
-fixed-width variants of the same idea.
-
-Note that when Unicode mode is disabled, any non-ASCII Unicode codepoint is
-banned. Additionally, the `\xNN` syntax represents arbitrary bytes when Unicode
-mode is disabled. That is, the regex `\xFF` matches the Unicode codepoint
-U+00FF (encoded as `\xC3\xBF` in UTF-8) while the regex `(?-u)\xFF` matches
-the literal byte `\xFF`.
-
-
-## RL1.2 Properties
-
-[UTS#18 RL1.2](https://unicode.org/reports/tr18/#Categories)
-
-Full support for Unicode property syntax is provided. Unicode properties
-provide a convenient way to construct character classes of groups of code
-points specified by Unicode. The regex crate does not provide exhaustive
-support, but covers a useful subset. In particular:
-
-* [General categories](https://unicode.org/reports/tr18/#General_Category_Property)
-* [Scripts and Script Extensions](https://unicode.org/reports/tr18/#Script_Property)
-* [Age](https://unicode.org/reports/tr18/#Age)
-* A smattering of boolean properties, including all of those specified by
-  [RL1.2](https://unicode.org/reports/tr18/#RL1.2) explicitly.
-
-In all cases, property name and value abbreviations are supported, and all
-names/values are matched loosely without regard for case, whitespace or
-underscores. Property name aliases can be found in Unicode's
-[`PropertyAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyAliases.txt)
-file, while property value aliases can be found in Unicode's
-[`PropertyValueAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyValueAliases.txt)
-file.
-
-The syntax supported is also consistent with the UTS#18 recommendation:
-
-* `\p{Greek}` selects the `Greek` script. Equivalent expressions follow:
-  `\p{sc:Greek}`, `\p{Script:Greek}`, `\p{Sc=Greek}`, `\p{script=Greek}`,
-  `\P{sc!=Greek}`. Similarly for `General_Category` (or `gc` for short) and
-  `Script_Extensions` (or `scx` for short).
-* `\p{age:3.2}` selects all code points in Unicode 3.2.
-* `\p{Alphabetic}` selects the "alphabetic" property and can be abbreviated
-  via `\p{alpha}` (for example).
-* Single letter variants for properties with single letter abbreviations.
-  For example, `\p{Letter}` can be equivalently written as `\pL`.
-
-The following is a list of all properties supported by the regex crate (starred
-properties correspond to properties required by RL1.2):
-
-* `General_Category` \* (including `Any`, `ASCII` and `Assigned`)
-* `Script` \*
-* `Script_Extensions` \*
-* `Age`
-* `ASCII_Hex_Digit`
-* `Alphabetic` \*
-* `Bidi_Control`
-* `Case_Ignorable`
-* `Cased`
-* `Changes_When_Casefolded`
-* `Changes_When_Casemapped`
-* `Changes_When_Lowercased`
-* `Changes_When_Titlecased`
-* `Changes_When_Uppercased`
-* `Dash`
-* `Default_Ignorable_Code_Point` \*
-* `Deprecated`
-* `Diacritic`
-* `Emoji`
-* `Emoji_Presentation`
-* `Emoji_Modifier`
-* `Emoji_Modifier_Base`
-* `Emoji_Component`
-* `Extended_Pictographic`
-* `Extender`
-* `Grapheme_Base`
-* `Grapheme_Cluster_Break`
-* `Grapheme_Extend`
-* `Hex_Digit`
-* `IDS_Binary_Operator`
-* `IDS_Trinary_Operator`
-* `ID_Continue`
-* `ID_Start`
-* `Join_Control`
-* `Logical_Order_Exception`
-* `Lowercase` \*
-* `Math`
-* `Noncharacter_Code_Point` \*
-* `Pattern_Syntax`
-* `Pattern_White_Space`
-* `Prepended_Concatenation_Mark`
-* `Quotation_Mark`
-* `Radical`
-* `Regional_Indicator`
-* `Sentence_Break`
-* `Sentence_Terminal`
-* `Soft_Dotted`
-* `Terminal_Punctuation`
-* `Unified_Ideograph`
-* `Uppercase` \*
-* `Variation_Selector`
-* `White_Space` \*
-* `Word_Break`
-* `XID_Continue`
-* `XID_Start`
-
-
-## RL1.2a Compatibility Properties
-
-[UTS#18 RL1.2a](https://unicode.org/reports/tr18/#RL1.2a)
-
-The regex crate only provides ASCII definitions of the
-[compatibility properties documented in UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties)
-(sans the `\X` class, for matching grapheme clusters, which isn't provided
-at all). This is because it seems to be consistent with most other regular
-expression engines, and in particular, because these are often referred to as
-"ASCII" or "POSIX" character classes.
-
-Note that the `\w`, `\s` and `\d` character classes **are** Unicode aware.
-Their traditional ASCII definition can be used by disabling Unicode. That is,
-`[[:word:]]` and `(?-u)\w` are equivalent.
-
-
-## RL1.3 Subtraction and Intersection
-
-[UTS#18 RL1.3](https://unicode.org/reports/tr18/#Subtraction_and_Intersection)
-
-The regex crate provides full support for nested character classes, along with
-union, intersection (`&&`), difference (`--`) and symmetric difference (`~~`)
-operations on arbitrary character classes.
-
-For example, to match all non-ASCII letters, you could use either
-`[\p{Letter}--\p{Ascii}]` (difference) or `[\p{Letter}&&[^\p{Ascii}]]`
-(intersecting the negation).
-
-
-## RL1.4 Simple Word Boundaries
-
-[UTS#18 RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries)
-
-The regex crate provides basic Unicode aware word boundary assertions. A word
-boundary assertion can be written as `\b`, or `\B` as its negation. A word
-boundary negation corresponds to a zero-width match, where its adjacent
-characters correspond to word and non-word, or non-word and word characters.
-
-Conformance in this case chooses to define word character in the same way that
-the `\w` character class is defined: a code point that is a member of one of
-the following classes:
-
-* `\p{Alphabetic}`
-* `\p{Join_Control}`
-* `\p{gc:Mark}`
-* `\p{gc:Decimal_Number}`
-* `\p{gc:Connector_Punctuation}`
-
-In particular, this differs slightly from the
-[prescription given in RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries)
-but is permissible according to
-[UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
-Namely, it is convenient and simpler to have `\w` and `\b` be in sync with
-one another.
-
-Finally, Unicode word boundaries can be disabled, which will cause ASCII word
-boundaries to be used instead. That is, `\b` is a Unicode word boundary while
-`(?-u)\b` is an ASCII-only word boundary. This can occasionally be beneficial
-if performance is important, since the implementation of Unicode word
-boundaries is currently sub-optimal on non-ASCII text.
-
-
-## RL1.5 Simple Loose Matches
-
-[UTS#18 RL1.5](https://unicode.org/reports/tr18/#Simple_Loose_Matches)
-
-The regex crate provides full support for case insensitive matching in
-accordance with RL1.5. That is, it uses the "simple" case folding mapping. The
-"simple" mapping was chosen because of a key convenient property: every
-"simple" mapping is a mapping from exactly one code point to exactly one other
-code point. This makes case insensitive matching of character classes, for
-example, straight-forward to implement.
-
-When case insensitive mode is enabled (e.g., `(?i)[a]` is equivalent to `a|A`),
-then all characters classes are case folded as well.
-
-
-## RL1.6 Line Boundaries
-
-[UTS#18 RL1.6](https://unicode.org/reports/tr18/#Line_Boundaries)
-
-The regex crate only provides support for recognizing the `\n` (`END OF LINE`)
-character as a line boundary by default. One can also opt into treating
-`\r\n|\r|\n` as a line boundary via CRLF mode. This choice was made mostly for
-implementation convenience, and to avoid performance cliffs that Unicode word
-boundaries are subject to.
-
-
-## RL1.7 Code Points
-
-[UTS#18 RL1.7](https://unicode.org/reports/tr18/#Supplementary_Characters)
-
-The regex crate provides full support for Unicode code point matching. Namely,
-the fundamental atom of any match is always a single code point.
-
-Given Rust's strong ties to UTF-8, the following guarantees are also provided:
-
-* All matches are reported on valid UTF-8 code unit boundaries. That is, any
-  match range returned by the public regex API is guaranteed to successfully
-  slice the string that was searched.
-* By consequence of the above, it is impossible to match surrogode code points.
-  No support for UTF-16 is provided, so this is never necessary.
-
-Note that when Unicode mode is disabled, the fundamental atom of matching is
-no longer a code point but a single byte. When Unicode mode is disabled, many
-Unicode features are disabled as well. For example, `(?-u)\pL` is not a valid
-regex but `\pL(?-u)\xFF` (matches any Unicode `Letter` followed by the literal
-byte `\xFF`) is, for example.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/bench/README.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/bench/README.md
deleted file mode 100644
index 3cc6a1a7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/bench/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Benchmarks for this crate have been moved into the rebar project:
-https://github.com/BurntSushi/rebar
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/README.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/README.md
deleted file mode 100644
index 432b06a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-This directory contains various recordings of results. These are committed to
-the repository so that they can be compared over time. (At the time of writing,
-there is no tooling for facilitating this comparison. It has to be done
-manually.)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-04-19_1.7.3.csv b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-04-19_1.7.3.csv
deleted file mode 100644
index af62da10..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-04-19_1.7.3.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-name,crate,revision,profile,duration,size,relative-size
-regex__dev__std_perf_unicode,regex,9582040009,dev,1.824209152s,3434992,3113064
-regex__dev__std,regex,9582040009,dev,1.206314935s,1362392,1040464
-regex__dev__std_perf,regex,9582040009,dev,1.543583435s,2726384,2404456
-regex__dev__std_unicode,regex,9582040009,dev,1.490095643s,2066904,1744976
-regex__dev__std_unicode-case_unicode-perl,regex,9582040009,dev,1.292011694s,1812952,1491024
-regex__release__std_perf_unicode,regex,9582040009,release,2.398133563s,1616216,1294368
-regex__release__std,regex,9582040009,release,1.413680252s,694592,372744
-regex__release__std_perf,regex,9582040009,release,2.341496191s,1124696,802848
-regex__release__std_unicode,regex,9582040009,release,1.671407822s,1190208,868360
-regex__release__std_unicode-case_unicode-perl,regex,9582040009,release,1.441712198s,932160,610312
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-04-20_master.csv b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-04-20_master.csv
deleted file mode 100644
index 4c3e916..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-04-20_master.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-name,crate,revision,profile,duration,size,relative-size
-regex__dev__std_perf_unicode,regex,f1f99af2bc,dev,1.834267609s,3799536,3477608
-regex__dev__std,regex,f1f99af2bc,dev,1.263958602s,1427928,1106000
-regex__dev__std_perf,regex,f1f99af2bc,dev,1.631302845s,3234288,2912360
-regex__dev__std_unicode,regex,f1f99af2bc,dev,1.550536696s,1997272,1675344
-regex__dev__std_unicode-case_unicode-perl,regex,f1f99af2bc,dev,1.341622852s,1739224,1417296
-regex__release__std_perf_unicode,regex,f1f99af2bc,release,2.475080323s,1755480,1433632
-regex__release__std,regex,f1f99af2bc,release,1.45990031s,731456,409608
-regex__release__std_perf,regex,f1f99af2bc,release,2.421787211s,1259864,938016
-regex__release__std_unicode,regex,f1f99af2bc,release,1.693972619s,1227072,905224
-regex__release__std_unicode-case_unicode-perl,regex,f1f99af2bc,release,1.528003306s,969024,647176
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-07-05.csv b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-07-05.csv
deleted file mode 100644
index 6ec81f55..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/2023-07-05.csv
+++ /dev/null
@@ -1,37 +0,0 @@
-name,crate,revision,profile,duration,size,relative-size
-regex__dev__std_perf_unicode,regex,53786ce797,dev,2.414172223s,4143600,3764328
-regex__dev__std_perf_unicode_perf-dfa-full,regex,53786ce797,dev,2.900927164s,4815368,4436096
-regex__dev__std,regex,53786ce797,dev,1.662626059s,2062808,1683536
-regex__dev__std_perf,regex,53786ce797,dev,2.136755026s,3574256,3194984
-regex__dev__std_unicode,regex,53786ce797,dev,1.943953132s,2623960,2244688
-regex__dev__std_unicode-case_unicode-perl,regex,53786ce797,dev,1.753222606s,2374104,1994832
-regex-lite__dev__std_string,regex,53786ce797,dev,498.158769ms,727504,348232
-regex-automata__dev__std_syntax_perf_unicode_meta_nfa_dfa_hybrid,regex-automata,53786ce797,dev,2.900832296s,4872712,4493440
-regex-automata__dev__std_syntax_nfa-pikevm,regex-automata,53786ce797,dev,1.413429089s,1501648,1122376
-regex-automata__dev__std_syntax_nfa-backtrack,regex-automata,53786ce797,dev,1.412429191s,1505744,1126472
-regex-automata__dev__std_syntax_hybrid,regex-automata,53786ce797,dev,1.678331978s,1632720,1253448
-regex-automata__dev__std_syntax_dfa-onepass,regex-automata,53786ce797,dev,1.594526299s,1526224,1146952
-regex-automata__dev__std_syntax_unicode_meta_nfa_dfa_hybrid,regex-automata,53786ce797,dev,2.992024402s,3500504,3121232
-regex-automata__dev__std_syntax_perf_unicode_meta_nfa_hybrid_dfa-onepass,regex-automata,53786ce797,dev,2.378489598s,4119024,3739752
-regex-automata__dev__std_syntax_perf_meta_nfa_dfa_hybrid,regex-automata,53786ce797,dev,2.695475914s,4299272,3920000
-regex-automata__dev__std_syntax_perf_meta_nfa_hybrid_dfa-onepass,regex-automata,53786ce797,dev,2.120929251s,3549680,3170408
-regex-automata__dev__std_unicode_meta,regex-automata,53786ce797,dev,1.89728585s,2492888,2113616
-regex-automata__dev__std_meta,regex-automata,53786ce797,dev,1.604628942s,1927640,1548368
-regex__release__std_perf_unicode,regex,53786ce797,release,3.333636908s,2025816,1650720
-regex__release__std_perf_unicode_perf-dfa-full,regex,53786ce797,release,3.805434309s,2210160,1835064
-regex__release__std,regex,53786ce797,release,1.789749444s,932160,557064
-regex__release__std_perf,regex,53786ce797,release,2.734249431s,1505624,1130528
-regex__release__std_unicode,regex,53786ce797,release,2.04945845s,1431872,1056776
-regex__release__std_unicode-case_unicode-perl,regex,53786ce797,release,1.893829903s,1173824,798728
-regex-lite__release__std_string,regex,53786ce797,release,648.517079ms,473400,98304
-regex-automata__release__std_syntax_perf_unicode_meta_nfa_dfa_hybrid,regex-automata,53786ce797,release,3.893237683s,2242928,1867832
-regex-automata__release__std_syntax_nfa-pikevm,regex-automata,53786ce797,release,1.556952008s,780600,405504
-regex-automata__release__std_syntax_nfa-backtrack,regex-automata,53786ce797,release,1.576471926s,768312,393216
-regex-automata__release__std_syntax_hybrid,regex-automata,53786ce797,release,1.819539266s,813368,438272
-regex-automata__release__std_syntax_dfa-onepass,regex-automata,53786ce797,release,1.672511482s,776504,401408
-regex-automata__release__std_syntax_unicode_meta_nfa_dfa_hybrid,regex-automata,53786ce797,release,3.227157436s,1767744,1392648
-regex-automata__release__std_syntax_perf_unicode_meta_nfa_hybrid_dfa-onepass,regex-automata,53786ce797,release,3.340235296s,2005336,1630240
-regex-automata__release__std_syntax_perf_meta_nfa_dfa_hybrid,regex-automata,53786ce797,release,3.640335773s,1718640,1343544
-regex-automata__release__std_syntax_perf_meta_nfa_hybrid_dfa-onepass,regex-automata,53786ce797,release,2.876306297s,1489240,1114144
-regex-automata__release__std_unicode_meta,regex-automata,53786ce797,release,1.945654415s,1362240,987144
-regex-automata__release__std_meta,regex-automata,53786ce797,release,1.740500411s,862528,487432
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/README.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/README.md
deleted file mode 100644
index 7291d5d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/compile-test/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-This directory contains the results of compilation tests. Specifically,
-the results are from testing both the from scratch compilation time and
-relative binary size increases of various features for both the `regex` and
-`regex-automata` crates.
-
-Here's an example of how to run these tests for just the `regex` crate. You'll
-need the `regex-cli` command installed, which can be found in the `regex-cli`
-directory in the root of this repository.
-
-This must be run in the root of a checkout of this repository.
-
-```
-$ mkdir /tmp/regex-compile-test
-$ regex-cli compile-test ./ /tmp/regex-compile-test | tee record/compile-test/2023-04-19_1.7.3.csv
-```
-
-You can then look at the results using a tool like [`xsv`][xsv]:
-
-```
-$ xsv table record/compile-test/2023-04-19_1.7.3.csv
-```
-
-Note that the relative binary size is computed by building a "baseline" hello
-world program, and then subtracting that from the size of a binary that uses
-the regex crate.
-
-[xsv]: https://github.com/BurntSushi/xsv
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/.gitignore b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/.gitignore
deleted file mode 100644
index a9a5aec..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-tmp
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/dynamic b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/dynamic
deleted file mode 100644
index 9ef21737..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/dynamic
+++ /dev/null
@@ -1,73 +0,0 @@
-     Running target/release/dynamic-e87a67d7ea67f0eb
-
-running 67 tests
-test bench::anchored_literal_long_match            ... bench:          75 ns/iter (+/- 3) = 5200 MB/s
-test bench::anchored_literal_long_non_match        ... bench:          61 ns/iter (+/- 2) = 6393 MB/s
-test bench::anchored_literal_short_match           ... bench:          75 ns/iter (+/- 3) = 346 MB/s
-test bench::anchored_literal_short_non_match       ... bench:          61 ns/iter (+/- 1) = 426 MB/s
-test bench::easy0_1K                               ... bench:         196 ns/iter (+/- 8) = 5224 MB/s
-test bench::easy0_1MB                              ... bench:     255,138 ns/iter (+/- 4,820) = 4109 MB/s
-test bench::easy0_32                               ... bench:          71 ns/iter (+/- 2) = 450 MB/s
-test bench::easy0_32K                              ... bench:       5,392 ns/iter (+/- 108) = 6077 MB/s
-test bench::easy1_1K                               ... bench:         241 ns/iter (+/- 37) = 4248 MB/s
-test bench::easy1_1MB                              ... bench:     334,872 ns/iter (+/- 3,433) = 3131 MB/s
-test bench::easy1_32                               ... bench:          65 ns/iter (+/- 2) = 492 MB/s
-test bench::easy1_32K                              ... bench:       6,139 ns/iter (+/- 703) = 5337 MB/s
-test bench::hard_1K                                ... bench:       4,654 ns/iter (+/- 63) = 220 MB/s
-test bench::hard_1MB                               ... bench:   4,719,487 ns/iter (+/- 71,818) = 222 MB/s
-test bench::hard_32                                ... bench:         199 ns/iter (+/- 8) = 160 MB/s
-test bench::hard_32K                               ... bench:     147,389 ns/iter (+/- 4,391) = 222 MB/s
-test bench::literal                                ... bench:          20 ns/iter (+/- 4) = 2550 MB/s
-test bench::match_class                            ... bench:          85 ns/iter (+/- 4) = 952 MB/s
-test bench::match_class_in_range                   ... bench:          32 ns/iter (+/- 3) = 2531 MB/s
-test bench::match_class_unicode                    ... bench:         783 ns/iter (+/- 13) = 205 MB/s
-test bench::medium_1K                              ... bench:       1,334 ns/iter (+/- 154) = 767 MB/s
-test bench::medium_1MB                             ... bench:   2,044,757 ns/iter (+/- 72,936) = 512 MB/s
-test bench::medium_32                              ... bench:          99 ns/iter (+/- 18) = 323 MB/s
-test bench::medium_32K                             ... bench:      59,603 ns/iter (+/- 13,750) = 549 MB/s
-test bench::no_exponential                         ... bench:         553 ns/iter (+/- 150) = 180 MB/s
-test bench::not_literal                            ... bench:         293 ns/iter (+/- 59) = 174 MB/s
-test bench::one_pass_long_prefix                   ... bench:         177 ns/iter (+/- 35) = 146 MB/s
-test bench::one_pass_long_prefix_not               ... bench:         175 ns/iter (+/- 47) = 148 MB/s
-test bench::one_pass_short                         ... bench:         134 ns/iter (+/- 34) = 126 MB/s
-test bench::one_pass_short_not                     ... bench:         136 ns/iter (+/- 39) = 125 MB/s
-test bench::replace_all                            ... bench:         153 ns/iter (+/- 17)
-test bench_dynamic_compile::compile_huge           ... bench:     165,209 ns/iter (+/- 4,396)
-test bench_dynamic_compile::compile_huge_bytes     ... bench:  18,795,770 ns/iter (+/- 2,674,909)
-test bench_dynamic_compile::compile_simple         ... bench:       6,883 ns/iter (+/- 391)
-test bench_dynamic_compile::compile_simple_bytes   ... bench:       7,281 ns/iter (+/- 751)
-test bench_dynamic_compile::compile_small          ... bench:       9,091 ns/iter (+/- 1,125)
-test bench_dynamic_compile::compile_small_bytes    ... bench:     182,815 ns/iter (+/- 3,814)
-test bench_dynamic_parse::parse_huge               ... bench:       1,233 ns/iter (+/- 123)
-test bench_dynamic_parse::parse_simple             ... bench:       2,015 ns/iter (+/- 108)
-test bench_dynamic_parse::parse_small              ... bench:       2,500 ns/iter (+/- 76)
-test bench_sherlock::before_holmes                 ... bench:   2,741,811 ns/iter (+/- 58,389) = 216 MB/s
-test bench_sherlock::everything_greedy             ... bench:   7,807,696 ns/iter (+/- 328,585) = 76 MB/s
-test bench_sherlock::everything_greedy_nl          ... bench:   5,424,922 ns/iter (+/- 78,937) = 109 MB/s
-test bench_sherlock::holmes_cochar_watson          ... bench:     266,557 ns/iter (+/- 3,832) = 2231 MB/s
-test bench_sherlock::holmes_coword_watson          ... bench:   1,327,967 ns/iter (+/- 12,773) = 448 MB/s
-test bench_sherlock::line_boundary_sherlock_holmes ... bench:   2,690,485 ns/iter (+/- 17,393) = 221 MB/s
-test bench_sherlock::name_alt1                     ... bench:      77,206 ns/iter (+/- 951) = 7705 MB/s
-test bench_sherlock::name_alt2                     ... bench:     303,775 ns/iter (+/- 5,030) = 1958 MB/s
-test bench_sherlock::name_alt3                     ... bench:   1,385,153 ns/iter (+/- 15,871) = 429 MB/s
-test bench_sherlock::name_alt3_nocase              ... bench:   1,473,833 ns/iter (+/- 9,825) = 403 MB/s
-test bench_sherlock::name_alt4                     ... bench:     300,912 ns/iter (+/- 3,896) = 1977 MB/s
-test bench_sherlock::name_alt4_nocase              ... bench:   1,421,519 ns/iter (+/- 16,246) = 418 MB/s
-test bench_sherlock::name_holmes                   ... bench:      52,027 ns/iter (+/- 785) = 11435 MB/s
-test bench_sherlock::name_holmes_nocase            ... bench:   1,241,204 ns/iter (+/- 16,862) = 479 MB/s
-test bench_sherlock::name_sherlock                 ... bench:      34,378 ns/iter (+/- 677) = 17305 MB/s
-test bench_sherlock::name_sherlock_holmes          ... bench:      34,463 ns/iter (+/- 580) = 17262 MB/s
-test bench_sherlock::name_sherlock_holmes_nocase   ... bench:   1,281,540 ns/iter (+/- 11,054) = 464 MB/s
-test bench_sherlock::name_sherlock_nocase          ... bench:   1,281,293 ns/iter (+/- 13,129) = 464 MB/s
-test bench_sherlock::name_whitespace               ... bench:      60,463 ns/iter (+/- 815) = 9839 MB/s
-test bench_sherlock::no_match_common               ... bench:     568,357 ns/iter (+/- 11,237) = 1046 MB/s
-test bench_sherlock::no_match_uncommon             ... bench:      23,656 ns/iter (+/- 340) = 25149 MB/s
-test bench_sherlock::quotes                        ... bench:     977,907 ns/iter (+/- 13,926) = 608 MB/s
-test bench_sherlock::the_lower                     ... bench:     794,285 ns/iter (+/- 8,513) = 749 MB/s
-test bench_sherlock::the_nocase                    ... bench:   1,837,240 ns/iter (+/- 22,738) = 323 MB/s
-test bench_sherlock::the_upper                     ... bench:      54,083 ns/iter (+/- 1,153) = 11000 MB/s
-test bench_sherlock::the_whitespace                ... bench:   1,986,579 ns/iter (+/- 9,292) = 299 MB/s
-test bench_sherlock::word_ending_n                 ... bench:  55,205,101 ns/iter (+/- 93,542) = 10 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 67 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/dynamic-no-lazy-dfa b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/dynamic-no-lazy-dfa
deleted file mode 100644
index 50d3a136..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/dynamic-no-lazy-dfa
+++ /dev/null
@@ -1,85 +0,0 @@
-   Compiling regex v0.1.48 (file:///home/andrew/data/projects/rust/regex)
-src/dfa.rs:73:1: 94:2 warning: function is never used: `can_exec`, #[warn(dead_code)] on by default
-src/dfa.rs:73 pub fn can_exec(insts: &Insts) -> bool {
-src/dfa.rs:74     use inst::EmptyLook::*;
-src/dfa.rs:75     // If for some reason we manage to allocate a regex program with more
-src/dfa.rs:76     // than 2^32-1 instructions, then we can't execute the DFA because we
-src/dfa.rs:77     // use 32 bit pointers.
-src/dfa.rs:78     if insts.len() > ::std::u32::MAX as usize {
-              ...
-src/exec.rs:12:11: 12:15 warning: unused import, #[warn(unused_imports)] on by default
-src/exec.rs:12 use dfa::{self, Dfa, DfaResult};
-                         ^~~~
-     Running target/release/dynamic-e87a67d7ea67f0eb
-
-running 67 tests
-test bench::anchored_literal_long_match            ... bench:         169 ns/iter (+/- 1) = 2307 MB/s
-test bench::anchored_literal_long_non_match        ... bench:          85 ns/iter (+/- 0) = 4588 MB/s
-test bench::anchored_literal_short_match           ... bench:         158 ns/iter (+/- 3) = 164 MB/s
-test bench::anchored_literal_short_non_match       ... bench:          84 ns/iter (+/- 2) = 309 MB/s
-test bench::easy0_1K                               ... bench:         318 ns/iter (+/- 2) = 3220 MB/s
-test bench::easy0_1MB                              ... bench:     257,205 ns/iter (+/- 2,448) = 4076 MB/s
-test bench::easy0_32                               ... bench:          82 ns/iter (+/- 1) = 390 MB/s
-test bench::easy0_32K                              ... bench:       8,666 ns/iter (+/- 104) = 3781 MB/s
-test bench::easy1_1K                               ... bench:         293 ns/iter (+/- 2) = 3494 MB/s
-test bench::easy1_1MB                              ... bench:     329,774 ns/iter (+/- 6,296) = 3179 MB/s
-test bench::easy1_32                               ... bench:          77 ns/iter (+/- 0) = 415 MB/s
-test bench::easy1_32K                              ... bench:       8,856 ns/iter (+/- 93) = 3700 MB/s
-test bench::hard_1K                                ... bench:      31,888 ns/iter (+/- 83) = 32 MB/s
-test bench::hard_1MB                               ... bench:  58,435,108 ns/iter (+/- 64,537) = 17 MB/s
-test bench::hard_32                                ... bench:       1,048 ns/iter (+/- 12) = 30 MB/s
-test bench::hard_32K                               ... bench:   1,033,930 ns/iter (+/- 4,224) = 31 MB/s
-test bench::literal                                ... bench:          20 ns/iter (+/- 0) = 2550 MB/s
-test bench::match_class                            ... bench:          84 ns/iter (+/- 0) = 964 MB/s
-test bench::match_class_in_range                   ... bench:          33 ns/iter (+/- 0) = 2454 MB/s
-test bench::match_class_unicode                    ... bench:       2,218 ns/iter (+/- 8) = 72 MB/s
-test bench::medium_1K                              ... bench:       1,368 ns/iter (+/- 9) = 748 MB/s
-test bench::medium_1MB                             ... bench:   2,034,481 ns/iter (+/- 3,608) = 515 MB/s
-test bench::medium_32                              ... bench:         141 ns/iter (+/- 0) = 226 MB/s
-test bench::medium_32K                             ... bench:      59,949 ns/iter (+/- 421) = 546 MB/s
-test bench::no_exponential                         ... bench:     336,653 ns/iter (+/- 1,757)
-test bench::not_literal                            ... bench:       1,247 ns/iter (+/- 5) = 40 MB/s
-test bench::one_pass_long_prefix                   ... bench:         264 ns/iter (+/- 2) = 98 MB/s
-test bench::one_pass_long_prefix_not               ... bench:         267 ns/iter (+/- 1) = 97 MB/s
-test bench::one_pass_short                         ... bench:         768 ns/iter (+/- 5) = 22 MB/s
-test bench::one_pass_short_not                     ... bench:         797 ns/iter (+/- 20) = 21 MB/s
-test bench::replace_all                            ... bench:         149 ns/iter (+/- 0)
-test bench_dynamic_compile::compile_huge           ... bench:     161,349 ns/iter (+/- 1,462)
-test bench_dynamic_compile::compile_huge_bytes     ... bench:  18,050,519 ns/iter (+/- 105,846)
-test bench_dynamic_compile::compile_simple         ... bench:       6,664 ns/iter (+/- 390)
-test bench_dynamic_compile::compile_simple_bytes   ... bench:       7,035 ns/iter (+/- 370)
-test bench_dynamic_compile::compile_small          ... bench:       8,914 ns/iter (+/- 347)
-test bench_dynamic_compile::compile_small_bytes    ... bench:     186,970 ns/iter (+/- 2,134)
-test bench_dynamic_parse::parse_huge               ... bench:       1,238 ns/iter (+/- 11)
-test bench_dynamic_parse::parse_simple             ... bench:       2,005 ns/iter (+/- 19)
-test bench_dynamic_parse::parse_small              ... bench:       2,494 ns/iter (+/- 11)
-test bench_sherlock::before_holmes                 ... bench:  42,005,594 ns/iter (+/- 57,752) = 14 MB/s
-test bench_sherlock::everything_greedy             ... bench:  38,431,063 ns/iter (+/- 28,840) = 15 MB/s
-test bench_sherlock::everything_greedy_nl          ... bench:  32,003,966 ns/iter (+/- 50,270) = 18 MB/s
-test bench_sherlock::holmes_cochar_watson          ... bench:   1,457,068 ns/iter (+/- 3,202) = 408 MB/s
-test bench_sherlock::holmes_coword_watson          ... bench: 136,035,549 ns/iter (+/- 75,381) = 4 MB/s
-test bench_sherlock::line_boundary_sherlock_holmes ... bench:  33,024,291 ns/iter (+/- 67,902) = 18 MB/s
-test bench_sherlock::name_alt1                     ... bench:     157,989 ns/iter (+/- 917) = 3765 MB/s
-test bench_sherlock::name_alt2                     ... bench:     545,254 ns/iter (+/- 1,908) = 1091 MB/s
-test bench_sherlock::name_alt3                     ... bench:   2,245,964 ns/iter (+/- 2,478) = 264 MB/s
-test bench_sherlock::name_alt3_nocase              ... bench:   4,792,290 ns/iter (+/- 31,760) = 124 MB/s
-test bench_sherlock::name_alt4                     ... bench:     584,204 ns/iter (+/- 2,084) = 1018 MB/s
-test bench_sherlock::name_alt4_nocase              ... bench:   2,318,020 ns/iter (+/- 8,493) = 256 MB/s
-test bench_sherlock::name_holmes                   ... bench:      51,880 ns/iter (+/- 299) = 11467 MB/s
-test bench_sherlock::name_holmes_nocase            ... bench:   1,414,500 ns/iter (+/- 2,497) = 420 MB/s
-test bench_sherlock::name_sherlock                 ... bench:      34,294 ns/iter (+/- 349) = 17348 MB/s
-test bench_sherlock::name_sherlock_holmes          ... bench:      34,531 ns/iter (+/- 199) = 17228 MB/s
-test bench_sherlock::name_sherlock_holmes_nocase   ... bench:   1,692,651 ns/iter (+/- 8,846) = 351 MB/s
-test bench_sherlock::name_sherlock_nocase          ... bench:   1,657,413 ns/iter (+/- 5,534) = 358 MB/s
-test bench_sherlock::name_whitespace               ... bench:     131,372 ns/iter (+/- 605) = 4528 MB/s
-test bench_sherlock::no_match_common               ... bench:     567,065 ns/iter (+/- 2,763) = 1049 MB/s
-test bench_sherlock::no_match_uncommon             ... bench:      23,782 ns/iter (+/- 85) = 25016 MB/s
-test bench_sherlock::quotes                        ... bench:  11,251,366 ns/iter (+/- 24,960) = 52 MB/s
-test bench_sherlock::the_lower                     ... bench:     789,781 ns/iter (+/- 2,072) = 753 MB/s
-test bench_sherlock::the_nocase                    ... bench:   1,807,509 ns/iter (+/- 4,685) = 329 MB/s
-test bench_sherlock::the_upper                     ... bench:      53,542 ns/iter (+/- 198) = 11111 MB/s
-test bench_sherlock::the_whitespace                ... bench:   5,410,444 ns/iter (+/- 14,766) = 109 MB/s
-test bench_sherlock::word_ending_n                 ... bench:  56,017,874 ns/iter (+/- 60,047) = 10 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 67 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/native b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/native
deleted file mode 100644
index 61fc08da..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/native
+++ /dev/null
@@ -1,65 +0,0 @@
-   Compiling regex_macros v0.1.28 (file:///home/andrew/data/projects/rust/regex/regex_macros)
-     Running regex_macros/target/release/native-f2ffefeeda527264
-
-running 58 tests
-test bench::anchored_literal_long_match            ... bench:         189 ns/iter (+/- 16) = 2063 MB/s
-test bench::anchored_literal_long_non_match        ... bench:          47 ns/iter (+/- 1) = 8297 MB/s
-test bench::anchored_literal_short_match           ... bench:         177 ns/iter (+/- 5) = 146 MB/s
-test bench::anchored_literal_short_non_match       ... bench:          46 ns/iter (+/- 1) = 565 MB/s
-test bench::easy0_1K                               ... bench:      26,578 ns/iter (+/- 1,140) = 38 MB/s
-test bench::easy0_1MB                              ... bench:  27,229,730 ns/iter (+/- 261,126) = 38 MB/s
-test bench::easy0_32                               ... bench:         867 ns/iter (+/- 45) = 36 MB/s
-test bench::easy0_32K                              ... bench:     847,113 ns/iter (+/- 276,910) = 38 MB/s
-test bench::easy1_1K                               ... bench:      23,525 ns/iter (+/- 278) = 43 MB/s
-test bench::easy1_1MB                              ... bench:  24,075,047 ns/iter (+/- 40,396) = 43 MB/s
-test bench::easy1_32                               ... bench:         767 ns/iter (+/- 14) = 41 MB/s
-test bench::easy1_32K                              ... bench:     752,730 ns/iter (+/- 9,284) = 43 MB/s
-test bench::hard_1K                                ... bench:      44,053 ns/iter (+/- 513) = 23 MB/s
-test bench::hard_1MB                               ... bench:  44,982,170 ns/iter (+/- 76,683) = 23 MB/s
-test bench::hard_32                                ... bench:       1,418 ns/iter (+/- 26) = 22 MB/s
-test bench::hard_32K                               ... bench:   1,407,013 ns/iter (+/- 13,426) = 23 MB/s
-test bench::literal                                ... bench:       1,202 ns/iter (+/- 16) = 42 MB/s
-test bench::match_class                            ... bench:       2,057 ns/iter (+/- 29) = 39 MB/s
-test bench::match_class_in_range                   ... bench:       2,060 ns/iter (+/- 34) = 39 MB/s
-test bench::match_class_unicode                    ... bench:      12,945 ns/iter (+/- 156) = 12 MB/s
-test bench::medium_1K                              ... bench:      27,874 ns/iter (+/- 315) = 36 MB/s
-test bench::medium_1MB                             ... bench:  28,614,500 ns/iter (+/- 544,256) = 36 MB/s
-test bench::medium_32                              ... bench:         896 ns/iter (+/- 85) = 35 MB/s
-test bench::medium_32K                             ... bench:     892,349 ns/iter (+/- 35,511) = 36 MB/s
-test bench::no_exponential                         ... bench:     319,270 ns/iter (+/- 19,837)
-test bench::not_literal                            ... bench:       1,477 ns/iter (+/- 104) = 34 MB/s
-test bench::one_pass_long_prefix                   ... bench:         653 ns/iter (+/- 10) = 39 MB/s
-test bench::one_pass_long_prefix_not               ... bench:         651 ns/iter (+/- 6) = 39 MB/s
-test bench::one_pass_short                         ... bench:       1,016 ns/iter (+/- 24) = 16 MB/s
-test bench::one_pass_short_not                     ... bench:       1,588 ns/iter (+/- 28) = 10 MB/s
-test bench::replace_all                            ... bench:       1,078 ns/iter (+/- 55)
-test bench_sherlock::before_holmes                 ... bench:  54,264,124 ns/iter (+/- 564,692) = 10 MB/s
-test bench_sherlock::everything_greedy             ... bench:  22,724,158 ns/iter (+/- 44,361) = 26 MB/s
-test bench_sherlock::everything_greedy_nl          ... bench:  22,168,804 ns/iter (+/- 66,296) = 26 MB/s
-test bench_sherlock::holmes_cochar_watson          ... bench:  24,791,824 ns/iter (+/- 37,522) = 23 MB/s
-test bench_sherlock::holmes_coword_watson          ... bench: 885,999,793 ns/iter (+/- 39,704,278)
-test bench_sherlock::line_boundary_sherlock_holmes ... bench:  25,113,805 ns/iter (+/- 672,050) = 23 MB/s
-test bench_sherlock::name_alt1                     ... bench:  23,382,716 ns/iter (+/- 3,696,517) = 25 MB/s
-test bench_sherlock::name_alt2                     ... bench:  23,585,220 ns/iter (+/- 3,724,922) = 25 MB/s
-test bench_sherlock::name_alt3                     ... bench:  80,283,635 ns/iter (+/- 3,165,029) = 7 MB/s
-test bench_sherlock::name_alt3_nocase              ... bench:  77,357,394 ns/iter (+/- 268,133) = 7 MB/s
-test bench_sherlock::name_alt4                     ... bench:  22,736,520 ns/iter (+/- 43,231) = 26 MB/s
-test bench_sherlock::name_alt4_nocase              ... bench:  26,921,524 ns/iter (+/- 140,162) = 22 MB/s
-test bench_sherlock::name_holmes                   ... bench:  15,145,735 ns/iter (+/- 65,980) = 39 MB/s
-test bench_sherlock::name_holmes_nocase            ... bench:  16,285,042 ns/iter (+/- 71,956) = 36 MB/s
-test bench_sherlock::name_sherlock                 ... bench:  16,189,653 ns/iter (+/- 99,929) = 36 MB/s
-test bench_sherlock::name_sherlock_holmes          ... bench:  14,975,742 ns/iter (+/- 118,052) = 39 MB/s
-test bench_sherlock::name_sherlock_holmes_nocase   ... bench:  16,904,928 ns/iter (+/- 201,104) = 35 MB/s
-test bench_sherlock::name_sherlock_nocase          ... bench:  16,335,907 ns/iter (+/- 118,725) = 36 MB/s
-test bench_sherlock::name_whitespace               ... bench:  14,837,905 ns/iter (+/- 52,201) = 40 MB/s
-test bench_sherlock::no_match_common               ... bench:  16,036,625 ns/iter (+/- 108,268) = 37 MB/s
-test bench_sherlock::no_match_uncommon             ... bench:  15,278,356 ns/iter (+/- 81,123) = 38 MB/s
-test bench_sherlock::quotes                        ... bench:  21,580,801 ns/iter (+/- 198,772) = 27 MB/s
-test bench_sherlock::the_lower                     ... bench:  16,059,120 ns/iter (+/- 160,640) = 37 MB/s
-test bench_sherlock::the_nocase                    ... bench:  17,376,836 ns/iter (+/- 103,371) = 34 MB/s
-test bench_sherlock::the_upper                     ... bench:  15,259,087 ns/iter (+/- 93,807) = 38 MB/s
-test bench_sherlock::the_whitespace                ... bench:  18,835,951 ns/iter (+/- 160,674) = 31 MB/s
-test bench_sherlock::word_ending_n                 ... bench:  59,832,390 ns/iter (+/- 4,478,911) = 9 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 58 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/nfa b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/nfa
deleted file mode 100644
index 994137b5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/nfa
+++ /dev/null
@@ -1,74 +0,0 @@
-   Compiling regex v0.1.48 (file:///home/andrew/data/projects/rust/regex)
-     Running target/release/dynamic_nfa-1e40ce11bcb7c666
-
-running 67 tests
-test bench::anchored_literal_long_match            ... bench:         306 ns/iter (+/- 6) = 1274 MB/s
-test bench::anchored_literal_long_non_match        ... bench:          95 ns/iter (+/- 1) = 4105 MB/s
-test bench::anchored_literal_short_match           ... bench:         315 ns/iter (+/- 2) = 82 MB/s
-test bench::anchored_literal_short_non_match       ... bench:          96 ns/iter (+/- 2) = 270 MB/s
-test bench::easy0_1K                               ... bench:         206 ns/iter (+/- 1) = 4970 MB/s
-test bench::easy0_1MB                              ... bench:     255,834 ns/iter (+/- 1,273) = 4098 MB/s
-test bench::easy0_32                               ... bench:          72 ns/iter (+/- 2) = 444 MB/s
-test bench::easy0_32K                              ... bench:       5,315 ns/iter (+/- 25) = 6165 MB/s
-test bench::easy1_1K                               ... bench:         274 ns/iter (+/- 0) = 3737 MB/s
-test bench::easy1_1MB                              ... bench:     337,047 ns/iter (+/- 1,972) = 3111 MB/s
-test bench::easy1_32                               ... bench:          76 ns/iter (+/- 2) = 421 MB/s
-test bench::easy1_32K                              ... bench:       6,111 ns/iter (+/- 39) = 5362 MB/s
-test bench::hard_1K                                ... bench:      59,596 ns/iter (+/- 264) = 17 MB/s
-test bench::hard_1MB                               ... bench:  58,947,188 ns/iter (+/- 205,874) = 17 MB/s
-test bench::hard_32                                ... bench:       1,978 ns/iter (+/- 22) = 16 MB/s
-test bench::hard_32K                               ... bench:   1,846,347 ns/iter (+/- 14,253) = 17 MB/s
-test bench::literal                                ... bench:         172 ns/iter (+/- 1) = 296 MB/s
-test bench::match_class                            ... bench:         240 ns/iter (+/- 1) = 337 MB/s
-test bench::match_class_in_range                   ... bench:         190 ns/iter (+/- 2) = 426 MB/s
-test bench::match_class_unicode                    ... bench:       4,145 ns/iter (+/- 24) = 38 MB/s
-test bench::medium_1K                              ... bench:       1,195 ns/iter (+/- 8) = 856 MB/s
-test bench::medium_1MB                             ... bench:   2,028,649 ns/iter (+/- 11,235) = 516 MB/s
-test bench::medium_32                              ... bench:          84 ns/iter (+/- 0) = 380 MB/s
-test bench::medium_32K                             ... bench:      56,134 ns/iter (+/- 369) = 583 MB/s
-test bench::no_exponential                         ... bench:         536 ns/iter (+/- 4) = 186 MB/s
-test bench::not_literal                            ... bench:       2,428 ns/iter (+/- 31) = 21 MB/s
-test bench::one_pass_long_prefix                   ... bench:         756 ns/iter (+/- 2) = 34 MB/s
-test bench::one_pass_long_prefix_not               ... bench:         756 ns/iter (+/- 12) = 34 MB/s
-test bench::one_pass_short                         ... bench:       1,813 ns/iter (+/- 5) = 9 MB/s
-test bench::one_pass_short_not                     ... bench:       2,588 ns/iter (+/- 8) = 6 MB/s
-test bench::replace_all                            ... bench:         905 ns/iter (+/- 7)
-test bench_dynamic_compile::compile_huge           ... bench:     161,517 ns/iter (+/- 1,287)
-test bench_dynamic_compile::compile_huge_bytes     ... bench:  18,395,715 ns/iter (+/- 98,986)
-test bench_dynamic_compile::compile_simple         ... bench:       6,623 ns/iter (+/- 296)
-test bench_dynamic_compile::compile_simple_bytes   ... bench:       7,047 ns/iter (+/- 232)
-test bench_dynamic_compile::compile_small          ... bench:       8,948 ns/iter (+/- 526)
-test bench_dynamic_compile::compile_small_bytes    ... bench:     186,796 ns/iter (+/- 817)
-test bench_dynamic_parse::parse_huge               ... bench:       1,238 ns/iter (+/- 6)
-test bench_dynamic_parse::parse_simple             ... bench:       1,977 ns/iter (+/- 12)
-test bench_dynamic_parse::parse_small              ... bench:       2,502 ns/iter (+/- 18)
-test bench_sherlock::before_holmes                 ... bench:  45,045,123 ns/iter (+/- 261,188) = 13 MB/s
-test bench_sherlock::everything_greedy             ... bench:  38,685,654 ns/iter (+/- 107,136) = 15 MB/s
-test bench_sherlock::everything_greedy_nl          ... bench:  36,407,787 ns/iter (+/- 160,253) = 16 MB/s
-test bench_sherlock::holmes_cochar_watson          ... bench:   1,417,371 ns/iter (+/- 6,533) = 419 MB/s
-test bench_sherlock::holmes_coword_watson          ... bench: 139,298,695 ns/iter (+/- 154,012) = 4 MB/s
-test bench_sherlock::line_boundary_sherlock_holmes ... bench:  32,734,005 ns/iter (+/- 98,729) = 18 MB/s
-test bench_sherlock::name_alt1                     ... bench:     153,016 ns/iter (+/- 739) = 3888 MB/s
-test bench_sherlock::name_alt2                     ... bench:     534,038 ns/iter (+/- 1,909) = 1114 MB/s
-test bench_sherlock::name_alt3                     ... bench:   2,220,778 ns/iter (+/- 6,374) = 267 MB/s
-test bench_sherlock::name_alt3_nocase              ... bench:   4,744,134 ns/iter (+/- 11,703) = 125 MB/s
-test bench_sherlock::name_alt4                     ... bench:     569,971 ns/iter (+/- 2,256) = 1043 MB/s
-test bench_sherlock::name_alt4_nocase              ... bench:   2,324,966 ns/iter (+/- 3,082) = 255 MB/s
-test bench_sherlock::name_holmes                   ... bench:     268,146 ns/iter (+/- 1,238) = 2218 MB/s
-test bench_sherlock::name_holmes_nocase            ... bench:   1,409,583 ns/iter (+/- 2,808) = 422 MB/s
-test bench_sherlock::name_sherlock                 ... bench:      95,280 ns/iter (+/- 316) = 6244 MB/s
-test bench_sherlock::name_sherlock_holmes          ... bench:     116,097 ns/iter (+/- 461) = 5124 MB/s
-test bench_sherlock::name_sherlock_holmes_nocase   ... bench:   1,691,210 ns/iter (+/- 3,712) = 351 MB/s
-test bench_sherlock::name_sherlock_nocase          ... bench:   1,651,722 ns/iter (+/- 7,070) = 360 MB/s
-test bench_sherlock::name_whitespace               ... bench:     130,960 ns/iter (+/- 923) = 4542 MB/s
-test bench_sherlock::no_match_common               ... bench:     568,008 ns/iter (+/- 1,723) = 1047 MB/s
-test bench_sherlock::no_match_uncommon             ... bench:      23,669 ns/iter (+/- 84) = 25135 MB/s
-test bench_sherlock::quotes                        ... bench:  11,055,260 ns/iter (+/- 24,883) = 53 MB/s
-test bench_sherlock::the_lower                     ... bench:   2,934,498 ns/iter (+/- 4,553) = 202 MB/s
-test bench_sherlock::the_nocase                    ... bench:   4,268,193 ns/iter (+/- 8,164) = 139 MB/s
-test bench_sherlock::the_upper                     ... bench:     272,832 ns/iter (+/- 1,436) = 2180 MB/s
-test bench_sherlock::the_whitespace                ... bench:   5,409,934 ns/iter (+/- 7,678) = 109 MB/s
-test bench_sherlock::word_ending_n                 ... bench:  55,252,656 ns/iter (+/- 68,442) = 10 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 67 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/pcre b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/pcre
deleted file mode 100644
index 22a66e6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/01-lazy-dfa/pcre
+++ /dev/null
@@ -1,60 +0,0 @@
-   Compiling regex v0.1.48 (file:///home/andrew/data/projects/rust/regex)
-     Running target/release/pcre-781840b9a3e9c199
-
-running 53 tests
-test anchored_literal_long_match             ... bench:          90 ns/iter (+/- 7) = 4333 MB/s
-test anchored_literal_long_non_match         ... bench:          60 ns/iter (+/- 2) = 6500 MB/s
-test anchored_literal_short_match            ... bench:          87 ns/iter (+/- 6) = 298 MB/s
-test anchored_literal_short_non_match        ... bench:          58 ns/iter (+/- 4) = 448 MB/s
-test easy0_1K                                ... bench:         258 ns/iter (+/- 14) = 3968 MB/s
-test easy0_1MB                               ... bench:     226,139 ns/iter (+/- 1,637) = 4636 MB/s
-test easy0_32                                ... bench:          60 ns/iter (+/- 7) = 533 MB/s
-test easy0_32K                               ... bench:       7,028 ns/iter (+/- 120) = 4662 MB/s
-test easy1_1K                                ... bench:         794 ns/iter (+/- 20) = 1289 MB/s
-test easy1_1MB                               ... bench:     751,438 ns/iter (+/- 11,372) = 1395 MB/s
-test easy1_32                                ... bench:          71 ns/iter (+/- 3) = 450 MB/s
-test easy1_32K                               ... bench:      23,042 ns/iter (+/- 1,453) = 1422 MB/s
-test hard_1K                                 ... bench:      30,841 ns/iter (+/- 1,287) = 33 MB/s
-test hard_1MB                                ... bench:  35,239,100 ns/iter (+/- 632,179) = 29 MB/s
-test hard_32                                 ... bench:          86 ns/iter (+/- 11) = 372 MB/s
-test hard_32K                                ... bench:     993,011 ns/iter (+/- 63,648) = 32 MB/s
-test literal                                 ... bench:         130 ns/iter (+/- 11) = 392 MB/s
-test match_class                             ... bench:         183 ns/iter (+/- 33) = 442 MB/s
-test match_class_in_range                    ... bench:         175 ns/iter (+/- 18) = 462 MB/s
-test match_class_unicode                     ... bench:         513 ns/iter (+/- 8) = 313 MB/s
-test medium_1K                               ... bench:         278 ns/iter (+/- 6) = 3683 MB/s
-test medium_1MB                              ... bench:     240,699 ns/iter (+/- 17,344) = 4356 MB/s
-test medium_32                               ... bench:          61 ns/iter (+/- 13) = 524 MB/s
-test medium_32K                              ... bench:       7,369 ns/iter (+/- 105) = 4446 MB/s
-test not_literal                             ... bench:         274 ns/iter (+/- 17) = 186 MB/s
-test one_pass_long_prefix                    ... bench:          87 ns/iter (+/- 19) = 298 MB/s
-test one_pass_long_prefix_not                ... bench:          86 ns/iter (+/- 13) = 302 MB/s
-test one_pass_short                          ... bench:         117 ns/iter (+/- 44) = 145 MB/s
-test one_pass_short_not                      ... bench:         122 ns/iter (+/- 6) = 139 MB/s
-test sherlock::before_holmes                 ... bench:  14,450,308 ns/iter (+/- 617,786) = 41 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     546,919 ns/iter (+/- 4,880) = 1087 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     194,524 ns/iter (+/- 6,230) = 3058 MB/s
-test sherlock::name_alt1                     ... bench:     457,899 ns/iter (+/- 7,781) = 1299 MB/s
-test sherlock::name_alt2                     ... bench:     496,659 ns/iter (+/- 6,529) = 1197 MB/s
-test sherlock::name_alt3                     ... bench:     983,620 ns/iter (+/- 45,359) = 604 MB/s
-test sherlock::name_alt3_nocase              ... bench:   3,500,367 ns/iter (+/- 79,807) = 169 MB/s
-test sherlock::name_alt4                     ... bench:     972,128 ns/iter (+/- 22,195) = 611 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,877,017 ns/iter (+/- 39,079) = 316 MB/s
-test sherlock::name_holmes                   ... bench:     398,258 ns/iter (+/- 4,338) = 1493 MB/s
-test sherlock::name_holmes_nocase            ... bench:     492,292 ns/iter (+/- 4,667) = 1208 MB/s
-test sherlock::name_sherlock                 ... bench:     268,891 ns/iter (+/- 18,063) = 2212 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     197,067 ns/iter (+/- 8,027) = 3018 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,112,501 ns/iter (+/- 44,457) = 534 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,332,423 ns/iter (+/- 39,227) = 446 MB/s
-test sherlock::name_whitespace               ... bench:     267,257 ns/iter (+/- 964) = 2226 MB/s
-test sherlock::no_match_common               ... bench:     595,211 ns/iter (+/- 3,739) = 999 MB/s
-test sherlock::no_match_uncommon             ... bench:     584,057 ns/iter (+/- 6,825) = 1018 MB/s
-test sherlock::quotes                        ... bench:   1,208,235 ns/iter (+/- 37,629) = 492 MB/s
-test sherlock::the_lower                     ... bench:   1,210,851 ns/iter (+/- 35,900) = 491 MB/s
-test sherlock::the_nocase                    ... bench:   1,286,611 ns/iter (+/- 35,689) = 462 MB/s
-test sherlock::the_upper                     ... bench:     776,113 ns/iter (+/- 6,236) = 766 MB/s
-test sherlock::the_whitespace                ... bench:   1,368,468 ns/iter (+/- 135,282) = 434 MB/s
-test sherlock::word_ending_n                 ... bench:  12,018,618 ns/iter (+/- 266,497) = 49 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 53 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/02-set/dynamic b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/02-set/dynamic
deleted file mode 100644
index 69c9f71..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/02-set/dynamic
+++ /dev/null
@@ -1,78 +0,0 @@
-   Compiling regex v0.1.52 (file:///home/andrew/data/projects/rust/regex)
-     Running target/release/dynamic-a76738dddf3bdc6b
-
-running 71 tests
-test misc::anchored_literal_long_match            ... bench:          74 ns/iter (+/- 8) = 5270 MB/s
-test misc::anchored_literal_long_non_match        ... bench:          58 ns/iter (+/- 0) = 6724 MB/s
-test misc::anchored_literal_short_match           ... bench:          73 ns/iter (+/- 0) = 356 MB/s
-test misc::anchored_literal_short_non_match       ... bench:          58 ns/iter (+/- 0) = 448 MB/s
-test misc::easy0_1K                               ... bench:         214 ns/iter (+/- 2) = 4785 MB/s
-test misc::easy0_1MB                              ... bench:     247,056 ns/iter (+/- 1,777) = 4244 MB/s
-test misc::easy0_32                               ... bench:          64 ns/iter (+/- 0) = 500 MB/s
-test misc::easy0_32K                              ... bench:       5,281 ns/iter (+/- 29) = 6204 MB/s
-test misc::easy1_1K                               ... bench:         278 ns/iter (+/- 5) = 3683 MB/s
-test misc::easy1_1MB                              ... bench:     320,041 ns/iter (+/- 4,243) = 3276 MB/s
-test misc::easy1_32                               ... bench:          65 ns/iter (+/- 0) = 492 MB/s
-test misc::easy1_32K                              ... bench:       5,885 ns/iter (+/- 83) = 5568 MB/s
-test misc::hard_1K                                ... bench:       4,685 ns/iter (+/- 20) = 218 MB/s
-test misc::hard_1MB                               ... bench:   4,745,020 ns/iter (+/- 19,440) = 220 MB/s
-test misc::hard_32                                ... bench:         197 ns/iter (+/- 1) = 162 MB/s
-test misc::hard_32K                               ... bench:     147,409 ns/iter (+/- 656) = 222 MB/s
-test misc::literal                                ... bench:          20 ns/iter (+/- 1) = 2550 MB/s
-test misc::match_class                            ... bench:          86 ns/iter (+/- 3) = 941 MB/s
-test misc::match_class_in_range                   ... bench:          32 ns/iter (+/- 2) = 2531 MB/s
-test misc::match_class_unicode                    ... bench:         801 ns/iter (+/- 36) = 200 MB/s
-test misc::medium_1K                              ... bench:       1,213 ns/iter (+/- 237) = 844 MB/s
-test misc::medium_1MB                             ... bench:   1,991,418 ns/iter (+/- 239,612) = 526 MB/s
-test misc::medium_32                              ... bench:         100 ns/iter (+/- 8) = 320 MB/s
-test misc::medium_32K                             ... bench:      57,080 ns/iter (+/- 709) = 574 MB/s
-test misc::no_exponential                         ... bench:         522 ns/iter (+/- 17) = 191 MB/s
-test misc::not_literal                            ... bench:         290 ns/iter (+/- 6) = 175 MB/s
-test misc::one_pass_long_prefix                   ... bench:         176 ns/iter (+/- 15) = 147 MB/s
-test misc::one_pass_long_prefix_not               ... bench:         183 ns/iter (+/- 28) = 142 MB/s
-test misc::one_pass_short                         ... bench:         136 ns/iter (+/- 8) = 125 MB/s
-test misc::one_pass_short_not                     ... bench:         135 ns/iter (+/- 14) = 125 MB/s
-test misc::replace_all                            ... bench:         149 ns/iter (+/- 34)
-test rust_compile::compile_huge           ... bench:     158,759 ns/iter (+/- 4,546)
-test rust_compile::compile_huge_bytes     ... bench:  17,538,290 ns/iter (+/- 1,735,383)
-test rust_compile::compile_simple         ... bench:       5,935 ns/iter (+/- 429)
-test rust_compile::compile_simple_bytes   ... bench:       6,682 ns/iter (+/- 293)
-test rust_compile::compile_small          ... bench:       7,664 ns/iter (+/- 473)
-test rust_compile::compile_small_bytes    ... bench:     175,272 ns/iter (+/- 4,492)
-test rust_parse::parse_huge               ... bench:       1,199 ns/iter (+/- 38)
-test rust_parse::parse_simple             ... bench:       1,849 ns/iter (+/- 28)
-test rust_parse::parse_small              ... bench:       2,470 ns/iter (+/- 35)
-test sherlock::before_holmes                 ... bench:   2,750,028 ns/iter (+/- 21,847) = 216 MB/s
-test sherlock::everything_greedy             ... bench:   7,896,337 ns/iter (+/- 68,883) = 75 MB/s
-test sherlock::everything_greedy_nl          ... bench:   5,498,247 ns/iter (+/- 65,952) = 108 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     260,499 ns/iter (+/- 4,984) = 2283 MB/s
-test sherlock::holmes_coword_watson          ... bench:   1,331,443 ns/iter (+/- 34,716) = 446 MB/s
-test sherlock::letters                       ... bench:  60,985,848 ns/iter (+/- 592,838) = 9 MB/s
-test sherlock::letters_lower                 ... bench:  59,041,695 ns/iter (+/- 186,034) = 10 MB/s
-test sherlock::letters_upper                 ... bench:   4,714,214 ns/iter (+/- 35,672) = 126 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,730,524 ns/iter (+/- 69,565) = 217 MB/s
-test sherlock::name_alt1                     ... bench:      41,866 ns/iter (+/- 682) = 14210 MB/s
-test sherlock::name_alt2                     ... bench:     194,322 ns/iter (+/- 6,628) = 3061 MB/s
-test sherlock::name_alt3                     ... bench:   1,252,965 ns/iter (+/- 18,828) = 474 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,476,169 ns/iter (+/- 14,557) = 403 MB/s
-test sherlock::name_alt4                     ... bench:     298,639 ns/iter (+/- 3,905) = 1992 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,426,191 ns/iter (+/- 23,584) = 417 MB/s
-test sherlock::name_holmes                   ... bench:      49,719 ns/iter (+/- 811) = 11965 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,191,400 ns/iter (+/- 19,175) = 499 MB/s
-test sherlock::name_sherlock                 ... bench:      34,091 ns/iter (+/- 877) = 17451 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      33,785 ns/iter (+/- 1,207) = 17609 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,235,442 ns/iter (+/- 18,023) = 481 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,236,252 ns/iter (+/- 26,934) = 481 MB/s
-test sherlock::name_whitespace               ... bench:      60,200 ns/iter (+/- 1,873) = 9882 MB/s
-test sherlock::no_match_common               ... bench:     559,886 ns/iter (+/- 20,306) = 1062 MB/s
-test sherlock::no_match_uncommon             ... bench:      23,631 ns/iter (+/- 497) = 25175 MB/s
-test sherlock::quotes                        ... bench:     967,379 ns/iter (+/- 12,856) = 614 MB/s
-test sherlock::the_lower                     ... bench:     766,950 ns/iter (+/- 21,944) = 775 MB/s
-test sherlock::the_nocase                    ... bench:   1,706,539 ns/iter (+/- 26,003) = 348 MB/s
-test sherlock::the_upper                     ... bench:      52,529 ns/iter (+/- 1,208) = 11325 MB/s
-test sherlock::the_whitespace                ... bench:   2,012,952 ns/iter (+/- 26,968) = 295 MB/s
-test sherlock::word_ending_n                 ... bench:  55,578,841 ns/iter (+/- 537,463) = 10 MB/s
-test sherlock::words                         ... bench:  19,103,327 ns/iter (+/- 102,828) = 31 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 71 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/onig b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/onig
deleted file mode 100644
index aaf666b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/onig
+++ /dev/null
@@ -1,68 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/benches)
-     Running benches/target/release/onig-e3bc363aa56fb408
-
-running 61 tests
-test misc::anchored_literal_long_match       ... bench:          70 ns/iter (+/- 1) = 5571 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         424 ns/iter (+/- 4) = 919 MB/s
-test misc::anchored_literal_short_match      ... bench:          70 ns/iter (+/- 1) = 371 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          38 ns/iter (+/- 0) = 684 MB/s
-test misc::easy0_1K                          ... bench:         176 ns/iter (+/- 2) = 5818 MB/s
-test misc::easy0_1MB                         ... bench:     163,547 ns/iter (+/- 1,451) = 6411 MB/s
-test misc::easy0_32                          ... bench:          20 ns/iter (+/- 1) = 1600 MB/s
-test misc::easy0_32K                         ... bench:       5,056 ns/iter (+/- 64) = 6481 MB/s
-test misc::easy1_1K                          ... bench:       4,103 ns/iter (+/- 11) = 249 MB/s
-test misc::easy1_1MB                         ... bench:   4,198,406 ns/iter (+/- 62,171) = 249 MB/s
-test misc::easy1_32                          ... bench:         139 ns/iter (+/- 1) = 230 MB/s
-test misc::easy1_32K                         ... bench:     131,083 ns/iter (+/- 1,310) = 249 MB/s
-test misc::hard_1K                           ... bench:         163 ns/iter (+/- 3) = 6282 MB/s
-test misc::hard_1MB                          ... bench:     163,910 ns/iter (+/- 2,368) = 6397 MB/s
-test misc::hard_32                           ... bench:          20 ns/iter (+/- 1) = 1600 MB/s
-test misc::hard_32K                          ... bench:       5,002 ns/iter (+/- 306) = 6550 MB/s
-test misc::literal                           ... bench:         226 ns/iter (+/- 0) = 225 MB/s
-test misc::match_class                       ... bench:         337 ns/iter (+/- 2) = 240 MB/s
-test misc::match_class_in_range              ... bench:         337 ns/iter (+/- 1) = 240 MB/s
-test misc::match_class_unicode               ... bench:       2,004 ns/iter (+/- 26) = 80 MB/s
-test misc::medium_1K                         ... bench:         191 ns/iter (+/- 2) = 5361 MB/s
-test misc::medium_1MB                        ... bench:     164,027 ns/iter (+/- 2,494) = 6392 MB/s
-test misc::medium_32                         ... bench:          22 ns/iter (+/- 1) = 1454 MB/s
-test misc::medium_32K                        ... bench:       4,962 ns/iter (+/- 60) = 6603 MB/s
-test misc::not_literal                       ... bench:         359 ns/iter (+/- 5) = 142 MB/s
-test misc::one_pass_long_prefix              ... bench:          94 ns/iter (+/- 3) = 276 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         101 ns/iter (+/- 1) = 257 MB/s
-test misc::one_pass_short                    ... bench:         332 ns/iter (+/- 6) = 51 MB/s
-test misc::one_pass_short_not                ... bench:         318 ns/iter (+/- 4) = 53 MB/s
-test sherlock::before_holmes                 ... bench:  70,859,542 ns/iter (+/- 594,306) = 8 MB/s
-test sherlock::everything_greedy             ... bench:   5,129,894 ns/iter (+/- 33,792) = 115 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   2,388,047 ns/iter (+/- 19,666) = 249 MB/s
-test sherlock::ing_suffix                    ... bench:  28,413,935 ns/iter (+/- 800,513) = 20 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   2,636,327 ns/iter (+/- 66,410) = 225 MB/s
-test sherlock::letters                       ... bench:  26,471,724 ns/iter (+/- 872,994) = 22 MB/s
-test sherlock::letters_lower                 ... bench:  26,124,489 ns/iter (+/- 556,750) = 22 MB/s
-test sherlock::letters_upper                 ... bench:  11,268,144 ns/iter (+/- 338,510) = 52 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     195,797 ns/iter (+/- 1,621) = 3038 MB/s
-test sherlock::name_alt1                     ... bench:   2,100,763 ns/iter (+/- 16,823) = 283 MB/s
-test sherlock::name_alt2                     ... bench:   2,212,816 ns/iter (+/- 17,997) = 268 MB/s
-test sherlock::name_alt3                     ... bench:   3,031,567 ns/iter (+/- 35,631) = 196 MB/s
-test sherlock::name_alt3_nocase              ... bench:  39,737,911 ns/iter (+/- 166,863) = 14 MB/s
-test sherlock::name_alt4                     ... bench:   2,230,681 ns/iter (+/- 18,856) = 266 MB/s
-test sherlock::name_alt4_nocase              ... bench:   8,294,698 ns/iter (+/- 36,887) = 71 MB/s
-test sherlock::name_holmes                   ... bench:     402,600 ns/iter (+/- 6,232) = 1477 MB/s
-test sherlock::name_holmes_nocase            ... bench:   4,074,155 ns/iter (+/- 23,317) = 146 MB/s
-test sherlock::name_sherlock                 ... bench:     270,225 ns/iter (+/- 2,815) = 2201 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     196,502 ns/iter (+/- 2,168) = 3027 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   4,397,347 ns/iter (+/- 28,567) = 135 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   4,400,574 ns/iter (+/- 25,127) = 135 MB/s
-test sherlock::name_whitespace               ... bench:     274,462 ns/iter (+/- 3,180) = 2167 MB/s
-test sherlock::no_match_common               ... bench:     596,601 ns/iter (+/- 9,285) = 997 MB/s
-test sherlock::no_match_uncommon             ... bench:     586,258 ns/iter (+/- 7,702) = 1014 MB/s
-test sherlock::quotes                        ... bench:   4,069,570 ns/iter (+/- 20,372) = 146 MB/s
-test sherlock::repeated_class_negation       ... bench:  44,936,445 ns/iter (+/- 103,467) = 13 MB/s
-test sherlock::the_lower                     ... bench:   1,300,513 ns/iter (+/- 12,884) = 457 MB/s
-test sherlock::the_nocase                    ... bench:   5,141,237 ns/iter (+/- 25,487) = 115 MB/s
-test sherlock::the_upper                     ... bench:     821,454 ns/iter (+/- 13,420) = 724 MB/s
-test sherlock::the_whitespace                ... bench:   2,009,530 ns/iter (+/- 14,082) = 296 MB/s
-test sherlock::word_ending_n                 ... bench:  27,847,316 ns/iter (+/- 47,618) = 21 MB/s
-test sherlock::words                         ... bench:  21,105,627 ns/iter (+/- 33,436) = 28 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 61 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/pcre b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/pcre
deleted file mode 100644
index 236613a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/pcre
+++ /dev/null
@@ -1,66 +0,0 @@
-     Running benches/target/release/pcre-855c18fb35cdf072
-
-running 60 tests
-test misc::anchored_literal_long_match       ... bench:          88 ns/iter (+/- 12) = 4431 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          58 ns/iter (+/- 1) = 6724 MB/s
-test misc::anchored_literal_short_match      ... bench:          88 ns/iter (+/- 1) = 295 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          60 ns/iter (+/- 3) = 433 MB/s
-test misc::easy0_1K                          ... bench:         266 ns/iter (+/- 1) = 3849 MB/s
-test misc::easy0_1MB                         ... bench:     227,366 ns/iter (+/- 794) = 4611 MB/s
-test misc::easy0_32                          ... bench:          62 ns/iter (+/- 2) = 516 MB/s
-test misc::easy0_32K                         ... bench:       7,061 ns/iter (+/- 109) = 4640 MB/s
-test misc::easy1_1K                          ... bench:         805 ns/iter (+/- 10) = 1272 MB/s
-test misc::easy1_1MB                         ... bench:     751,948 ns/iter (+/- 6,995) = 1394 MB/s
-test misc::easy1_32                          ... bench:          71 ns/iter (+/- 1) = 450 MB/s
-test misc::easy1_32K                         ... bench:      23,635 ns/iter (+/- 213) = 1386 MB/s
-test misc::hard_1K                           ... bench:      31,008 ns/iter (+/- 299) = 33 MB/s
-test misc::hard_1MB                          ... bench:  35,078,241 ns/iter (+/- 94,197) = 29 MB/s
-test misc::hard_32                           ... bench:         313 ns/iter (+/- 1) = 102 MB/s
-test misc::hard_32K                          ... bench:     995,958 ns/iter (+/- 10,945) = 32 MB/s
-test misc::literal                           ... bench:         130 ns/iter (+/- 1) = 392 MB/s
-test misc::match_class                       ... bench:         176 ns/iter (+/- 2) = 460 MB/s
-test misc::match_class_in_range              ... bench:         178 ns/iter (+/- 1) = 455 MB/s
-test misc::match_class_unicode               ... bench:         511 ns/iter (+/- 6) = 315 MB/s
-test misc::medium_1K                         ... bench:         275 ns/iter (+/- 4) = 3723 MB/s
-test misc::medium_1MB                        ... bench:     239,603 ns/iter (+/- 1,808) = 4376 MB/s
-test misc::medium_32                         ... bench:          62 ns/iter (+/- 1) = 516 MB/s
-test misc::medium_32K                        ... bench:       7,385 ns/iter (+/- 43) = 4437 MB/s
-test misc::not_literal                       ... bench:         274 ns/iter (+/- 3) = 186 MB/s
-test misc::one_pass_long_prefix              ... bench:          87 ns/iter (+/- 1) = 298 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          88 ns/iter (+/- 0) = 295 MB/s
-test misc::one_pass_short                    ... bench:         115 ns/iter (+/- 0) = 147 MB/s
-test misc::one_pass_short_not                ... bench:         118 ns/iter (+/- 0) = 144 MB/s
-test sherlock::before_holmes                 ... bench:  14,338,348 ns/iter (+/- 23,734) = 41 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     547,196 ns/iter (+/- 4,100) = 1087 MB/s
-test sherlock::ing_suffix                    ... bench:   6,012,620 ns/iter (+/- 51,777) = 98 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   6,374,577 ns/iter (+/- 46,486) = 93 MB/s
-test sherlock::letters                       ... bench:  28,575,184 ns/iter (+/- 65,051) = 20 MB/s
-test sherlock::letters_lower                 ... bench:  25,819,606 ns/iter (+/- 180,823) = 23 MB/s
-test sherlock::letters_upper                 ... bench:   3,227,381 ns/iter (+/- 11,443) = 184 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     193,512 ns/iter (+/- 1,316) = 3074 MB/s
-test sherlock::name_alt1                     ... bench:     454,510 ns/iter (+/- 2,721) = 1308 MB/s
-test sherlock::name_alt2                     ... bench:     499,453 ns/iter (+/- 4,692) = 1191 MB/s
-test sherlock::name_alt3                     ... bench:   1,085,732 ns/iter (+/- 6,841) = 547 MB/s
-test sherlock::name_alt3_nocase              ... bench:   3,194,995 ns/iter (+/- 12,655) = 186 MB/s
-test sherlock::name_alt4                     ... bench:     944,353 ns/iter (+/- 12,661) = 629 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,646,368 ns/iter (+/- 12,376) = 361 MB/s
-test sherlock::name_holmes                   ... bench:     395,019 ns/iter (+/- 3,929) = 1506 MB/s
-test sherlock::name_holmes_nocase            ... bench:     493,327 ns/iter (+/- 7,213) = 1205 MB/s
-test sherlock::name_sherlock                 ... bench:     266,400 ns/iter (+/- 1,591) = 2233 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     196,357 ns/iter (+/- 1,770) = 3029 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,259,747 ns/iter (+/- 4,939) = 472 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,128,970 ns/iter (+/- 6,730) = 526 MB/s
-test sherlock::name_whitespace               ... bench:     267,323 ns/iter (+/- 1,296) = 2225 MB/s
-test sherlock::no_match_common               ... bench:     595,372 ns/iter (+/- 5,690) = 999 MB/s
-test sherlock::no_match_uncommon             ... bench:     585,406 ns/iter (+/- 5,719) = 1016 MB/s
-test sherlock::quotes                        ... bench:   1,223,528 ns/iter (+/- 6,579) = 486 MB/s
-test sherlock::repeated_class_negation       ... bench:   6,440,584 ns/iter (+/- 20,444) = 92 MB/s
-test sherlock::the_lower                     ... bench:   1,220,999 ns/iter (+/- 7,595) = 487 MB/s
-test sherlock::the_nocase                    ... bench:   1,263,078 ns/iter (+/- 15,321) = 471 MB/s
-test sherlock::the_upper                     ... bench:     781,141 ns/iter (+/- 15,408) = 761 MB/s
-test sherlock::the_whitespace                ... bench:   1,383,414 ns/iter (+/- 548,289) = 430 MB/s
-test sherlock::word_ending_n                 ... bench:  12,709,045 ns/iter (+/- 51,420) = 46 MB/s
-test sherlock::words                         ... bench:  10,798,918 ns/iter (+/- 40,027) = 55 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 60 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/rust
deleted file mode 100644
index 6dec097..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/rust
+++ /dev/null
@@ -1,83 +0,0 @@
-   Compiling regex-syntax v0.2.5 (file:///home/andrew/data/projects/rust/regex/benches)
-   Compiling regex v0.1.55 (file:///home/andrew/data/projects/rust/regex/benches)
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/benches)
-     Running benches/target/release/rust-50db306d093e5666
-
-running 74 tests
-test misc::anchored_literal_long_match       ... bench:          75 ns/iter (+/- 5) = 5200 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          56 ns/iter (+/- 0) = 6964 MB/s
-test misc::anchored_literal_short_match      ... bench:          79 ns/iter (+/- 0) = 329 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          56 ns/iter (+/- 1) = 464 MB/s
-test misc::easy0_1K                          ... bench:         138 ns/iter (+/- 0) = 7420 MB/s
-test misc::easy0_1MB                         ... bench:     247,159 ns/iter (+/- 724) = 4242 MB/s
-test misc::easy0_32                          ... bench:          71 ns/iter (+/- 0) = 450 MB/s
-test misc::easy0_32K                         ... bench:       5,474 ns/iter (+/- 34) = 5986 MB/s
-test misc::easy1_1K                          ... bench:         273 ns/iter (+/- 1) = 3750 MB/s
-test misc::easy1_1MB                         ... bench:     317,946 ns/iter (+/- 2,512) = 3297 MB/s
-test misc::easy1_32                          ... bench:          67 ns/iter (+/- 0) = 477 MB/s
-test misc::easy1_32K                         ... bench:       5,882 ns/iter (+/- 32) = 5570 MB/s
-test misc::hard_1K                           ... bench:       4,713 ns/iter (+/- 13) = 217 MB/s
-test misc::hard_1MB                          ... bench:   4,732,901 ns/iter (+/- 6,948) = 221 MB/s
-test misc::hard_32                           ... bench:         201 ns/iter (+/- 0) = 159 MB/s
-test misc::hard_32K                          ... bench:     147,994 ns/iter (+/- 900) = 221 MB/s
-test misc::literal                           ... bench:          19 ns/iter (+/- 0) = 2684 MB/s
-test misc::match_class                       ... bench:          85 ns/iter (+/- 0) = 952 MB/s
-test misc::match_class_in_range              ... bench:          30 ns/iter (+/- 1) = 2700 MB/s
-test misc::match_class_unicode               ... bench:         806 ns/iter (+/- 2) = 199 MB/s
-test misc::medium_1K                         ... bench:       1,384 ns/iter (+/- 10) = 739 MB/s
-test misc::medium_1MB                        ... bench:   1,974,381 ns/iter (+/- 7,383) = 531 MB/s
-test misc::medium_32                         ... bench:         130 ns/iter (+/- 0) = 246 MB/s
-test misc::medium_32K                        ... bench:      52,783 ns/iter (+/- 465) = 620 MB/s
-test misc::no_exponential                    ... bench:         536 ns/iter (+/- 13) = 186 MB/s
-test misc::not_literal                       ... bench:         293 ns/iter (+/- 1) = 174 MB/s
-test misc::one_pass_long_prefix              ... bench:         179 ns/iter (+/- 1) = 145 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         180 ns/iter (+/- 2) = 144 MB/s
-test misc::one_pass_short                    ... bench:         139 ns/iter (+/- 1) = 122 MB/s
-test misc::one_pass_short_not                ... bench:         142 ns/iter (+/- 1) = 119 MB/s
-test misc::replace_all                       ... bench:         171 ns/iter (+/- 1)
-test rust_compile::compile_huge              ... bench:     126,158 ns/iter (+/- 1,790)
-test rust_compile::compile_huge_bytes        ... bench:  18,088,719 ns/iter (+/- 518,980)
-test rust_compile::compile_simple            ... bench:       6,141 ns/iter (+/- 394)
-test rust_compile::compile_simple_bytes      ... bench:       6,669 ns/iter (+/- 306)
-test rust_compile::compile_small             ... bench:       7,431 ns/iter (+/- 275)
-test rust_compile::compile_small_bytes       ... bench:     191,002 ns/iter (+/- 1,297)
-test rust_parse::parse_huge                  ... bench:       1,204 ns/iter (+/- 9)
-test rust_parse::parse_simple                ... bench:       1,905 ns/iter (+/- 16)
-test rust_parse::parse_small                 ... bench:       2,454 ns/iter (+/- 24)
-test sherlock::before_holmes                 ... bench:   2,748,082 ns/iter (+/- 11,406) = 216 MB/s
-test sherlock::everything_greedy             ... bench:   7,833,414 ns/iter (+/- 42,538) = 75 MB/s
-test sherlock::everything_greedy_nl          ... bench:   5,426,141 ns/iter (+/- 31,378) = 109 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     262,322 ns/iter (+/- 5,243) = 2267 MB/s
-test sherlock::holmes_coword_watson          ... bench:   1,324,677 ns/iter (+/- 21,666) = 449 MB/s
-test sherlock::ing_suffix                    ... bench:   3,179,928 ns/iter (+/- 40,246) = 187 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   3,525,004 ns/iter (+/- 37,262) = 168 MB/s
-test sherlock::letters                       ... bench:  60,268,445 ns/iter (+/- 1,958,610) = 9 MB/s
-test sherlock::letters_lower                 ... bench:  57,743,679 ns/iter (+/- 84,675) = 10 MB/s
-test sherlock::letters_upper                 ... bench:   4,549,709 ns/iter (+/- 9,312) = 130 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,690,794 ns/iter (+/- 2,796) = 221 MB/s
-test sherlock::name_alt1                     ... bench:      42,476 ns/iter (+/- 346) = 14006 MB/s
-test sherlock::name_alt2                     ... bench:     199,058 ns/iter (+/- 1,498) = 2988 MB/s
-test sherlock::name_alt3                     ... bench:   1,248,439 ns/iter (+/- 3,051) = 476 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,463,628 ns/iter (+/- 2,799) = 406 MB/s
-test sherlock::name_alt4                     ... bench:     296,390 ns/iter (+/- 798) = 2007 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,415,770 ns/iter (+/- 3,400) = 420 MB/s
-test sherlock::name_holmes                   ... bench:      49,713 ns/iter (+/- 317) = 11967 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,181,147 ns/iter (+/- 2,842) = 503 MB/s
-test sherlock::name_sherlock                 ... bench:      34,263 ns/iter (+/- 136) = 17363 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      34,179 ns/iter (+/- 188) = 17406 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,236,384 ns/iter (+/- 5,012) = 481 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,232,613 ns/iter (+/- 5,009) = 482 MB/s
-test sherlock::name_whitespace               ... bench:      60,024 ns/iter (+/- 187) = 9911 MB/s
-test sherlock::no_match_common               ... bench:     558,607 ns/iter (+/- 2,595) = 1065 MB/s
-test sherlock::no_match_uncommon             ... bench:      24,049 ns/iter (+/- 54) = 24738 MB/s
-test sherlock::quotes                        ... bench:     966,792 ns/iter (+/- 2,982) = 615 MB/s
-test sherlock::repeated_class_negation       ... bench:  84,186,484 ns/iter (+/- 66,800) = 7 MB/s
-test sherlock::the_lower                     ... bench:     773,759 ns/iter (+/- 2,759) = 768 MB/s
-test sherlock::the_nocase                    ... bench:   1,705,648 ns/iter (+/- 4,604) = 348 MB/s
-test sherlock::the_upper                     ... bench:      52,729 ns/iter (+/- 209) = 11282 MB/s
-test sherlock::the_whitespace                ... bench:   1,981,215 ns/iter (+/- 8,080) = 300 MB/s
-test sherlock::word_ending_n                 ... bench:  53,482,650 ns/iter (+/- 73,844) = 11 MB/s
-test sherlock::words                         ... bench:  18,961,987 ns/iter (+/- 27,794) = 31 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 74 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/rust-bytes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/rust-bytes
deleted file mode 100644
index 735d259..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/03-bytes/rust-bytes
+++ /dev/null
@@ -1,66 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/benches)
-     Running benches/target/release/rust_bytes-9f3b188bc741e04b
-
-running 59 tests
-test misc::anchored_literal_long_match       ... bench:          75 ns/iter (+/- 6) = 5200 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          55 ns/iter (+/- 0) = 7090 MB/s
-test misc::anchored_literal_short_match      ... bench:          75 ns/iter (+/- 0) = 346 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          55 ns/iter (+/- 0) = 472 MB/s
-test misc::easy0_1K                          ... bench:         245 ns/iter (+/- 0) = 4179 MB/s
-test misc::easy0_1MB                         ... bench:     251,614 ns/iter (+/- 1,143) = 4167 MB/s
-test misc::easy0_32                          ... bench:          62 ns/iter (+/- 1) = 516 MB/s
-test misc::easy0_32K                         ... bench:       5,281 ns/iter (+/- 66) = 6204 MB/s
-test misc::easy1_1K                          ... bench:         266 ns/iter (+/- 1) = 3849 MB/s
-test misc::easy1_1MB                         ... bench:     325,060 ns/iter (+/- 2,011) = 3225 MB/s
-test misc::easy1_32                          ... bench:          73 ns/iter (+/- 0) = 438 MB/s
-test misc::easy1_32K                         ... bench:       5,609 ns/iter (+/- 41) = 5842 MB/s
-test misc::hard_1K                           ... bench:       4,678 ns/iter (+/- 38) = 218 MB/s
-test misc::hard_1MB                          ... bench:   4,736,631 ns/iter (+/- 26,227) = 221 MB/s
-test misc::hard_32                           ... bench:         199 ns/iter (+/- 0) = 160 MB/s
-test misc::hard_32K                          ... bench:     148,282 ns/iter (+/- 1,353) = 220 MB/s
-test misc::literal                           ... bench:          18 ns/iter (+/- 0) = 2833 MB/s
-test misc::match_class                       ... bench:          83 ns/iter (+/- 0) = 975 MB/s
-test misc::match_class_in_range              ... bench:          30 ns/iter (+/- 0) = 2700 MB/s
-test misc::medium_1K                         ... bench:       1,147 ns/iter (+/- 10) = 892 MB/s
-test misc::medium_1MB                        ... bench:   1,953,230 ns/iter (+/- 10,530) = 536 MB/s
-test misc::medium_32                         ... bench:          99 ns/iter (+/- 0) = 323 MB/s
-test misc::medium_32K                        ... bench:      54,705 ns/iter (+/- 349) = 598 MB/s
-test misc::no_exponential                    ... bench:         534 ns/iter (+/- 4) = 187 MB/s
-test misc::not_literal                       ... bench:         292 ns/iter (+/- 3) = 174 MB/s
-test misc::one_pass_long_prefix              ... bench:         179 ns/iter (+/- 1) = 145 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         180 ns/iter (+/- 2) = 144 MB/s
-test misc::one_pass_short                    ... bench:         139 ns/iter (+/- 0) = 122 MB/s
-test misc::one_pass_short_not                ... bench:         139 ns/iter (+/- 0) = 122 MB/s
-test sherlock::before_holmes                 ... bench:   2,778,686 ns/iter (+/- 8,735) = 214 MB/s
-test sherlock::everything_greedy             ... bench:   7,884,691 ns/iter (+/- 37,268) = 75 MB/s
-test sherlock::everything_greedy_nl          ... bench:   5,406,627 ns/iter (+/- 24,707) = 110 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     262,175 ns/iter (+/- 1,995) = 2269 MB/s
-test sherlock::holmes_coword_watson          ... bench:   1,299,904 ns/iter (+/- 5,090) = 457 MB/s
-test sherlock::ing_suffix                    ... bench:   3,202,899 ns/iter (+/- 20,810) = 185 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   3,367,381 ns/iter (+/- 14,143) = 176 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,725,593 ns/iter (+/- 10,736) = 218 MB/s
-test sherlock::name_alt1                     ... bench:      42,161 ns/iter (+/- 355) = 14110 MB/s
-test sherlock::name_alt2                     ... bench:     195,390 ns/iter (+/- 1,112) = 3044 MB/s
-test sherlock::name_alt3                     ... bench:   1,248,432 ns/iter (+/- 3,244) = 476 MB/s
-test sherlock::name_alt3_nocase              ... bench:   3,371,906 ns/iter (+/- 42,421) = 176 MB/s
-test sherlock::name_alt4                     ... bench:     296,423 ns/iter (+/- 1,812) = 2007 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,753,178 ns/iter (+/- 23,269) = 339 MB/s
-test sherlock::name_holmes                   ... bench:      49,554 ns/iter (+/- 261) = 12005 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,347,682 ns/iter (+/- 5,678) = 441 MB/s
-test sherlock::name_sherlock                 ... bench:      33,937 ns/iter (+/- 208) = 17530 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      33,870 ns/iter (+/- 225) = 17565 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,212,233 ns/iter (+/- 5,452) = 490 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,190,590 ns/iter (+/- 3,248) = 499 MB/s
-test sherlock::name_whitespace               ... bench:      59,434 ns/iter (+/- 253) = 10009 MB/s
-test sherlock::no_match_common               ... bench:     565,962 ns/iter (+/- 4,601) = 1051 MB/s
-test sherlock::no_match_uncommon             ... bench:      23,729 ns/iter (+/- 218) = 25071 MB/s
-test sherlock::quotes                        ... bench:     966,904 ns/iter (+/- 7,115) = 615 MB/s
-test sherlock::repeated_class_negation       ... bench: 121,271,073 ns/iter (+/- 242,789) = 4 MB/s
-test sherlock::the_lower                     ... bench:     778,850 ns/iter (+/- 6,781) = 763 MB/s
-test sherlock::the_nocase                    ... bench:   2,876,190 ns/iter (+/- 8,611) = 206 MB/s
-test sherlock::the_upper                     ... bench:      52,617 ns/iter (+/- 315) = 11306 MB/s
-test sherlock::the_whitespace                ... bench:   1,982,270 ns/iter (+/- 11,079) = 300 MB/s
-test sherlock::word_ending_n                 ... bench:  76,442,330 ns/iter (+/- 236,690) = 7 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 59 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/onig b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/onig
deleted file mode 100644
index 81b4098..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/onig
+++ /dev/null
@@ -1,78 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/bench)
-     Running target/release/bench-0d58c0af2e68ae0d
-
-running 71 tests
-test misc::anchored_literal_long_match       ... bench:          66 ns/iter (+/- 1) = 5909 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         414 ns/iter (+/- 2) = 942 MB/s
-test misc::anchored_literal_short_match      ... bench:          66 ns/iter (+/- 1) = 393 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          36 ns/iter (+/- 0) = 722 MB/s
-test misc::easy0_1K                          ... bench:         217 ns/iter (+/- 2) = 4843 MB/s
-test misc::easy0_1MB                         ... bench:     130,657 ns/iter (+/- 365) = 8025 MB/s
-test misc::easy0_32                          ... bench:          84 ns/iter (+/- 1) = 702 MB/s
-test misc::easy0_32K                         ... bench:       4,092 ns/iter (+/- 25) = 8014 MB/s
-test misc::easy1_1K                          ... bench:       3,682 ns/iter (+/- 25) = 283 MB/s
-test misc::easy1_1MB                         ... bench:   3,613,381 ns/iter (+/- 5,960) = 290 MB/s
-test misc::easy1_32                          ... bench:         237 ns/iter (+/- 2) = 219 MB/s
-test misc::easy1_32K                         ... bench:     113,040 ns/iter (+/- 303) = 290 MB/s
-test misc::hard_1K                           ... bench:     184,299 ns/iter (+/- 2,508) = 5 MB/s
-test misc::hard_1MB                          ... bench: 198,378,531 ns/iter (+/- 150,404) = 5 MB/s
-test misc::hard_32                           ... bench:       5,765 ns/iter (+/- 26) = 10 MB/s
-test misc::hard_32K                          ... bench:   6,177,362 ns/iter (+/- 21,959) = 5 MB/s
-test misc::literal                           ... bench:         219 ns/iter (+/- 1) = 232 MB/s
-test misc::long_needle1                      ... bench:   6,978,321 ns/iter (+/- 120,792) = 14 MB/s
-test misc::long_needle2                      ... bench:   6,981,122 ns/iter (+/- 120,371) = 14 MB/s
-test misc::match_class                       ... bench:         329 ns/iter (+/- 5) = 246 MB/s
-test misc::match_class_in_range              ... bench:         332 ns/iter (+/- 1) = 243 MB/s
-test misc::match_class_unicode               ... bench:       1,980 ns/iter (+/- 23) = 81 MB/s
-test misc::medium_1K                         ... bench:         232 ns/iter (+/- 0) = 4534 MB/s
-test misc::medium_1MB                        ... bench:     130,702 ns/iter (+/- 997) = 8022 MB/s
-test misc::medium_32                         ... bench:          95 ns/iter (+/- 1) = 631 MB/s
-test misc::medium_32K                        ... bench:       4,103 ns/iter (+/- 13) = 7993 MB/s
-test misc::not_literal                       ... bench:         353 ns/iter (+/- 2) = 144 MB/s
-test misc::one_pass_long_prefix              ... bench:          89 ns/iter (+/- 1) = 292 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          97 ns/iter (+/- 0) = 268 MB/s
-test misc::one_pass_short                    ... bench:         329 ns/iter (+/- 4) = 51 MB/s
-test misc::one_pass_short_not                ... bench:         324 ns/iter (+/- 4) = 52 MB/s
-test misc::reallyhard2_1K                    ... bench:     563,552 ns/iter (+/- 2,559) = 1 MB/s
-test misc::reallyhard_1K                     ... bench:     184,200 ns/iter (+/- 553) = 5 MB/s
-test misc::reallyhard_1MB                    ... bench: 198,336,145 ns/iter (+/- 149,796) = 5 MB/s
-test misc::reallyhard_32                     ... bench:       5,766 ns/iter (+/- 16) = 10 MB/s
-test misc::reallyhard_32K                    ... bench:   6,174,904 ns/iter (+/- 5,491) = 5 MB/s
-test sherlock::before_holmes                 ... bench:  70,476,093 ns/iter (+/- 271,168) = 8 MB/s
-test sherlock::everything_greedy             ... bench:   5,175,140 ns/iter (+/- 19,413) = 114 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   2,379,427 ns/iter (+/- 5,816) = 250 MB/s
-test sherlock::ing_suffix                    ... bench:  28,275,131 ns/iter (+/- 49,569) = 21 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   2,648,838 ns/iter (+/- 9,247) = 224 MB/s
-test sherlock::letters                       ... bench:  25,940,039 ns/iter (+/- 57,724) = 22 MB/s
-test sherlock::letters_lower                 ... bench:  25,680,050 ns/iter (+/- 48,209) = 23 MB/s
-test sherlock::letters_upper                 ... bench:  11,122,063 ns/iter (+/- 28,302) = 53 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     195,903 ns/iter (+/- 1,162) = 3036 MB/s
-test sherlock::name_alt1                     ... bench:   2,100,175 ns/iter (+/- 4,251) = 283 MB/s
-test sherlock::name_alt2                     ... bench:   2,210,122 ns/iter (+/- 7,514) = 269 MB/s
-test sherlock::name_alt3                     ... bench:   3,025,653 ns/iter (+/- 9,375) = 196 MB/s
-test sherlock::name_alt3_nocase              ... bench:  39,475,102 ns/iter (+/- 51,488) = 15 MB/s
-test sherlock::name_alt4                     ... bench:   2,225,952 ns/iter (+/- 7,340) = 267 MB/s
-test sherlock::name_alt4_nocase              ... bench:   8,227,413 ns/iter (+/- 18,088) = 72 MB/s
-test sherlock::name_alt5                     ... bench:   2,300,803 ns/iter (+/- 6,325) = 258 MB/s
-test sherlock::name_alt5_nocase              ... bench:  11,488,783 ns/iter (+/- 28,880) = 51 MB/s
-test sherlock::name_holmes                   ... bench:     400,760 ns/iter (+/- 907) = 1484 MB/s
-test sherlock::name_holmes_nocase            ... bench:   4,044,850 ns/iter (+/- 11,665) = 147 MB/s
-test sherlock::name_sherlock                 ... bench:     269,021 ns/iter (+/- 791) = 2211 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     196,161 ns/iter (+/- 899) = 3032 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   4,363,621 ns/iter (+/- 5,339) = 136 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   4,389,375 ns/iter (+/- 11,077) = 135 MB/s
-test sherlock::name_whitespace               ... bench:     273,691 ns/iter (+/- 957) = 2173 MB/s
-test sherlock::no_match_common               ... bench:     588,744 ns/iter (+/- 1,732) = 1010 MB/s
-test sherlock::no_match_really_common        ... bench:     673,335 ns/iter (+/- 1,407) = 883 MB/s
-test sherlock::no_match_uncommon             ... bench:     578,009 ns/iter (+/- 5,111) = 1029 MB/s
-test sherlock::quotes                        ... bench:   4,066,005 ns/iter (+/- 10,116) = 146 MB/s
-test sherlock::repeated_class_negation       ... bench:  43,374,733 ns/iter (+/- 48,409) = 13 MB/s
-test sherlock::the_lower                     ... bench:   1,275,300 ns/iter (+/- 5,351) = 466 MB/s
-test sherlock::the_nocase                    ... bench:   5,100,832 ns/iter (+/- 11,024) = 116 MB/s
-test sherlock::the_upper                     ... bench:     816,606 ns/iter (+/- 3,370) = 728 MB/s
-test sherlock::the_whitespace                ... bench:   2,079,544 ns/iter (+/- 4,585) = 286 MB/s
-test sherlock::word_ending_n                 ... bench:  27,699,175 ns/iter (+/- 58,998) = 21 MB/s
-test sherlock::words                         ... bench:  19,460,356 ns/iter (+/- 29,406) = 30 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 71 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/pcre1-jit b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/pcre1-jit
deleted file mode 100644
index 2118d1f5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/pcre1-jit
+++ /dev/null
@@ -1,77 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/bench)
-     Running target/release/bench-0d58c0af2e68ae0d
-
-running 70 tests
-test misc::anchored_literal_long_match       ... bench:          32 ns/iter (+/- 0) = 12187 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          27 ns/iter (+/- 1) = 14444 MB/s
-test misc::anchored_literal_short_match      ... bench:          31 ns/iter (+/- 0) = 838 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          27 ns/iter (+/- 2) = 962 MB/s
-test misc::easy0_1K                          ... bench:         247 ns/iter (+/- 1) = 4255 MB/s
-test misc::easy0_1MB                         ... bench:     193,485 ns/iter (+/- 906) = 5419 MB/s
-test misc::easy0_32                          ... bench:          55 ns/iter (+/- 1) = 1072 MB/s
-test misc::easy0_32K                         ... bench:       6,057 ns/iter (+/- 19) = 5414 MB/s
-test misc::easy1_1K                          ... bench:         604 ns/iter (+/- 3) = 1728 MB/s
-test misc::easy1_1MB                         ... bench:     553,893 ns/iter (+/- 1,299) = 1893 MB/s
-test misc::easy1_32                          ... bench:          81 ns/iter (+/- 1) = 641 MB/s
-test misc::easy1_32K                         ... bench:      17,335 ns/iter (+/- 33) = 1891 MB/s
-test misc::hard_1K                           ... bench:      56,956 ns/iter (+/- 148) = 18 MB/s
-test misc::hard_1MB                          ... bench:  63,576,485 ns/iter (+/- 93,278) = 16 MB/s
-test misc::hard_32                           ... bench:       1,744 ns/iter (+/- 10) = 33 MB/s
-test misc::hard_32K                          ... bench:   1,931,799 ns/iter (+/- 7,752) = 16 MB/s
-test misc::literal                           ... bench:          73 ns/iter (+/- 1) = 698 MB/s
-test misc::long_needle1                      ... bench:     532,256 ns/iter (+/- 4,633) = 187 MB/s
-test misc::long_needle2                      ... bench:     532,131 ns/iter (+/- 3,771) = 187 MB/s
-test misc::match_class                       ... bench:         120 ns/iter (+/- 0) = 675 MB/s
-test misc::match_class_in_range              ... bench:         119 ns/iter (+/- 0) = 680 MB/s
-test misc::match_class_unicode               ... bench:         456 ns/iter (+/- 2) = 353 MB/s
-test misc::medium_1K                         ... bench:         260 ns/iter (+/- 1) = 4046 MB/s
-test misc::medium_1MB                        ... bench:     206,175 ns/iter (+/- 983) = 5085 MB/s
-test misc::medium_32                         ... bench:          58 ns/iter (+/- 0) = 1034 MB/s
-test misc::medium_32K                        ... bench:       6,443 ns/iter (+/- 26) = 5090 MB/s
-test misc::not_literal                       ... bench:         216 ns/iter (+/- 0) = 236 MB/s
-test misc::one_pass_long_prefix              ... bench:          31 ns/iter (+/- 0) = 838 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          31 ns/iter (+/- 0) = 838 MB/s
-test misc::one_pass_short                    ... bench:          59 ns/iter (+/- 0) = 288 MB/s
-test misc::one_pass_short_not                ... bench:          63 ns/iter (+/- 2) = 269 MB/s
-test misc::reallyhard2_1K                    ... bench:      96,070 ns/iter (+/- 238) = 10 MB/s
-test misc::reallyhard_1K                     ... bench:      60,783 ns/iter (+/- 170) = 17 MB/s
-test misc::reallyhard_1MB                    ... bench:  60,899,076 ns/iter (+/- 483,661) = 17 MB/s
-test misc::reallyhard_32                     ... bench:       1,822 ns/iter (+/- 58) = 32 MB/s
-test misc::reallyhard_32K                    ... bench:   1,809,770 ns/iter (+/- 45,348) = 18 MB/s
-test sherlock::before_holmes                 ... bench:  14,513,309 ns/iter (+/- 146,332) = 40 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     543,738 ns/iter (+/- 4,549) = 1094 MB/s
-test sherlock::ing_suffix                    ... bench:   5,561,653 ns/iter (+/- 44,720) = 106 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   6,116,112 ns/iter (+/- 91,799) = 97 MB/s
-test sherlock::letters                       ... bench:  15,633,185 ns/iter (+/- 313,036) = 38 MB/s
-test sherlock::letters_lower                 ... bench:  15,228,423 ns/iter (+/- 290,879) = 39 MB/s
-test sherlock::letters_upper                 ... bench:   3,279,472 ns/iter (+/- 48,073) = 181 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     192,935 ns/iter (+/- 1,405) = 3083 MB/s
-test sherlock::name_alt1                     ... bench:     452,708 ns/iter (+/- 4,728) = 1314 MB/s
-test sherlock::name_alt2                     ... bench:     477,092 ns/iter (+/- 6,192) = 1246 MB/s
-test sherlock::name_alt3                     ... bench:     959,514 ns/iter (+/- 25,214) = 620 MB/s
-test sherlock::name_alt3_nocase              ... bench:   3,478,546 ns/iter (+/- 52,300) = 171 MB/s
-test sherlock::name_alt4                     ... bench:     947,187 ns/iter (+/- 9,985) = 628 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,852,289 ns/iter (+/- 30,616) = 321 MB/s
-test sherlock::name_alt5                     ... bench:     655,616 ns/iter (+/- 9,327) = 907 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,957,627 ns/iter (+/- 47,271) = 303 MB/s
-test sherlock::name_holmes                   ... bench:     383,813 ns/iter (+/- 1,185) = 1550 MB/s
-test sherlock::name_holmes_nocase            ... bench:     478,335 ns/iter (+/- 4,851) = 1243 MB/s
-test sherlock::name_sherlock                 ... bench:     263,611 ns/iter (+/- 875) = 2256 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     193,687 ns/iter (+/- 1,070) = 3071 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,258,447 ns/iter (+/- 32,369) = 472 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,330,069 ns/iter (+/- 36,657) = 447 MB/s
-test sherlock::name_whitespace               ... bench:     264,340 ns/iter (+/- 2,723) = 2250 MB/s
-test sherlock::no_match_common               ... bench:     589,309 ns/iter (+/- 5,038) = 1009 MB/s
-test sherlock::no_match_really_common        ... bench:     683,909 ns/iter (+/- 4,987) = 869 MB/s
-test sherlock::no_match_uncommon             ... bench:     578,309 ns/iter (+/- 2,831) = 1028 MB/s
-test sherlock::quotes                        ... bench:   1,184,492 ns/iter (+/- 27,247) = 502 MB/s
-test sherlock::repeated_class_negation       ... bench:   7,208,342 ns/iter (+/- 17,978) = 82 MB/s
-test sherlock::the_lower                     ... bench:   1,001,754 ns/iter (+/- 6,215) = 593 MB/s
-test sherlock::the_nocase                    ... bench:   1,043,260 ns/iter (+/- 10,217) = 570 MB/s
-test sherlock::the_upper                     ... bench:     753,058 ns/iter (+/- 1,640) = 790 MB/s
-test sherlock::the_whitespace                ... bench:   1,195,227 ns/iter (+/- 9,524) = 497 MB/s
-test sherlock::word_ending_n                 ... bench:  11,767,448 ns/iter (+/- 15,460) = 50 MB/s
-test sherlock::words                         ... bench:   7,551,361 ns/iter (+/- 25,566) = 78 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 70 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/pcre2-jit b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/pcre2-jit
deleted file mode 100644
index 9a110b5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/pcre2-jit
+++ /dev/null
@@ -1,77 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/bench)
-     Running target/release/bench-0d58c0af2e68ae0d
-
-running 70 tests
-test misc::anchored_literal_long_match       ... bench:          22 ns/iter (+/- 0) = 17727 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          14 ns/iter (+/- 0) = 27857 MB/s
-test misc::anchored_literal_short_match      ... bench:          21 ns/iter (+/- 0) = 1238 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          14 ns/iter (+/- 1) = 1857 MB/s
-test misc::easy0_1K                          ... bench:         235 ns/iter (+/- 2) = 4472 MB/s
-test misc::easy0_1MB                         ... bench:     193,652 ns/iter (+/- 524) = 5414 MB/s
-test misc::easy0_32                          ... bench:          43 ns/iter (+/- 0) = 1372 MB/s
-test misc::easy0_32K                         ... bench:       6,024 ns/iter (+/- 12) = 5444 MB/s
-test misc::easy1_1K                          ... bench:         235 ns/iter (+/- 4) = 4442 MB/s
-test misc::easy1_1MB                         ... bench:     193,685 ns/iter (+/- 617) = 5413 MB/s
-test misc::easy1_32                          ... bench:          45 ns/iter (+/- 0) = 1155 MB/s
-test misc::easy1_32K                         ... bench:       6,018 ns/iter (+/- 9) = 5448 MB/s
-test misc::hard_1K                           ... bench:       1,880 ns/iter (+/- 7) = 559 MB/s
-test misc::hard_1MB                          ... bench:   1,283,101 ns/iter (+/- 4,420) = 817 MB/s
-test misc::hard_32                           ... bench:         119 ns/iter (+/- 2) = 495 MB/s
-test misc::hard_32K                          ... bench:      39,919 ns/iter (+/- 95) = 821 MB/s
-test misc::literal                           ... bench:          18 ns/iter (+/- 1) = 2833 MB/s
-test misc::long_needle1                      ... bench:     513,050 ns/iter (+/- 2,267) = 194 MB/s
-test misc::long_needle2                      ... bench:     518,009 ns/iter (+/- 3,066) = 193 MB/s
-test misc::match_class                       ... bench:         106 ns/iter (+/- 1) = 764 MB/s
-test misc::match_class_in_range              ... bench:          24 ns/iter (+/- 1) = 3375 MB/s
-test misc::match_class_unicode               ... bench:         370 ns/iter (+/- 2) = 435 MB/s
-test misc::medium_1K                         ... bench:         237 ns/iter (+/- 0) = 4438 MB/s
-test misc::medium_1MB                        ... bench:     193,478 ns/iter (+/- 540) = 5419 MB/s
-test misc::medium_32                         ... bench:          46 ns/iter (+/- 0) = 1304 MB/s
-test misc::medium_32K                        ... bench:       6,024 ns/iter (+/- 15) = 5444 MB/s
-test misc::not_literal                       ... bench:         274 ns/iter (+/- 1) = 186 MB/s
-test misc::one_pass_long_prefix              ... bench:          19 ns/iter (+/- 1) = 1368 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          19 ns/iter (+/- 0) = 1368 MB/s
-test misc::one_pass_short                    ... bench:          47 ns/iter (+/- 0) = 361 MB/s
-test misc::one_pass_short_not                ... bench:          50 ns/iter (+/- 2) = 340 MB/s
-test misc::reallyhard2_1K                    ... bench:       4,959 ns/iter (+/- 34) = 209 MB/s
-test misc::reallyhard_1K                     ... bench:       2,145 ns/iter (+/- 17) = 489 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,292,683 ns/iter (+/- 3,342) = 811 MB/s
-test misc::reallyhard_32                     ... bench:         124 ns/iter (+/- 4) = 475 MB/s
-test misc::reallyhard_32K                    ... bench:      47,263 ns/iter (+/- 173) = 693 MB/s
-test sherlock::before_holmes                 ... bench:   4,706,445 ns/iter (+/- 23,483) = 126 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     488,613 ns/iter (+/- 2,921) = 1217 MB/s
-test sherlock::ing_suffix                    ... bench:   1,886,092 ns/iter (+/- 9,951) = 315 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   5,091,401 ns/iter (+/- 21,315) = 116 MB/s
-test sherlock::letters                       ... bench:  10,082,811 ns/iter (+/- 41,989) = 59 MB/s
-test sherlock::letters_lower                 ... bench:   9,640,481 ns/iter (+/- 46,499) = 61 MB/s
-test sherlock::letters_upper                 ... bench:   1,772,105 ns/iter (+/- 8,833) = 335 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     192,075 ns/iter (+/- 1,043) = 3097 MB/s
-test sherlock::name_alt1                     ... bench:     447,382 ns/iter (+/- 2,142) = 1329 MB/s
-test sherlock::name_alt2                     ... bench:     447,421 ns/iter (+/- 2,077) = 1329 MB/s
-test sherlock::name_alt3                     ... bench:     963,775 ns/iter (+/- 1,684) = 617 MB/s
-test sherlock::name_alt3_nocase              ... bench:   3,152,920 ns/iter (+/- 5,757) = 188 MB/s
-test sherlock::name_alt4                     ... bench:      80,204 ns/iter (+/- 379) = 7417 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,665,405 ns/iter (+/- 7,134) = 357 MB/s
-test sherlock::name_alt5                     ... bench:     649,701 ns/iter (+/- 1,722) = 915 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,773,323 ns/iter (+/- 9,648) = 335 MB/s
-test sherlock::name_holmes                   ... bench:     377,003 ns/iter (+/- 3,390) = 1578 MB/s
-test sherlock::name_holmes_nocase            ... bench:     472,947 ns/iter (+/- 1,011) = 1257 MB/s
-test sherlock::name_sherlock                 ... bench:     262,237 ns/iter (+/- 1,268) = 2268 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     192,306 ns/iter (+/- 520) = 3093 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,318,573 ns/iter (+/- 1,462) = 451 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,110,629 ns/iter (+/- 13,163) = 535 MB/s
-test sherlock::name_whitespace               ... bench:     262,889 ns/iter (+/- 637) = 2263 MB/s
-test sherlock::no_match_common               ... bench:     388,869 ns/iter (+/- 1,512) = 1529 MB/s
-test sherlock::no_match_really_common        ... bench:     422,058 ns/iter (+/- 1,788) = 1409 MB/s
-test sherlock::no_match_uncommon             ... bench:      30,594 ns/iter (+/- 166) = 19446 MB/s
-test sherlock::quotes                        ... bench:     569,628 ns/iter (+/- 2,052) = 1044 MB/s
-test sherlock::repeated_class_negation       ... bench:   6,410,128 ns/iter (+/- 19,866) = 92 MB/s
-test sherlock::the_lower                     ... bench:     648,366 ns/iter (+/- 5,142) = 917 MB/s
-test sherlock::the_nocase                    ... bench:     694,035 ns/iter (+/- 4,844) = 857 MB/s
-test sherlock::the_upper                     ... bench:      54,007 ns/iter (+/- 486) = 11015 MB/s
-test sherlock::the_whitespace                ... bench:     850,430 ns/iter (+/- 9,641) = 699 MB/s
-test sherlock::word_ending_n                 ... bench:   5,768,961 ns/iter (+/- 20,924) = 103 MB/s
-test sherlock::words                         ... bench:   5,866,550 ns/iter (+/- 34,451) = 101 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 70 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/re2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/re2
deleted file mode 100644
index 31a6e6d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/re2
+++ /dev/null
@@ -1,79 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/bench)
-     Running target/release/bench-0d58c0af2e68ae0d
-
-running 72 tests
-test misc::anchored_literal_long_match       ... bench:         119 ns/iter (+/- 2) = 3277 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          45 ns/iter (+/- 0) = 8666 MB/s
-test misc::anchored_literal_short_match      ... bench:         120 ns/iter (+/- 1) = 216 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          45 ns/iter (+/- 0) = 577 MB/s
-test misc::easy0_1K                          ... bench:         187 ns/iter (+/- 0) = 5620 MB/s
-test misc::easy0_1MB                         ... bench:      39,573 ns/iter (+/- 600) = 26497 MB/s
-test misc::easy0_32                          ... bench:         165 ns/iter (+/- 1) = 357 MB/s
-test misc::easy0_32K                         ... bench:         971 ns/iter (+/- 20) = 33774 MB/s
-test misc::easy1_1K                          ... bench:         175 ns/iter (+/- 1) = 5965 MB/s
-test misc::easy1_1MB                         ... bench:      39,451 ns/iter (+/- 183) = 26579 MB/s
-test misc::easy1_32                          ... bench:         153 ns/iter (+/- 1) = 339 MB/s
-test misc::easy1_32K                         ... bench:         942 ns/iter (+/- 24) = 34806 MB/s
-test misc::hard_1K                           ... bench:       2,362 ns/iter (+/- 11) = 444 MB/s
-test misc::hard_1MB                          ... bench:   2,386,627 ns/iter (+/- 12,925) = 439 MB/s
-test misc::hard_32                           ... bench:         228 ns/iter (+/- 1) = 258 MB/s
-test misc::hard_32K                          ... bench:      74,482 ns/iter (+/- 190) = 440 MB/s
-test misc::literal                           ... bench:         120 ns/iter (+/- 0) = 425 MB/s
-test misc::long_needle1                      ... bench:     184,777 ns/iter (+/- 1,644) = 541 MB/s
-test misc::long_needle2                      ... bench:     184,685 ns/iter (+/- 289) = 541 MB/s
-test misc::match_class                       ... bench:         267 ns/iter (+/- 1) = 303 MB/s
-test misc::match_class_in_range              ... bench:         267 ns/iter (+/- 1) = 303 MB/s
-test misc::match_class_unicode               ... bench:         491 ns/iter (+/- 3) = 327 MB/s
-test misc::medium_1K                         ... bench:       2,065 ns/iter (+/- 4) = 509 MB/s
-test misc::medium_1MB                        ... bench:   1,938,951 ns/iter (+/- 11,278) = 540 MB/s
-test misc::medium_32                         ... bench:         302 ns/iter (+/- 149) = 198 MB/s
-test misc::medium_32K                        ... bench:      60,766 ns/iter (+/- 1,018) = 539 MB/s
-test misc::not_literal                       ... bench:         203 ns/iter (+/- 2) = 251 MB/s
-test misc::one_pass_long_prefix              ... bench:         119 ns/iter (+/- 1) = 218 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         161 ns/iter (+/- 0) = 161 MB/s
-test misc::one_pass_short                    ... bench:         143 ns/iter (+/- 0) = 118 MB/s
-test misc::one_pass_short_not                ... bench:         145 ns/iter (+/- 1) = 117 MB/s
-test misc::reallyhard2_1K                    ... bench:       2,030 ns/iter (+/- 22) = 512 MB/s
-test misc::reallyhard_1K                     ... bench:       2,362 ns/iter (+/- 18) = 444 MB/s
-test misc::reallyhard_1MB                    ... bench:   2,386,760 ns/iter (+/- 22,075) = 439 MB/s
-test misc::reallyhard_32                     ... bench:         230 ns/iter (+/- 2) = 256 MB/s
-test misc::reallyhard_32K                    ... bench:      74,506 ns/iter (+/- 740) = 440 MB/s
-test sherlock::before_holmes                 ... bench:   1,446,270 ns/iter (+/- 5,771) = 411 MB/s
-test sherlock::everything_greedy             ... bench:   9,111,570 ns/iter (+/- 54,091) = 65 MB/s
-test sherlock::everything_greedy_nl          ... bench:   2,489,649 ns/iter (+/- 23,310) = 238 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   1,176,642 ns/iter (+/- 2,181) = 505 MB/s
-test sherlock::holmes_coword_watson          ... bench:   1,389,000 ns/iter (+/- 258,245) = 428 MB/s
-test sherlock::ing_suffix                    ... bench:   3,050,918 ns/iter (+/- 16,854) = 195 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,954,264 ns/iter (+/- 4,656) = 304 MB/s
-test sherlock::letters                       ... bench: 111,162,180 ns/iter (+/- 108,719) = 5 MB/s
-test sherlock::letters_lower                 ... bench: 106,751,460 ns/iter (+/- 414,985) = 5 MB/s
-test sherlock::letters_upper                 ... bench:   4,705,474 ns/iter (+/- 10,913) = 126 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,539,425 ns/iter (+/- 5,440) = 234 MB/s
-test sherlock::name_alt1                     ... bench:      77,719 ns/iter (+/- 275) = 7654 MB/s
-test sherlock::name_alt2                     ... bench:   1,319,600 ns/iter (+/- 2,771) = 450 MB/s
-test sherlock::name_alt3                     ... bench:   1,433,629 ns/iter (+/- 2,943) = 414 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,748,137 ns/iter (+/- 4,343) = 216 MB/s
-test sherlock::name_alt4                     ... bench:   1,354,024 ns/iter (+/- 2,312) = 439 MB/s
-test sherlock::name_alt4_nocase              ... bench:   2,018,381 ns/iter (+/- 2,442) = 294 MB/s
-test sherlock::name_alt5                     ... bench:   1,348,150 ns/iter (+/- 3,870) = 441 MB/s
-test sherlock::name_alt5_nocase              ... bench:   2,114,276 ns/iter (+/- 3,365) = 281 MB/s
-test sherlock::name_holmes                   ... bench:     168,436 ns/iter (+/- 1,503) = 3532 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,645,658 ns/iter (+/- 3,816) = 361 MB/s
-test sherlock::name_sherlock                 ... bench:      59,010 ns/iter (+/- 380) = 10081 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      60,467 ns/iter (+/- 179) = 9838 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,539,137 ns/iter (+/- 5,506) = 386 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,535,058 ns/iter (+/- 3,352) = 387 MB/s
-test sherlock::name_whitespace               ... bench:      62,700 ns/iter (+/- 440) = 9488 MB/s
-test sherlock::no_match_common               ... bench:     439,560 ns/iter (+/- 1,545) = 1353 MB/s
-test sherlock::no_match_really_common        ... bench:     439,333 ns/iter (+/- 1,020) = 1354 MB/s
-test sherlock::no_match_uncommon             ... bench:      23,882 ns/iter (+/- 134) = 24911 MB/s
-test sherlock::quotes                        ... bench:   1,396,564 ns/iter (+/- 2,785) = 425 MB/s
-test sherlock::the_lower                     ... bench:   2,478,251 ns/iter (+/- 5,859) = 240 MB/s
-test sherlock::the_nocase                    ... bench:   3,708,713 ns/iter (+/- 6,919) = 160 MB/s
-test sherlock::the_upper                     ... bench:     232,490 ns/iter (+/- 4,478) = 2558 MB/s
-test sherlock::the_whitespace                ... bench:   2,286,399 ns/iter (+/- 5,006) = 260 MB/s
-test sherlock::word_ending_n                 ... bench:   3,295,919 ns/iter (+/- 27,810) = 180 MB/s
-test sherlock::words                         ... bench:  30,375,810 ns/iter (+/- 37,415) = 19 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 72 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/rust
deleted file mode 100644
index 01e6f44..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/rust
+++ /dev/null
@@ -1,81 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/bench)
-     Running target/release/bench-0d58c0af2e68ae0d
-
-running 74 tests
-test misc::anchored_literal_long_match       ... bench:          24 ns/iter (+/- 1) = 16250 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          21 ns/iter (+/- 0) = 18571 MB/s
-test misc::anchored_literal_short_match      ... bench:          22 ns/iter (+/- 1) = 1181 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          21 ns/iter (+/- 0) = 1238 MB/s
-test misc::easy0_1K                          ... bench:          18 ns/iter (+/- 6) = 58388 MB/s
-test misc::easy0_1MB                         ... bench:          21 ns/iter (+/- 4) = 49933476 MB/s
-test misc::easy0_32                          ... bench:          17 ns/iter (+/- 0) = 3470 MB/s
-test misc::easy0_32K                         ... bench:          18 ns/iter (+/- 9) = 1821944 MB/s
-test misc::easy1_1K                          ... bench:          52 ns/iter (+/- 0) = 20076 MB/s
-test misc::easy1_1MB                         ... bench:          55 ns/iter (+/- 0) = 19065381 MB/s
-test misc::easy1_32                          ... bench:          50 ns/iter (+/- 0) = 1040 MB/s
-test misc::easy1_32K                         ... bench:          50 ns/iter (+/- 0) = 655760 MB/s
-test misc::hard_1K                           ... bench:          66 ns/iter (+/- 0) = 15924 MB/s
-test misc::hard_1MB                          ... bench:          70 ns/iter (+/- 1) = 14980042 MB/s
-test misc::hard_32                           ... bench:          62 ns/iter (+/- 1) = 951 MB/s
-test misc::hard_32K                          ... bench:          62 ns/iter (+/- 1) = 528951 MB/s
-test misc::literal                           ... bench:          17 ns/iter (+/- 0) = 3000 MB/s
-test misc::long_needle1                      ... bench:       2,359 ns/iter (+/- 37) = 42391 MB/s
-test misc::long_needle2                      ... bench:     634,783 ns/iter (+/- 4,313) = 157 MB/s
-test misc::match_class                       ... bench:          82 ns/iter (+/- 1) = 987 MB/s
-test misc::match_class_in_range              ... bench:          30 ns/iter (+/- 15) = 2700 MB/s
-test misc::match_class_unicode               ... bench:         317 ns/iter (+/- 2) = 507 MB/s
-test misc::medium_1K                         ... bench:          18 ns/iter (+/- 0) = 58444 MB/s
-test misc::medium_1MB                        ... bench:          22 ns/iter (+/- 0) = 47663818 MB/s
-test misc::medium_32                         ... bench:          18 ns/iter (+/- 0) = 3333 MB/s
-test misc::medium_32K                        ... bench:          18 ns/iter (+/- 0) = 1822000 MB/s
-test misc::not_literal                       ... bench:         115 ns/iter (+/- 0) = 443 MB/s
-test misc::one_pass_long_prefix              ... bench:          69 ns/iter (+/- 1) = 376 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          68 ns/iter (+/- 0) = 382 MB/s
-test misc::one_pass_short                    ... bench:          50 ns/iter (+/- 0) = 340 MB/s
-test misc::one_pass_short_not                ... bench:          52 ns/iter (+/- 0) = 326 MB/s
-test misc::reallyhard2_1K                    ... bench:       1,939 ns/iter (+/- 12) = 536 MB/s
-test misc::reallyhard_1K                     ... bench:       1,964 ns/iter (+/- 7) = 535 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,934,967 ns/iter (+/- 4,626) = 541 MB/s
-test misc::reallyhard_32                     ... bench:         130 ns/iter (+/- 0) = 453 MB/s
-test misc::reallyhard_32K                    ... bench:      60,581 ns/iter (+/- 176) = 541 MB/s
-test misc::replace_all                       ... bench:         142 ns/iter (+/- 1)
-test sherlock::before_holmes                 ... bench:   1,127,747 ns/iter (+/- 2,052) = 527 MB/s
-test sherlock::everything_greedy             ... bench:   2,598,664 ns/iter (+/- 6,137) = 228 MB/s
-test sherlock::everything_greedy_nl          ... bench:   1,202,183 ns/iter (+/- 1,965) = 494 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     220,378 ns/iter (+/- 1,229) = 2699 MB/s
-test sherlock::holmes_coword_watson          ... bench:     631,731 ns/iter (+/- 2,071) = 941 MB/s
-test sherlock::ing_suffix                    ... bench:   1,344,980 ns/iter (+/- 1,799) = 442 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,286,425 ns/iter (+/- 2,965) = 462 MB/s
-test sherlock::letters                       ... bench:  24,356,951 ns/iter (+/- 47,224) = 24 MB/s
-test sherlock::letters_lower                 ... bench:  23,816,732 ns/iter (+/- 44,203) = 24 MB/s
-test sherlock::letters_upper                 ... bench:   2,051,873 ns/iter (+/- 8,712) = 289 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   1,102,534 ns/iter (+/- 6,071) = 539 MB/s
-test sherlock::name_alt1                     ... bench:      36,474 ns/iter (+/- 308) = 16311 MB/s
-test sherlock::name_alt2                     ... bench:     185,668 ns/iter (+/- 1,023) = 3204 MB/s
-test sherlock::name_alt3                     ... bench:   1,152,554 ns/iter (+/- 1,991) = 516 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,254,885 ns/iter (+/- 5,387) = 474 MB/s
-test sherlock::name_alt4                     ... bench:     228,721 ns/iter (+/- 854) = 2601 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,223,457 ns/iter (+/- 2,307) = 486 MB/s
-test sherlock::name_alt5                     ... bench:     317,372 ns/iter (+/- 951) = 1874 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,224,434 ns/iter (+/- 3,886) = 485 MB/s
-test sherlock::name_holmes                   ... bench:      42,905 ns/iter (+/- 217) = 13866 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,080,290 ns/iter (+/- 5,686) = 550 MB/s
-test sherlock::name_sherlock                 ... bench:      70,041 ns/iter (+/- 444) = 8494 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      36,092 ns/iter (+/- 189) = 16483 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,156,696 ns/iter (+/- 3,922) = 514 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,156,088 ns/iter (+/- 2,453) = 514 MB/s
-test sherlock::name_whitespace               ... bench:      79,560 ns/iter (+/- 426) = 7477 MB/s
-test sherlock::no_match_common               ... bench:      25,940 ns/iter (+/- 119) = 22934 MB/s
-test sherlock::no_match_really_common        ... bench:     364,911 ns/iter (+/- 1,302) = 1630 MB/s
-test sherlock::no_match_uncommon             ... bench:      25,851 ns/iter (+/- 112) = 23013 MB/s
-test sherlock::quotes                        ... bench:     561,575 ns/iter (+/- 2,083) = 1059 MB/s
-test sherlock::repeated_class_negation       ... bench:  88,961,089 ns/iter (+/- 132,661) = 6 MB/s
-test sherlock::the_lower                     ... bench:     609,891 ns/iter (+/- 1,451) = 975 MB/s
-test sherlock::the_nocase                    ... bench:   1,622,541 ns/iter (+/- 6,851) = 366 MB/s
-test sherlock::the_upper                     ... bench:      48,810 ns/iter (+/- 245) = 12188 MB/s
-test sherlock::the_whitespace                ... bench:   1,192,755 ns/iter (+/- 4,168) = 498 MB/s
-test sherlock::word_ending_n                 ... bench:   1,991,440 ns/iter (+/- 7,313) = 298 MB/s
-test sherlock::words                         ... bench:   9,688,357 ns/iter (+/- 17,267) = 61 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 74 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/tcl b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/tcl
deleted file mode 100644
index 934bf6e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/04/tcl
+++ /dev/null
@@ -1,72 +0,0 @@
-   Compiling regex-benchmark v0.1.0 (file:///home/andrew/data/projects/rust/regex/bench)
-     Running target/release/bench-0d58c0af2e68ae0d
-
-running 65 tests
-test misc::anchored_literal_long_match       ... bench:         925 ns/iter (+/- 16) = 421 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         144 ns/iter (+/- 2) = 2708 MB/s
-test misc::anchored_literal_short_match      ... bench:         920 ns/iter (+/- 11) = 28 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         144 ns/iter (+/- 1) = 180 MB/s
-test misc::easy0_1K                          ... bench:      14,228 ns/iter (+/- 204) = 73 MB/s
-test misc::easy0_1MB                         ... bench:   3,728,677 ns/iter (+/- 4,564) = 281 MB/s
-test misc::easy0_32                          ... bench:      10,023 ns/iter (+/- 156) = 5 MB/s
-test misc::easy0_32K                         ... bench:     125,851 ns/iter (+/- 287) = 260 MB/s
-test misc::easy1_1K                          ... bench:       8,797 ns/iter (+/- 90) = 118 MB/s
-test misc::easy1_1MB                         ... bench:   3,722,675 ns/iter (+/- 4,912) = 281 MB/s
-test misc::easy1_32                          ... bench:       5,189 ns/iter (+/- 77) = 10 MB/s
-test misc::easy1_32K                         ... bench:     121,106 ns/iter (+/- 694) = 270 MB/s
-test misc::hard_1K                           ... bench:      17,111 ns/iter (+/- 251) = 61 MB/s
-test misc::hard_1MB                          ... bench:   3,743,313 ns/iter (+/- 7,634) = 280 MB/s
-test misc::hard_32                           ... bench:      13,489 ns/iter (+/- 220) = 4 MB/s
-test misc::hard_32K                          ... bench:     129,358 ns/iter (+/- 257) = 253 MB/s
-test misc::literal                           ... bench:         629 ns/iter (+/- 5) = 81 MB/s
-test misc::long_needle1                      ... bench:  21,495,182 ns/iter (+/- 41,993) = 4 MB/s
-test misc::long_needle2                      ... bench:  21,501,034 ns/iter (+/- 34,033) = 4 MB/s
-test misc::match_class                       ... bench:         732 ns/iter (+/- 3) = 110 MB/s
-test misc::match_class_in_range              ... bench:         736 ns/iter (+/- 6) = 110 MB/s
-test misc::medium_1K                         ... bench:      14,433 ns/iter (+/- 49) = 72 MB/s
-test misc::medium_1MB                        ... bench:   3,729,861 ns/iter (+/- 4,198) = 281 MB/s
-test misc::medium_32                         ... bench:      10,756 ns/iter (+/- 75) = 5 MB/s
-test misc::medium_32K                        ... bench:     126,593 ns/iter (+/- 169) = 259 MB/s
-test misc::not_literal                       ... bench:       2,350 ns/iter (+/- 13) = 21 MB/s
-test misc::one_pass_long_prefix              ... bench:       9,183 ns/iter (+/- 198) = 2 MB/s
-test misc::one_pass_long_prefix_not          ... bench:       8,470 ns/iter (+/- 110) = 3 MB/s
-test misc::one_pass_short                    ... bench:         956 ns/iter (+/- 4) = 17 MB/s
-test misc::one_pass_short_not                ... bench:       1,042 ns/iter (+/- 13) = 16 MB/s
-test misc::reallyhard2_1K                    ... bench:     129,563 ns/iter (+/- 336) = 8 MB/s
-test misc::reallyhard_1K                     ... bench:      16,656 ns/iter (+/- 152) = 63 MB/s
-test misc::reallyhard_1MB                    ... bench:   3,744,123 ns/iter (+/- 4,556) = 280 MB/s
-test misc::reallyhard_32                     ... bench:      12,910 ns/iter (+/- 112) = 4 MB/s
-test misc::reallyhard_32K                    ... bench:     129,293 ns/iter (+/- 301) = 253 MB/s
-test sherlock::before_holmes                 ... bench:   3,593,560 ns/iter (+/- 8,574) = 165 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   2,906,271 ns/iter (+/- 5,153) = 204 MB/s
-test sherlock::ing_suffix                    ... bench:   7,016,213 ns/iter (+/- 30,321) = 84 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  24,592,817 ns/iter (+/- 78,720) = 24 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,457,984 ns/iter (+/- 3,932) = 242 MB/s
-test sherlock::name_alt1                     ... bench:   2,569,156 ns/iter (+/- 5,789) = 231 MB/s
-test sherlock::name_alt2                     ... bench:   3,686,183 ns/iter (+/- 13,550) = 161 MB/s
-test sherlock::name_alt3                     ... bench:   6,715,311 ns/iter (+/- 15,208) = 88 MB/s
-test sherlock::name_alt3_nocase              ... bench:   9,702,060 ns/iter (+/- 32,628) = 61 MB/s
-test sherlock::name_alt4                     ... bench:   3,834,029 ns/iter (+/- 3,955) = 155 MB/s
-test sherlock::name_alt4_nocase              ... bench:   4,762,730 ns/iter (+/- 751,201) = 124 MB/s
-test sherlock::name_alt5                     ... bench:   4,582,303 ns/iter (+/- 8,073) = 129 MB/s
-test sherlock::name_alt5_nocase              ... bench:   5,583,652 ns/iter (+/- 14,573) = 106 MB/s
-test sherlock::name_holmes                   ... bench:   2,968,764 ns/iter (+/- 6,198) = 200 MB/s
-test sherlock::name_holmes_nocase            ... bench:   3,066,080 ns/iter (+/- 8,986) = 194 MB/s
-test sherlock::name_sherlock                 ... bench:   2,372,708 ns/iter (+/- 3,272) = 250 MB/s
-test sherlock::name_sherlock_holmes          ... bench:   2,607,914 ns/iter (+/- 3,361) = 228 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   2,641,260 ns/iter (+/- 9,409) = 225 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   2,420,591 ns/iter (+/- 11,053) = 245 MB/s
-test sherlock::name_whitespace               ... bench:   2,592,553 ns/iter (+/- 3,476) = 229 MB/s
-test sherlock::no_match_common               ... bench:   2,114,367 ns/iter (+/- 1,665) = 281 MB/s
-test sherlock::no_match_really_common        ... bench:   2,114,835 ns/iter (+/- 2,491) = 281 MB/s
-test sherlock::no_match_uncommon             ... bench:   2,105,274 ns/iter (+/- 1,657) = 282 MB/s
-test sherlock::quotes                        ... bench:  10,978,890 ns/iter (+/- 30,645) = 54 MB/s
-test sherlock::repeated_class_negation       ... bench:  69,836,043 ns/iter (+/- 117,415) = 8 MB/s
-test sherlock::the_lower                     ... bench:   9,343,518 ns/iter (+/- 29,387) = 63 MB/s
-test sherlock::the_nocase                    ... bench:   9,690,676 ns/iter (+/- 42,585) = 61 MB/s
-test sherlock::the_upper                     ... bench:   2,780,398 ns/iter (+/- 6,949) = 213 MB/s
-test sherlock::the_whitespace                ... bench:  11,562,612 ns/iter (+/- 78,789) = 51 MB/s
-test sherlock::words                         ... bench:  64,139,234 ns/iter (+/- 491,422) = 9 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 65 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/onig b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/onig
deleted file mode 100644
index 373b1495..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/onig
+++ /dev/null
@@ -1,99 +0,0 @@
-
-running 94 tests
-test misc::anchored_literal_long_match       ... bench:         158 ns/iter (+/- 4) = 2468 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         495 ns/iter (+/- 7) = 787 MB/s
-test misc::anchored_literal_short_match      ... bench:         160 ns/iter (+/- 3) = 162 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          44 ns/iter (+/- 4) = 590 MB/s
-test misc::easy0_1K                          ... bench:         315 ns/iter (+/- 15) = 3336 MB/s
-test misc::easy0_1MB                         ... bench:     136,864 ns/iter (+/- 5,984) = 7661 MB/s
-test misc::easy0_32                          ... bench:         163 ns/iter (+/- 11) = 361 MB/s
-test misc::easy0_32K                         ... bench:       4,562 ns/iter (+/- 255) = 7188 MB/s
-test misc::easy1_1K                          ... bench:       3,947 ns/iter (+/- 199) = 264 MB/s
-test misc::easy1_1MB                         ... bench:   3,920,564 ns/iter (+/- 122,902) = 267 MB/s
-test misc::easy1_32                          ... bench:         321 ns/iter (+/- 20) = 161 MB/s
-test misc::easy1_32K                         ... bench:     121,449 ns/iter (+/- 4,899) = 269 MB/s
-test misc::hard_1K                           ... bench:     125,960 ns/iter (+/- 7,255) = 8 MB/s
-test misc::hard_1MB                          ... bench: 134,129,947 ns/iter (+/- 4,797,942) = 7 MB/s
-test misc::hard_32                           ... bench:       4,044 ns/iter (+/- 227) = 14 MB/s
-test misc::hard_32K                          ... bench:   4,183,228 ns/iter (+/- 127,808) = 7 MB/s
-test misc::literal                           ... bench:         331 ns/iter (+/- 21) = 154 MB/s
-test misc::long_needle1                      ... bench:   5,715,563 ns/iter (+/- 250,535) = 17 MB/s
-test misc::long_needle2                      ... bench:   5,779,968 ns/iter (+/- 195,784) = 17 MB/s
-test misc::match_class                       ... bench:         431 ns/iter (+/- 5) = 187 MB/s
-test misc::match_class_in_range              ... bench:         427 ns/iter (+/- 27) = 189 MB/s
-test misc::match_class_unicode               ... bench:       1,946 ns/iter (+/- 88) = 82 MB/s
-test misc::medium_1K                         ... bench:         325 ns/iter (+/- 23) = 3236 MB/s
-test misc::medium_1MB                        ... bench:     138,022 ns/iter (+/- 5,142) = 7597 MB/s
-test misc::medium_32                         ... bench:         182 ns/iter (+/- 7) = 329 MB/s
-test misc::medium_32K                        ... bench:       4,511 ns/iter (+/- 190) = 7270 MB/s
-test misc::not_literal                       ... bench:         436 ns/iter (+/- 25) = 116 MB/s
-test misc::one_pass_long_prefix              ... bench:         168 ns/iter (+/- 6) = 154 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         176 ns/iter (+/- 7) = 147 MB/s
-test misc::one_pass_short                    ... bench:         325 ns/iter (+/- 16) = 52 MB/s
-test misc::one_pass_short_not                ... bench:         322 ns/iter (+/- 21) = 52 MB/s
-test misc::reallyhard2_1K                    ... bench:     289,956 ns/iter (+/- 16,350) = 3 MB/s
-test misc::reallyhard_1K                     ... bench:     126,089 ns/iter (+/- 5,350) = 8 MB/s
-test misc::reallyhard_1MB                    ... bench: 133,197,312 ns/iter (+/- 3,057,491) = 7 MB/s
-test misc::reallyhard_32                     ... bench:       4,060 ns/iter (+/- 11) = 14 MB/s
-test misc::reallyhard_32K                    ... bench:   4,215,469 ns/iter (+/- 200,526) = 7 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:      27,622 ns/iter (+/- 778) = 289 MB/s
-test regexdna::find_new_lines                ... bench:  30,503,604 ns/iter (+/- 1,120,697) = 166 MB/s
-test regexdna::subst1                        ... bench:  23,276,552 ns/iter (+/- 1,019,308) = 218 MB/s
-test regexdna::subst10                       ... bench:  23,199,415 ns/iter (+/- 790,938) = 219 MB/s
-test regexdna::subst11                       ... bench:  23,138,469 ns/iter (+/- 884,700) = 219 MB/s
-test regexdna::subst2                        ... bench:  23,076,376 ns/iter (+/- 644,391) = 220 MB/s
-test regexdna::subst3                        ... bench:  23,115,770 ns/iter (+/- 737,666) = 219 MB/s
-test regexdna::subst4                        ... bench:  23,093,288 ns/iter (+/- 1,003,519) = 220 MB/s
-test regexdna::subst5                        ... bench:  23,618,534 ns/iter (+/- 773,260) = 215 MB/s
-test regexdna::subst6                        ... bench:  23,301,581 ns/iter (+/- 679,681) = 218 MB/s
-test regexdna::subst7                        ... bench:  23,371,339 ns/iter (+/- 714,433) = 217 MB/s
-test regexdna::subst8                        ... bench:  23,187,513 ns/iter (+/- 863,031) = 219 MB/s
-test regexdna::subst9                        ... bench:  23,143,027 ns/iter (+/- 890,422) = 219 MB/s
-test regexdna::variant1                      ... bench: 104,906,982 ns/iter (+/- 3,391,942) = 48 MB/s
-test regexdna::variant2                      ... bench: 118,326,728 ns/iter (+/- 3,378,748) = 42 MB/s
-test regexdna::variant3                      ... bench: 109,348,596 ns/iter (+/- 3,647,056) = 46 MB/s
-test regexdna::variant4                      ... bench: 104,574,675 ns/iter (+/- 3,236,753) = 48 MB/s
-test regexdna::variant5                      ... bench: 102,968,132 ns/iter (+/- 2,792,754) = 49 MB/s
-test regexdna::variant6                      ... bench: 103,783,112 ns/iter (+/- 2,851,581) = 48 MB/s
-test regexdna::variant7                      ... bench: 103,939,805 ns/iter (+/- 3,118,277) = 48 MB/s
-test regexdna::variant8                      ... bench: 109,722,594 ns/iter (+/- 3,739,958) = 46 MB/s
-test regexdna::variant9                      ... bench: 128,702,724 ns/iter (+/- 3,739,103) = 39 MB/s
-test sherlock::before_after_holmes           ... bench:  39,219,739 ns/iter (+/- 1,622,425) = 15 MB/s
-test sherlock::before_holmes                 ... bench:  37,454,934 ns/iter (+/- 1,055,140) = 15 MB/s
-test sherlock::everything_greedy             ... bench:   7,341,629 ns/iter (+/- 241,072) = 81 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   2,298,534 ns/iter (+/- 94,224) = 258 MB/s
-test sherlock::ing_suffix                    ... bench:  18,533,670 ns/iter (+/- 505,855) = 32 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   2,419,034 ns/iter (+/- 124,616) = 245 MB/s
-test sherlock::letters                       ... bench:  61,910,045 ns/iter (+/- 2,122,755) = 9 MB/s
-test sherlock::letters_lower                 ... bench:  60,831,022 ns/iter (+/- 2,559,720) = 9 MB/s
-test sherlock::letters_upper                 ... bench:  10,747,265 ns/iter (+/- 761,147) = 55 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     215,376 ns/iter (+/- 13,622) = 2762 MB/s
-test sherlock::name_alt1                     ... bench:   2,282,320 ns/iter (+/- 154,104) = 260 MB/s
-test sherlock::name_alt2                     ... bench:   2,206,087 ns/iter (+/- 158,376) = 269 MB/s
-test sherlock::name_alt3                     ... bench:   2,771,932 ns/iter (+/- 181,216) = 214 MB/s
-test sherlock::name_alt3_nocase              ... bench:  19,198,056 ns/iter (+/- 816,668) = 30 MB/s
-test sherlock::name_alt4                     ... bench:   2,254,798 ns/iter (+/- 135,379) = 263 MB/s
-test sherlock::name_alt4_nocase              ... bench:   5,734,254 ns/iter (+/- 411,596) = 103 MB/s
-test sherlock::name_alt5                     ... bench:   2,276,779 ns/iter (+/- 172,557) = 261 MB/s
-test sherlock::name_alt5_nocase              ... bench:   7,314,318 ns/iter (+/- 377,963) = 81 MB/s
-test sherlock::name_holmes                   ... bench:     477,888 ns/iter (+/- 37,472) = 1244 MB/s
-test sherlock::name_holmes_nocase            ... bench:   3,487,005 ns/iter (+/- 278,896) = 170 MB/s
-test sherlock::name_sherlock                 ... bench:     295,313 ns/iter (+/- 16,739) = 2014 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     216,522 ns/iter (+/- 15,594) = 2747 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   3,480,703 ns/iter (+/- 272,332) = 170 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   3,511,444 ns/iter (+/- 283,013) = 169 MB/s
-test sherlock::name_whitespace               ... bench:     304,043 ns/iter (+/- 19,186) = 1956 MB/s
-test sherlock::no_match_common               ... bench:     632,615 ns/iter (+/- 44,676) = 940 MB/s
-test sherlock::no_match_really_common        ... bench:     727,565 ns/iter (+/- 54,169) = 817 MB/s
-test sherlock::no_match_uncommon             ... bench:     624,061 ns/iter (+/- 37,791) = 953 MB/s
-test sherlock::quotes                        ... bench:   3,776,688 ns/iter (+/- 186,393) = 157 MB/s
-test sherlock::repeated_class_negation       ... bench:  34,354,179 ns/iter (+/- 1,534,267) = 17 MB/s
-test sherlock::the_lower                     ... bench:   1,965,787 ns/iter (+/- 137,099) = 302 MB/s
-test sherlock::the_nocase                    ... bench:   4,853,843 ns/iter (+/- 259,890) = 122 MB/s
-test sherlock::the_upper                     ... bench:     949,071 ns/iter (+/- 66,016) = 626 MB/s
-test sherlock::the_whitespace                ... bench:   2,173,683 ns/iter (+/- 142,384) = 273 MB/s
-test sherlock::word_ending_n                 ... bench:  19,711,057 ns/iter (+/- 942,152) = 30 MB/s
-test sherlock::words                         ... bench:  21,979,387 ns/iter (+/- 1,250,588) = 27 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 94 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/onig-vs-rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/onig-vs-rust
deleted file mode 100644
index 4625e8f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/onig-vs-rust
+++ /dev/null
@@ -1,95 +0,0 @@
- name                                     onig ns/iter           rust ns/iter           diff ns/iter    diff % 
- misc::anchored_literal_long_match        158 (2468 MB/s)        24 (16250 MB/s)                -134   -84.81% 
- misc::anchored_literal_long_non_match    495 (787 MB/s)         27 (14444 MB/s)                -468   -94.55% 
- misc::anchored_literal_short_match       160 (162 MB/s)         22 (1181 MB/s)                 -138   -86.25% 
- misc::anchored_literal_short_non_match   44 (590 MB/s)          24 (1083 MB/s)                  -20   -45.45% 
- misc::easy0_1K                           315 (3336 MB/s)        16 (65687 MB/s)                -299   -94.92% 
- misc::easy0_1MB                          136,864 (7661 MB/s)    20 (52430150 MB/s)         -136,844   -99.99% 
- misc::easy0_32                           163 (361 MB/s)         16 (3687 MB/s)                 -147   -90.18% 
- misc::easy0_32K                          4,562 (7188 MB/s)      16 (2049687 MB/s)            -4,546   -99.65% 
- misc::easy1_1K                           3,947 (264 MB/s)       48 (21750 MB/s)              -3,899   -98.78% 
- misc::easy1_1MB                          3,920,564 (267 MB/s)   48 (21845750 MB/s)       -3,920,516  -100.00% 
- misc::easy1_32                           321 (161 MB/s)         46 (1130 MB/s)                 -275   -85.67% 
- misc::easy1_32K                          121,449 (269 MB/s)     47 (697617 MB/s)           -121,402   -99.96% 
- misc::hard_1K                            125,960 (8 MB/s)       58 (18120 MB/s)            -125,902   -99.95% 
- misc::hard_1MB                           134,129,947 (7 MB/s)   61 (17190213 MB/s)     -134,129,886  -100.00% 
- misc::hard_32                            4,044 (14 MB/s)        58 (1017 MB/s)               -3,986   -98.57% 
- misc::hard_32K                           4,183,228 (7 MB/s)     56 (585625 MB/s)         -4,183,172  -100.00% 
- misc::literal                            331 (154 MB/s)         16 (3187 MB/s)                 -315   -95.17% 
- misc::long_needle1                       5,715,563 (17 MB/s)    2,226 (44924 MB/s)       -5,713,337   -99.96% 
- misc::long_needle2                       5,779,968 (17 MB/s)    576,997 (173 MB/s)       -5,202,971   -90.02% 
- misc::match_class                        431 (187 MB/s)         65 (1246 MB/s)                 -366   -84.92% 
- misc::match_class_in_range               427 (189 MB/s)         27 (3000 MB/s)                 -400   -93.68% 
- misc::match_class_unicode                1,946 (82 MB/s)        283 (568 MB/s)               -1,663   -85.46% 
- misc::medium_1K                          325 (3236 MB/s)        16 (65750 MB/s)                -309   -95.08% 
- misc::medium_1MB                         138,022 (7597 MB/s)    21 (49933523 MB/s)         -138,001   -99.98% 
- misc::medium_32                          182 (329 MB/s)         17 (3529 MB/s)                 -165   -90.66% 
- misc::medium_32K                         4,511 (7270 MB/s)      17 (1929176 MB/s)            -4,494   -99.62% 
- misc::not_literal                        436 (116 MB/s)         105 (485 MB/s)                 -331   -75.92% 
- misc::one_pass_long_prefix               168 (154 MB/s)         68 (382 MB/s)                  -100   -59.52% 
- misc::one_pass_long_prefix_not           176 (147 MB/s)         58 (448 MB/s)                  -118   -67.05% 
- misc::one_pass_short                     325 (52 MB/s)          45 (377 MB/s)                  -280   -86.15% 
- misc::one_pass_short_not                 322 (52 MB/s)          50 (340 MB/s)                  -272   -84.47% 
- misc::reallyhard2_1K                     289,956 (3 MB/s)       83 (12530 MB/s)            -289,873   -99.97% 
- misc::reallyhard_1K                      126,089 (8 MB/s)       1,822 (576 MB/s)           -124,267   -98.55% 
- misc::reallyhard_1MB                     133,197,312 (7 MB/s)   1,768,327 (592 MB/s)   -131,428,985   -98.67% 
- misc::reallyhard_32                      4,060 (14 MB/s)        121 (487 MB/s)               -3,939   -97.02% 
- misc::reallyhard_32K                     4,215,469 (7 MB/s)     56,375 (581 MB/s)        -4,159,094   -98.66% 
- misc::reverse_suffix_no_quadratic        27,622 (289 MB/s)      5,803 (1378 MB/s)           -21,819   -78.99% 
- regexdna::find_new_lines                 30,503,604 (166 MB/s)  14,818,233 (343 MB/s)   -15,685,371   -51.42% 
- regexdna::subst1                         23,276,552 (218 MB/s)  896,790 (5668 MB/s)     -22,379,762   -96.15% 
- regexdna::subst10                        23,199,415 (219 MB/s)  957,325 (5310 MB/s)     -22,242,090   -95.87% 
- regexdna::subst11                        23,138,469 (219 MB/s)  917,248 (5542 MB/s)     -22,221,221   -96.04% 
- regexdna::subst2                         23,076,376 (220 MB/s)  892,129 (5698 MB/s)     -22,184,247   -96.13% 
- regexdna::subst3                         23,115,770 (219 MB/s)  929,250 (5470 MB/s)     -22,186,520   -95.98% 
- regexdna::subst4                         23,093,288 (220 MB/s)  872,581 (5825 MB/s)     -22,220,707   -96.22% 
- regexdna::subst5                         23,618,534 (215 MB/s)  875,804 (5804 MB/s)     -22,742,730   -96.29% 
- regexdna::subst6                         23,301,581 (218 MB/s)  884,639 (5746 MB/s)     -22,416,942   -96.20% 
- regexdna::subst7                         23,371,339 (217 MB/s)  872,791 (5824 MB/s)     -22,498,548   -96.27% 
- regexdna::subst8                         23,187,513 (219 MB/s)  873,833 (5817 MB/s)     -22,313,680   -96.23% 
- regexdna::subst9                         23,143,027 (219 MB/s)  886,744 (5732 MB/s)     -22,256,283   -96.17% 
- regexdna::variant1                       104,906,982 (48 MB/s)  3,699,267 (1374 MB/s)  -101,207,715   -96.47% 
- regexdna::variant2                       118,326,728 (42 MB/s)  6,760,952 (751 MB/s)   -111,565,776   -94.29% 
- regexdna::variant3                       109,348,596 (46 MB/s)  8,030,646 (633 MB/s)   -101,317,950   -92.66% 
- regexdna::variant4                       104,574,675 (48 MB/s)  8,077,290 (629 MB/s)    -96,497,385   -92.28% 
- regexdna::variant5                       102,968,132 (49 MB/s)  6,787,242 (748 MB/s)    -96,180,890   -93.41% 
- regexdna::variant6                       103,783,112 (48 MB/s)  6,577,777 (772 MB/s)    -97,205,335   -93.66% 
- regexdna::variant7                       103,939,805 (48 MB/s)  6,705,580 (758 MB/s)    -97,234,225   -93.55% 
- regexdna::variant8                       109,722,594 (46 MB/s)  6,818,785 (745 MB/s)   -102,903,809   -93.79% 
- regexdna::variant9                       128,702,724 (39 MB/s)  6,821,453 (745 MB/s)   -121,881,271   -94.70% 
- sherlock::before_after_holmes            39,219,739 (15 MB/s)   1,029,866 (577 MB/s)    -38,189,873   -97.37% 
- sherlock::before_holmes                  37,454,934 (15 MB/s)   76,633 (7763 MB/s)      -37,378,301   -99.80% 
- sherlock::everything_greedy              7,341,629 (81 MB/s)    2,375,079 (250 MB/s)     -4,966,550   -67.65% 
- sherlock::holmes_cochar_watson           2,298,534 (258 MB/s)   144,725 (4110 MB/s)      -2,153,809   -93.70% 
- sherlock::ing_suffix                     18,533,670 (32 MB/s)   436,202 (1363 MB/s)     -18,097,468   -97.65% 
- sherlock::ing_suffix_limited_space       2,419,034 (245 MB/s)   1,182,943 (502 MB/s)     -1,236,091   -51.10% 
- sherlock::letters                        61,910,045 (9 MB/s)    24,390,452 (24 MB/s)    -37,519,593   -60.60% 
- sherlock::letters_lower                  60,831,022 (9 MB/s)    23,784,108 (25 MB/s)    -37,046,914   -60.90% 
- sherlock::letters_upper                  10,747,265 (55 MB/s)   1,993,838 (298 MB/s)     -8,753,427   -81.45% 
- sherlock::line_boundary_sherlock_holmes  215,376 (2762 MB/s)    999,414 (595 MB/s)          784,038   364.03% 
- sherlock::name_alt1                      2,282,320 (260 MB/s)   34,298 (17345 MB/s)      -2,248,022   -98.50% 
- sherlock::name_alt2                      2,206,087 (269 MB/s)   124,226 (4789 MB/s)      -2,081,861   -94.37% 
- sherlock::name_alt3                      2,771,932 (214 MB/s)   137,742 (4319 MB/s)      -2,634,190   -95.03% 
- sherlock::name_alt3_nocase               19,198,056 (30 MB/s)   1,293,763 (459 MB/s)    -17,904,293   -93.26% 
- sherlock::name_alt4                      2,254,798 (263 MB/s)   164,900 (3607 MB/s)      -2,089,898   -92.69% 
- sherlock::name_alt4_nocase               5,734,254 (103 MB/s)   235,023 (2531 MB/s)      -5,499,231   -95.90% 
- sherlock::name_alt5                      2,276,779 (261 MB/s)   127,928 (4650 MB/s)      -2,148,851   -94.38% 
- sherlock::name_alt5_nocase               7,314,318 (81 MB/s)    659,591 (901 MB/s)       -6,654,727   -90.98% 
- sherlock::name_holmes                    477,888 (1244 MB/s)    40,902 (14545 MB/s)        -436,986   -91.44% 
- sherlock::name_holmes_nocase             3,487,005 (170 MB/s)   198,658 (2994 MB/s)      -3,288,347   -94.30% 
- sherlock::name_sherlock                  295,313 (2014 MB/s)    68,924 (8631 MB/s)         -226,389   -76.66% 
- sherlock::name_sherlock_holmes           216,522 (2747 MB/s)    31,640 (18803 MB/s)        -184,882   -85.39% 
- sherlock::name_sherlock_holmes_nocase    3,480,703 (170 MB/s)   173,522 (3428 MB/s)      -3,307,181   -95.01% 
- sherlock::name_sherlock_nocase           3,511,444 (169 MB/s)   170,888 (3481 MB/s)      -3,340,556   -95.13% 
- sherlock::name_whitespace                304,043 (1956 MB/s)    84,314 (7056 MB/s)         -219,729   -72.27% 
- sherlock::no_match_common                632,615 (940 MB/s)     20,727 (28703 MB/s)        -611,888   -96.72% 
- sherlock::no_match_really_common         727,565 (817 MB/s)     381,476 (1559 MB/s)        -346,089   -47.57% 
- sherlock::no_match_uncommon              624,061 (953 MB/s)     20,786 (28621 MB/s)        -603,275   -96.67% 
- sherlock::quotes                         3,776,688 (157 MB/s)   531,487 (1119 MB/s)      -3,245,201   -85.93% 
- sherlock::repeated_class_negation        34,354,179 (17 MB/s)   85,881,944 (6 MB/s)      51,527,765   149.99% 
- sherlock::the_lower                      1,965,787 (302 MB/s)   654,110 (909 MB/s)       -1,311,677   -66.73% 
- sherlock::the_nocase                     4,853,843 (122 MB/s)   474,456 (1253 MB/s)      -4,379,387   -90.23% 
- sherlock::the_upper                      949,071 (626 MB/s)     43,746 (13599 MB/s)        -905,325   -95.39% 
- sherlock::the_whitespace                 2,173,683 (273 MB/s)   1,181,974 (503 MB/s)       -991,709   -45.62% 
- sherlock::word_ending_n                  19,711,057 (30 MB/s)   1,925,578 (308 MB/s)    -17,785,479   -90.23% 
- sherlock::words                          21,979,387 (27 MB/s)   9,697,201 (61 MB/s)     -12,282,186   -55.88% 
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre1 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre1
deleted file mode 100644
index 51af361..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre1
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:          30 ns/iter (+/- 0) = 13000 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          24 ns/iter (+/- 1) = 16250 MB/s
-test misc::anchored_literal_short_match      ... bench:          29 ns/iter (+/- 1) = 896 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          24 ns/iter (+/- 1) = 1083 MB/s
-test misc::easy0_1K                          ... bench:         260 ns/iter (+/- 15) = 4042 MB/s
-test misc::easy0_1MB                         ... bench:     202,849 ns/iter (+/- 7,973) = 5169 MB/s
-test misc::easy0_32                          ... bench:          47 ns/iter (+/- 3) = 1255 MB/s
-test misc::easy0_32K                         ... bench:       6,378 ns/iter (+/- 236) = 5141 MB/s
-test misc::easy1_1K                          ... bench:         248 ns/iter (+/- 15) = 4209 MB/s
-test misc::easy1_1MB                         ... bench:     203,105 ns/iter (+/- 7,590) = 5162 MB/s
-test misc::easy1_32                          ... bench:          51 ns/iter (+/- 1) = 1019 MB/s
-test misc::easy1_32K                         ... bench:       6,508 ns/iter (+/- 160) = 5038 MB/s
-test misc::hard_1K                           ... bench:       1,324 ns/iter (+/- 46) = 793 MB/s
-test misc::hard_1MB                          ... bench:   1,134,691 ns/iter (+/- 41,296) = 924 MB/s
-test misc::hard_32                           ... bench:         113 ns/iter (+/- 13) = 522 MB/s
-test misc::hard_32K                          ... bench:      42,269 ns/iter (+/- 2,298) = 775 MB/s
-test misc::literal                           ... bench:          28 ns/iter (+/- 0) = 1821 MB/s
-test misc::long_needle1                      ... bench:     547,122 ns/iter (+/- 34,029) = 182 MB/s
-test misc::long_needle2                      ... bench:     546,018 ns/iter (+/- 24,721) = 183 MB/s
-test misc::match_class                       ... bench:          97 ns/iter (+/- 5) = 835 MB/s
-test misc::match_class_in_range              ... bench:          30 ns/iter (+/- 1) = 2700 MB/s
-test misc::match_class_unicode               ... bench:         343 ns/iter (+/- 2) = 469 MB/s
-test misc::medium_1K                         ... bench:         253 ns/iter (+/- 15) = 4158 MB/s
-test misc::medium_1MB                        ... bench:     202,025 ns/iter (+/- 11,252) = 5190 MB/s
-test misc::medium_32                         ... bench:          51 ns/iter (+/- 2) = 1176 MB/s
-test misc::medium_32K                        ... bench:       6,406 ns/iter (+/- 318) = 5119 MB/s
-test misc::not_literal                       ... bench:         169 ns/iter (+/- 6) = 301 MB/s
-test misc::one_pass_long_prefix              ... bench:          28 ns/iter (+/- 1) = 928 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          28 ns/iter (+/- 0) = 928 MB/s
-test misc::one_pass_short                    ... bench:          54 ns/iter (+/- 0) = 314 MB/s
-test misc::one_pass_short_not                ... bench:          55 ns/iter (+/- 3) = 309 MB/s
-test misc::reallyhard2_1K                    ... bench:       4,664 ns/iter (+/- 123) = 222 MB/s
-test misc::reallyhard_1K                     ... bench:       1,595 ns/iter (+/- 34) = 658 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,377,542 ns/iter (+/- 2,203) = 761 MB/s
-test misc::reallyhard_32                     ... bench:         106 ns/iter (+/- 2) = 556 MB/s
-test misc::reallyhard_32K                    ... bench:      43,256 ns/iter (+/- 1,230) = 758 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,607 ns/iter (+/- 68) = 1736 MB/s
-test regexdna::find_new_lines                ... bench:   2,840,298 ns/iter (+/- 128,040) = 1789 MB/s
-test regexdna::subst1                        ... bench:   1,284,283 ns/iter (+/- 39,986) = 3958 MB/s
-test regexdna::subst10                       ... bench:   1,269,531 ns/iter (+/- 63,116) = 4004 MB/s
-test regexdna::subst11                       ... bench:   1,286,171 ns/iter (+/- 49,256) = 3952 MB/s
-test regexdna::subst2                        ... bench:   1,303,022 ns/iter (+/- 1,553) = 3901 MB/s
-test regexdna::subst3                        ... bench:   1,295,961 ns/iter (+/- 57,880) = 3922 MB/s
-test regexdna::subst4                        ... bench:   1,313,706 ns/iter (+/- 2,115) = 3869 MB/s
-test regexdna::subst5                        ... bench:   1,286,339 ns/iter (+/- 2,093) = 3951 MB/s
-test regexdna::subst6                        ... bench:   1,385,644 ns/iter (+/- 3,387) = 3668 MB/s
-test regexdna::subst7                        ... bench:   1,286,743 ns/iter (+/- 2,339) = 3950 MB/s
-test regexdna::subst8                        ... bench:   1,306,406 ns/iter (+/- 1,686) = 3891 MB/s
-test regexdna::subst9                        ... bench:   1,280,365 ns/iter (+/- 52,649) = 3970 MB/s
-test regexdna::variant1                      ... bench:  15,271,875 ns/iter (+/- 510,399) = 332 MB/s
-test regexdna::variant2                      ... bench:  16,704,090 ns/iter (+/- 446,145) = 304 MB/s
-test regexdna::variant3                      ... bench:  20,745,546 ns/iter (+/- 500,573) = 245 MB/s
-test regexdna::variant4                      ... bench:  19,285,154 ns/iter (+/- 543,793) = 263 MB/s
-test regexdna::variant5                      ... bench:  17,234,130 ns/iter (+/- 291,232) = 294 MB/s
-test regexdna::variant6                      ... bench:  17,462,350 ns/iter (+/- 510,036) = 291 MB/s
-test regexdna::variant7                      ... bench:  19,671,680 ns/iter (+/- 562,610) = 258 MB/s
-test regexdna::variant8                      ... bench:  24,515,319 ns/iter (+/- 725,298) = 207 MB/s
-test regexdna::variant9                      ... bench:  22,623,755 ns/iter (+/- 637,538) = 224 MB/s
-test sherlock::before_after_holmes           ... bench:   4,510,830 ns/iter (+/- 170,864) = 131 MB/s
-test sherlock::before_holmes                 ... bench:   4,706,836 ns/iter (+/- 186,202) = 126 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     523,122 ns/iter (+/- 988) = 1137 MB/s
-test sherlock::ing_suffix                    ... bench:   2,030,438 ns/iter (+/- 9,228) = 293 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   4,996,956 ns/iter (+/- 197,705) = 119 MB/s
-test sherlock::letters                       ... bench:  13,529,105 ns/iter (+/- 496,645) = 43 MB/s
-test sherlock::letters_lower                 ... bench:  13,681,607 ns/iter (+/- 448,932) = 43 MB/s
-test sherlock::letters_upper                 ... bench:   1,904,757 ns/iter (+/- 94,484) = 312 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     207,695 ns/iter (+/- 8,892) = 2864 MB/s
-test sherlock::name_alt1                     ... bench:     486,857 ns/iter (+/- 21,004) = 1221 MB/s
-test sherlock::name_alt2                     ... bench:     483,926 ns/iter (+/- 26,860) = 1229 MB/s
-test sherlock::name_alt3                     ... bench:     978,827 ns/iter (+/- 43,851) = 607 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,986,143 ns/iter (+/- 78,155) = 199 MB/s
-test sherlock::name_alt4                     ... bench:      78,104 ns/iter (+/- 4,056) = 7617 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,638,351 ns/iter (+/- 62,542) = 363 MB/s
-test sherlock::name_alt5                     ... bench:     685,723 ns/iter (+/- 26,092) = 867 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,817,760 ns/iter (+/- 80,781) = 327 MB/s
-test sherlock::name_holmes                   ... bench:     411,102 ns/iter (+/- 1,887) = 1447 MB/s
-test sherlock::name_holmes_nocase            ... bench:     516,003 ns/iter (+/- 2,295) = 1152 MB/s
-test sherlock::name_sherlock                 ... bench:     284,300 ns/iter (+/- 1,117) = 2092 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     209,139 ns/iter (+/- 380) = 2844 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,118,324 ns/iter (+/- 1,654) = 531 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,126,992 ns/iter (+/- 1,180) = 527 MB/s
-test sherlock::name_whitespace               ... bench:     284,672 ns/iter (+/- 510) = 2089 MB/s
-test sherlock::no_match_common               ... bench:     439,955 ns/iter (+/- 939) = 1352 MB/s
-test sherlock::no_match_really_common        ... bench:     439,266 ns/iter (+/- 3,751) = 1354 MB/s
-test sherlock::no_match_uncommon             ... bench:      28,872 ns/iter (+/- 31) = 20605 MB/s
-test sherlock::quotes                        ... bench:     522,877 ns/iter (+/- 32,723) = 1137 MB/s
-test sherlock::repeated_class_negation       ... bench:   5,997,745 ns/iter (+/- 209,544) = 99 MB/s
-test sherlock::the_lower                     ... bench:     747,234 ns/iter (+/- 43,110) = 796 MB/s
-test sherlock::the_nocase                    ... bench:     802,320 ns/iter (+/- 27,715) = 741 MB/s
-test sherlock::the_upper                     ... bench:      58,163 ns/iter (+/- 2,202) = 10228 MB/s
-test sherlock::the_whitespace                ... bench:     920,781 ns/iter (+/- 30,609) = 646 MB/s
-test sherlock::word_ending_n                 ... bench:   5,703,864 ns/iter (+/- 191,007) = 104 MB/s
-test sherlock::words                         ... bench:   6,786,318 ns/iter (+/- 168,049) = 87 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre1-vs-rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre1-vs-rust
deleted file mode 100644
index 1d8c0d6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre1-vs-rust
+++ /dev/null
@@ -1,94 +0,0 @@
- name                                     pcre1 ns/iter          rust ns/iter           diff ns/iter    diff % 
- misc::anchored_literal_long_match        30 (13000 MB/s)        24 (16250 MB/s)                  -6   -20.00% 
- misc::anchored_literal_long_non_match    24 (16250 MB/s)        27 (14444 MB/s)                   3    12.50% 
- misc::anchored_literal_short_match       29 (896 MB/s)          22 (1181 MB/s)                   -7   -24.14% 
- misc::anchored_literal_short_non_match   24 (1083 MB/s)         24 (1083 MB/s)                    0     0.00% 
- misc::easy0_1K                           260 (4042 MB/s)        16 (65687 MB/s)                -244   -93.85% 
- misc::easy0_1MB                          202,849 (5169 MB/s)    20 (52430150 MB/s)         -202,829   -99.99% 
- misc::easy0_32                           47 (1255 MB/s)         16 (3687 MB/s)                  -31   -65.96% 
- misc::easy0_32K                          6,378 (5141 MB/s)      16 (2049687 MB/s)            -6,362   -99.75% 
- misc::easy1_1K                           248 (4209 MB/s)        48 (21750 MB/s)                -200   -80.65% 
- misc::easy1_1MB                          203,105 (5162 MB/s)    48 (21845750 MB/s)         -203,057   -99.98% 
- misc::easy1_32                           51 (1019 MB/s)         46 (1130 MB/s)                   -5    -9.80% 
- misc::easy1_32K                          6,508 (5038 MB/s)      47 (697617 MB/s)             -6,461   -99.28% 
- misc::hard_1K                            1,324 (793 MB/s)       58 (18120 MB/s)              -1,266   -95.62% 
- misc::hard_1MB                           1,134,691 (924 MB/s)   61 (17190213 MB/s)       -1,134,630   -99.99% 
- misc::hard_32                            113 (522 MB/s)         58 (1017 MB/s)                  -55   -48.67% 
- misc::hard_32K                           42,269 (775 MB/s)      56 (585625 MB/s)            -42,213   -99.87% 
- misc::literal                            28 (1821 MB/s)         16 (3187 MB/s)                  -12   -42.86% 
- misc::long_needle1                       547,122 (182 MB/s)     2,226 (44924 MB/s)         -544,896   -99.59% 
- misc::long_needle2                       546,018 (183 MB/s)     576,997 (173 MB/s)           30,979     5.67% 
- misc::match_class                        97 (835 MB/s)          65 (1246 MB/s)                  -32   -32.99% 
- misc::match_class_in_range               30 (2700 MB/s)         27 (3000 MB/s)                   -3   -10.00% 
- misc::match_class_unicode                343 (469 MB/s)         283 (568 MB/s)                  -60   -17.49% 
- misc::medium_1K                          253 (4158 MB/s)        16 (65750 MB/s)                -237   -93.68% 
- misc::medium_1MB                         202,025 (5190 MB/s)    21 (49933523 MB/s)         -202,004   -99.99% 
- misc::medium_32                          51 (1176 MB/s)         17 (3529 MB/s)                  -34   -66.67% 
- misc::medium_32K                         6,406 (5119 MB/s)      17 (1929176 MB/s)            -6,389   -99.73% 
- misc::not_literal                        169 (301 MB/s)         105 (485 MB/s)                  -64   -37.87% 
- misc::one_pass_long_prefix               28 (928 MB/s)          68 (382 MB/s)                    40   142.86% 
- misc::one_pass_long_prefix_not           28 (928 MB/s)          58 (448 MB/s)                    30   107.14% 
- misc::one_pass_short                     54 (314 MB/s)          45 (377 MB/s)                    -9   -16.67% 
- misc::one_pass_short_not                 55 (309 MB/s)          50 (340 MB/s)                    -5    -9.09% 
- misc::reallyhard2_1K                     4,664 (222 MB/s)       83 (12530 MB/s)              -4,581   -98.22% 
- misc::reallyhard_1K                      1,595 (658 MB/s)       1,822 (576 MB/s)                227    14.23% 
- misc::reallyhard_1MB                     1,377,542 (761 MB/s)   1,768,327 (592 MB/s)        390,785    28.37% 
- misc::reallyhard_32                      106 (556 MB/s)         121 (487 MB/s)                   15    14.15% 
- misc::reallyhard_32K                     43,256 (758 MB/s)      56,375 (581 MB/s)            13,119    30.33% 
- misc::reverse_suffix_no_quadratic        4,607 (1736 MB/s)      5,803 (1378 MB/s)             1,196    25.96% 
- regexdna::find_new_lines                 2,840,298 (1789 MB/s)  14,818,233 (343 MB/s)    11,977,935   421.71% 
- regexdna::subst1                         1,284,283 (3958 MB/s)  896,790 (5668 MB/s)        -387,493   -30.17% 
- regexdna::subst10                        1,269,531 (4004 MB/s)  957,325 (5310 MB/s)        -312,206   -24.59% 
- regexdna::subst11                        1,286,171 (3952 MB/s)  917,248 (5542 MB/s)        -368,923   -28.68% 
- regexdna::subst2                         1,303,022 (3901 MB/s)  892,129 (5698 MB/s)        -410,893   -31.53% 
- regexdna::subst3                         1,295,961 (3922 MB/s)  929,250 (5470 MB/s)        -366,711   -28.30% 
- regexdna::subst4                         1,313,706 (3869 MB/s)  872,581 (5825 MB/s)        -441,125   -33.58% 
- regexdna::subst5                         1,286,339 (3951 MB/s)  875,804 (5804 MB/s)        -410,535   -31.91% 
- regexdna::subst6                         1,385,644 (3668 MB/s)  884,639 (5746 MB/s)        -501,005   -36.16% 
- regexdna::subst7                         1,286,743 (3950 MB/s)  872,791 (5824 MB/s)        -413,952   -32.17% 
- regexdna::subst8                         1,306,406 (3891 MB/s)  873,833 (5817 MB/s)        -432,573   -33.11% 
- regexdna::subst9                         1,280,365 (3970 MB/s)  886,744 (5732 MB/s)        -393,621   -30.74% 
- regexdna::variant1                       15,271,875 (332 MB/s)  3,699,267 (1374 MB/s)   -11,572,608   -75.78% 
- regexdna::variant2                       16,704,090 (304 MB/s)  6,760,952 (751 MB/s)     -9,943,138   -59.53% 
- regexdna::variant3                       20,745,546 (245 MB/s)  8,030,646 (633 MB/s)    -12,714,900   -61.29% 
- regexdna::variant4                       19,285,154 (263 MB/s)  8,077,290 (629 MB/s)    -11,207,864   -58.12% 
- regexdna::variant5                       17,234,130 (294 MB/s)  6,787,242 (748 MB/s)    -10,446,888   -60.62% 
- regexdna::variant6                       17,462,350 (291 MB/s)  6,577,777 (772 MB/s)    -10,884,573   -62.33% 
- regexdna::variant7                       19,671,680 (258 MB/s)  6,705,580 (758 MB/s)    -12,966,100   -65.91% 
- regexdna::variant8                       24,515,319 (207 MB/s)  6,818,785 (745 MB/s)    -17,696,534   -72.19% 
- regexdna::variant9                       22,623,755 (224 MB/s)  6,821,453 (745 MB/s)    -15,802,302   -69.85% 
- sherlock::before_after_holmes            4,510,830 (131 MB/s)   1,029,866 (577 MB/s)     -3,480,964   -77.17% 
- sherlock::before_holmes                  4,706,836 (126 MB/s)   76,633 (7763 MB/s)       -4,630,203   -98.37% 
- sherlock::holmes_cochar_watson           523,122 (1137 MB/s)    144,725 (4110 MB/s)        -378,397   -72.33% 
- sherlock::ing_suffix                     2,030,438 (293 MB/s)   436,202 (1363 MB/s)      -1,594,236   -78.52% 
- sherlock::ing_suffix_limited_space       4,996,956 (119 MB/s)   1,182,943 (502 MB/s)     -3,814,013   -76.33% 
- sherlock::letters                        13,529,105 (43 MB/s)   24,390,452 (24 MB/s)     10,861,347    80.28% 
- sherlock::letters_lower                  13,681,607 (43 MB/s)   23,784,108 (25 MB/s)     10,102,501    73.84% 
- sherlock::letters_upper                  1,904,757 (312 MB/s)   1,993,838 (298 MB/s)         89,081     4.68% 
- sherlock::line_boundary_sherlock_holmes  207,695 (2864 MB/s)    999,414 (595 MB/s)          791,719   381.19% 
- sherlock::name_alt1                      486,857 (1221 MB/s)    34,298 (17345 MB/s)        -452,559   -92.96% 
- sherlock::name_alt2                      483,926 (1229 MB/s)    124,226 (4789 MB/s)        -359,700   -74.33% 
- sherlock::name_alt3                      978,827 (607 MB/s)     137,742 (4319 MB/s)        -841,085   -85.93% 
- sherlock::name_alt3_nocase               2,986,143 (199 MB/s)   1,293,763 (459 MB/s)     -1,692,380   -56.67% 
- sherlock::name_alt4                      78,104 (7617 MB/s)     164,900 (3607 MB/s)          86,796   111.13% 
- sherlock::name_alt4_nocase               1,638,351 (363 MB/s)   235,023 (2531 MB/s)      -1,403,328   -85.65% 
- sherlock::name_alt5                      685,723 (867 MB/s)     127,928 (4650 MB/s)        -557,795   -81.34% 
- sherlock::name_alt5_nocase               1,817,760 (327 MB/s)   659,591 (901 MB/s)       -1,158,169   -63.71% 
- sherlock::name_holmes                    411,102 (1447 MB/s)    40,902 (14545 MB/s)        -370,200   -90.05% 
- sherlock::name_holmes_nocase             516,003 (1152 MB/s)    198,658 (2994 MB/s)        -317,345   -61.50% 
- sherlock::name_sherlock                  284,300 (2092 MB/s)    68,924 (8631 MB/s)         -215,376   -75.76% 
- sherlock::name_sherlock_holmes           209,139 (2844 MB/s)    31,640 (18803 MB/s)        -177,499   -84.87% 
- sherlock::name_sherlock_holmes_nocase    1,118,324 (531 MB/s)   173,522 (3428 MB/s)        -944,802   -84.48% 
- sherlock::name_sherlock_nocase           1,126,992 (527 MB/s)   170,888 (3481 MB/s)        -956,104   -84.84% 
- sherlock::name_whitespace                284,672 (2089 MB/s)    84,314 (7056 MB/s)         -200,358   -70.38% 
- sherlock::no_match_common                439,955 (1352 MB/s)    20,727 (28703 MB/s)        -419,228   -95.29% 
- sherlock::no_match_really_common         439,266 (1354 MB/s)    381,476 (1559 MB/s)         -57,790   -13.16% 
- sherlock::no_match_uncommon              28,872 (20605 MB/s)    20,786 (28621 MB/s)          -8,086   -28.01% 
- sherlock::quotes                         522,877 (1137 MB/s)    531,487 (1119 MB/s)           8,610     1.65% 
- sherlock::repeated_class_negation        5,997,745 (99 MB/s)    85,881,944 (6 MB/s)      79,884,199  1331.90% 
- sherlock::the_lower                      747,234 (796 MB/s)     654,110 (909 MB/s)          -93,124   -12.46% 
- sherlock::the_nocase                     802,320 (741 MB/s)     474,456 (1253 MB/s)        -327,864   -40.86% 
- sherlock::the_upper                      58,163 (10228 MB/s)    43,746 (13599 MB/s)         -14,417   -24.79% 
- sherlock::the_whitespace                 920,781 (646 MB/s)     1,181,974 (503 MB/s)        261,193    28.37% 
- sherlock::word_ending_n                  5,703,864 (104 MB/s)   1,925,578 (308 MB/s)     -3,778,286   -66.24% 
- sherlock::words                          6,786,318 (87 MB/s)    9,697,201 (61 MB/s)       2,910,883    42.89% 
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre2
deleted file mode 100644
index 76b3242..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre2
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:          20 ns/iter (+/- 0) = 19500 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          15 ns/iter (+/- 0) = 26000 MB/s
-test misc::anchored_literal_short_match      ... bench:          19 ns/iter (+/- 1) = 1368 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          13 ns/iter (+/- 0) = 2000 MB/s
-test misc::easy0_1K                          ... bench:         241 ns/iter (+/- 9) = 4360 MB/s
-test misc::easy0_1MB                         ... bench:     207,103 ns/iter (+/- 8,557) = 5063 MB/s
-test misc::easy0_32                          ... bench:          39 ns/iter (+/- 0) = 1512 MB/s
-test misc::easy0_32K                         ... bench:       6,522 ns/iter (+/- 20) = 5028 MB/s
-test misc::easy1_1K                          ... bench:         247 ns/iter (+/- 3) = 4226 MB/s
-test misc::easy1_1MB                         ... bench:     206,893 ns/iter (+/- 9,489) = 5068 MB/s
-test misc::easy1_32                          ... bench:          41 ns/iter (+/- 0) = 1268 MB/s
-test misc::easy1_32K                         ... bench:       6,516 ns/iter (+/- 301) = 5031 MB/s
-test misc::hard_1K                           ... bench:       1,566 ns/iter (+/- 79) = 671 MB/s
-test misc::hard_1MB                          ... bench:   1,119,234 ns/iter (+/- 38,605) = 936 MB/s
-test misc::hard_32                           ... bench:          95 ns/iter (+/- 4) = 621 MB/s
-test misc::hard_32K                          ... bench:      34,411 ns/iter (+/- 1,542) = 953 MB/s
-test misc::literal                           ... bench:          18 ns/iter (+/- 0) = 2833 MB/s
-test misc::long_needle1                      ... bench:     550,340 ns/iter (+/- 30,668) = 181 MB/s
-test misc::long_needle2                      ... bench:     553,056 ns/iter (+/- 25,618) = 180 MB/s
-test misc::match_class                       ... bench:          82 ns/iter (+/- 1) = 987 MB/s
-test misc::match_class_in_range              ... bench:          20 ns/iter (+/- 1) = 4050 MB/s
-test misc::match_class_unicode               ... bench:         351 ns/iter (+/- 14) = 458 MB/s
-test misc::medium_1K                         ... bench:         242 ns/iter (+/- 13) = 4347 MB/s
-test misc::medium_1MB                        ... bench:     207,290 ns/iter (+/- 1,458) = 5058 MB/s
-test misc::medium_32                         ... bench:          41 ns/iter (+/- 0) = 1463 MB/s
-test misc::medium_32K                        ... bench:       6,529 ns/iter (+/- 293) = 5023 MB/s
-test misc::not_literal                       ... bench:         161 ns/iter (+/- 7) = 316 MB/s
-test misc::one_pass_long_prefix              ... bench:          17 ns/iter (+/- 1) = 1529 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          18 ns/iter (+/- 1) = 1444 MB/s
-test misc::one_pass_short                    ... bench:          45 ns/iter (+/- 2) = 377 MB/s
-test misc::one_pass_short_not                ... bench:          49 ns/iter (+/- 2) = 346 MB/s
-test misc::reallyhard2_1K                    ... bench:       4,487 ns/iter (+/- 190) = 231 MB/s
-test misc::reallyhard_1K                     ... bench:       1,260 ns/iter (+/- 46) = 834 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,361,796 ns/iter (+/- 46,490) = 770 MB/s
-test misc::reallyhard_32                     ... bench:          93 ns/iter (+/- 8) = 634 MB/s
-test misc::reallyhard_32K                    ... bench:      42,503 ns/iter (+/- 1,721) = 771 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,559 ns/iter (+/- 171) = 1754 MB/s
-test regexdna::find_new_lines                ... bench:   1,887,325 ns/iter (+/- 74,026) = 2693 MB/s
-test regexdna::subst1                        ... bench:     963,089 ns/iter (+/- 3,478) = 5278 MB/s
-test regexdna::subst10                       ... bench:     968,221 ns/iter (+/- 5,406) = 5250 MB/s
-test regexdna::subst11                       ... bench:     961,661 ns/iter (+/- 45,597) = 5286 MB/s
-test regexdna::subst2                        ... bench:     956,430 ns/iter (+/- 32,654) = 5314 MB/s
-test regexdna::subst3                        ... bench:     961,204 ns/iter (+/- 48,799) = 5288 MB/s
-test regexdna::subst4                        ... bench:     961,897 ns/iter (+/- 50,762) = 5284 MB/s
-test regexdna::subst5                        ... bench:     953,412 ns/iter (+/- 69,554) = 5331 MB/s
-test regexdna::subst6                        ... bench:     962,362 ns/iter (+/- 42,136) = 5282 MB/s
-test regexdna::subst7                        ... bench:     961,694 ns/iter (+/- 100,348) = 5285 MB/s
-test regexdna::subst8                        ... bench:     963,230 ns/iter (+/- 10,882) = 5277 MB/s
-test regexdna::subst9                        ... bench:     960,246 ns/iter (+/- 27,407) = 5293 MB/s
-test regexdna::variant1                      ... bench:  15,553,281 ns/iter (+/- 566,810) = 326 MB/s
-test regexdna::variant2                      ... bench:  16,563,452 ns/iter (+/- 546,097) = 306 MB/s
-test regexdna::variant3                      ... bench:  20,405,916 ns/iter (+/- 809,236) = 249 MB/s
-test regexdna::variant4                      ... bench:  19,489,291 ns/iter (+/- 710,721) = 260 MB/s
-test regexdna::variant5                      ... bench:  17,406,769 ns/iter (+/- 656,024) = 292 MB/s
-test regexdna::variant6                      ... bench:  17,412,027 ns/iter (+/- 730,347) = 291 MB/s
-test regexdna::variant7                      ... bench:  19,509,193 ns/iter (+/- 783,850) = 260 MB/s
-test regexdna::variant8                      ... bench:  24,295,734 ns/iter (+/- 816,832) = 209 MB/s
-test regexdna::variant9                      ... bench:  22,541,558 ns/iter (+/- 783,104) = 225 MB/s
-test sherlock::before_after_holmes           ... bench:   4,583,804 ns/iter (+/- 124,057) = 129 MB/s
-test sherlock::before_holmes                 ... bench:   4,640,546 ns/iter (+/- 241,311) = 128 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     509,088 ns/iter (+/- 25,069) = 1168 MB/s
-test sherlock::ing_suffix                    ... bench:   1,865,631 ns/iter (+/- 68,625) = 318 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   4,922,883 ns/iter (+/- 232,825) = 120 MB/s
-test sherlock::letters                       ... bench:   9,848,144 ns/iter (+/- 206,915) = 60 MB/s
-test sherlock::letters_lower                 ... bench:   9,723,642 ns/iter (+/- 370,000) = 61 MB/s
-test sherlock::letters_upper                 ... bench:   1,762,773 ns/iter (+/- 86,671) = 337 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     206,367 ns/iter (+/- 8,874) = 2882 MB/s
-test sherlock::name_alt1                     ... bench:     485,953 ns/iter (+/- 15,036) = 1224 MB/s
-test sherlock::name_alt2                     ... bench:     483,813 ns/iter (+/- 17,822) = 1229 MB/s
-test sherlock::name_alt3                     ... bench:     903,013 ns/iter (+/- 38,445) = 658 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,993,633 ns/iter (+/- 131,218) = 198 MB/s
-test sherlock::name_alt4                     ... bench:      78,831 ns/iter (+/- 2,012) = 7546 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,647,202 ns/iter (+/- 5,838) = 361 MB/s
-test sherlock::name_alt5                     ... bench:     678,798 ns/iter (+/- 1,146) = 876 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,792,461 ns/iter (+/- 3,532) = 331 MB/s
-test sherlock::name_holmes                   ... bench:     406,138 ns/iter (+/- 1,157) = 1464 MB/s
-test sherlock::name_holmes_nocase            ... bench:     517,884 ns/iter (+/- 8,548) = 1148 MB/s
-test sherlock::name_sherlock                 ... bench:     282,357 ns/iter (+/- 13,583) = 2107 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     207,894 ns/iter (+/- 1,847) = 2861 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,122,583 ns/iter (+/- 52,189) = 529 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,092,252 ns/iter (+/- 43,130) = 544 MB/s
-test sherlock::name_whitespace               ... bench:     280,360 ns/iter (+/- 12,136) = 2122 MB/s
-test sherlock::no_match_common               ... bench:     436,303 ns/iter (+/- 19,423) = 1363 MB/s
-test sherlock::no_match_really_common        ... bench:     417,686 ns/iter (+/- 15,258) = 1424 MB/s
-test sherlock::no_match_uncommon             ... bench:      28,504 ns/iter (+/- 1,032) = 20871 MB/s
-test sherlock::quotes                        ... bench:     541,513 ns/iter (+/- 21,121) = 1098 MB/s
-test sherlock::repeated_class_negation       ... bench:   5,489,721 ns/iter (+/- 185,165) = 108 MB/s
-test sherlock::the_lower                     ... bench:     680,710 ns/iter (+/- 29,403) = 873 MB/s
-test sherlock::the_nocase                    ... bench:     737,040 ns/iter (+/- 4,391) = 807 MB/s
-test sherlock::the_upper                     ... bench:      50,026 ns/iter (+/- 205) = 11892 MB/s
-test sherlock::the_whitespace                ... bench:     885,922 ns/iter (+/- 9,145) = 671 MB/s
-test sherlock::word_ending_n                 ... bench:   5,424,773 ns/iter (+/- 154,353) = 109 MB/s
-test sherlock::words                         ... bench:   5,753,231 ns/iter (+/- 177,890) = 103 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre2-vs-rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre2-vs-rust
deleted file mode 100644
index 3d89e19..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/pcre2-vs-rust
+++ /dev/null
@@ -1,94 +0,0 @@
- name                                     pcre2 ns/iter          rust ns/iter           diff ns/iter    diff % 
- misc::anchored_literal_long_match        20 (19500 MB/s)        24 (16250 MB/s)                   4    20.00% 
- misc::anchored_literal_long_non_match    15 (26000 MB/s)        27 (14444 MB/s)                  12    80.00% 
- misc::anchored_literal_short_match       19 (1368 MB/s)         22 (1181 MB/s)                    3    15.79% 
- misc::anchored_literal_short_non_match   13 (2000 MB/s)         24 (1083 MB/s)                   11    84.62% 
- misc::easy0_1K                           241 (4360 MB/s)        16 (65687 MB/s)                -225   -93.36% 
- misc::easy0_1MB                          207,103 (5063 MB/s)    20 (52430150 MB/s)         -207,083   -99.99% 
- misc::easy0_32                           39 (1512 MB/s)         16 (3687 MB/s)                  -23   -58.97% 
- misc::easy0_32K                          6,522 (5028 MB/s)      16 (2049687 MB/s)            -6,506   -99.75% 
- misc::easy1_1K                           247 (4226 MB/s)        48 (21750 MB/s)                -199   -80.57% 
- misc::easy1_1MB                          206,893 (5068 MB/s)    48 (21845750 MB/s)         -206,845   -99.98% 
- misc::easy1_32                           41 (1268 MB/s)         46 (1130 MB/s)                    5    12.20% 
- misc::easy1_32K                          6,516 (5031 MB/s)      47 (697617 MB/s)             -6,469   -99.28% 
- misc::hard_1K                            1,566 (671 MB/s)       58 (18120 MB/s)              -1,508   -96.30% 
- misc::hard_1MB                           1,119,234 (936 MB/s)   61 (17190213 MB/s)       -1,119,173   -99.99% 
- misc::hard_32                            95 (621 MB/s)          58 (1017 MB/s)                  -37   -38.95% 
- misc::hard_32K                           34,411 (953 MB/s)      56 (585625 MB/s)            -34,355   -99.84% 
- misc::literal                            18 (2833 MB/s)         16 (3187 MB/s)                   -2   -11.11% 
- misc::long_needle1                       550,340 (181 MB/s)     2,226 (44924 MB/s)         -548,114   -99.60% 
- misc::long_needle2                       553,056 (180 MB/s)     576,997 (173 MB/s)           23,941     4.33% 
- misc::match_class                        82 (987 MB/s)          65 (1246 MB/s)                  -17   -20.73% 
- misc::match_class_in_range               20 (4050 MB/s)         27 (3000 MB/s)                    7    35.00% 
- misc::match_class_unicode                351 (458 MB/s)         283 (568 MB/s)                  -68   -19.37% 
- misc::medium_1K                          242 (4347 MB/s)        16 (65750 MB/s)                -226   -93.39% 
- misc::medium_1MB                         207,290 (5058 MB/s)    21 (49933523 MB/s)         -207,269   -99.99% 
- misc::medium_32                          41 (1463 MB/s)         17 (3529 MB/s)                  -24   -58.54% 
- misc::medium_32K                         6,529 (5023 MB/s)      17 (1929176 MB/s)            -6,512   -99.74% 
- misc::not_literal                        161 (316 MB/s)         105 (485 MB/s)                  -56   -34.78% 
- misc::one_pass_long_prefix               17 (1529 MB/s)         68 (382 MB/s)                    51   300.00% 
- misc::one_pass_long_prefix_not           18 (1444 MB/s)         58 (448 MB/s)                    40   222.22% 
- misc::one_pass_short                     45 (377 MB/s)          45 (377 MB/s)                     0     0.00% 
- misc::one_pass_short_not                 49 (346 MB/s)          50 (340 MB/s)                     1     2.04% 
- misc::reallyhard2_1K                     4,487 (231 MB/s)       83 (12530 MB/s)              -4,404   -98.15% 
- misc::reallyhard_1K                      1,260 (834 MB/s)       1,822 (576 MB/s)                562    44.60% 
- misc::reallyhard_1MB                     1,361,796 (770 MB/s)   1,768,327 (592 MB/s)        406,531    29.85% 
- misc::reallyhard_32                      93 (634 MB/s)          121 (487 MB/s)                   28    30.11% 
- misc::reallyhard_32K                     42,503 (771 MB/s)      56,375 (581 MB/s)            13,872    32.64% 
- misc::reverse_suffix_no_quadratic        4,559 (1754 MB/s)      5,803 (1378 MB/s)             1,244    27.29% 
- regexdna::find_new_lines                 1,887,325 (2693 MB/s)  14,818,233 (343 MB/s)    12,930,908   685.14% 
- regexdna::subst1                         963,089 (5278 MB/s)    896,790 (5668 MB/s)         -66,299    -6.88% 
- regexdna::subst10                        968,221 (5250 MB/s)    957,325 (5310 MB/s)         -10,896    -1.13% 
- regexdna::subst11                        961,661 (5286 MB/s)    917,248 (5542 MB/s)         -44,413    -4.62% 
- regexdna::subst2                         956,430 (5314 MB/s)    892,129 (5698 MB/s)         -64,301    -6.72% 
- regexdna::subst3                         961,204 (5288 MB/s)    929,250 (5470 MB/s)         -31,954    -3.32% 
- regexdna::subst4                         961,897 (5284 MB/s)    872,581 (5825 MB/s)         -89,316    -9.29% 
- regexdna::subst5                         953,412 (5331 MB/s)    875,804 (5804 MB/s)         -77,608    -8.14% 
- regexdna::subst6                         962,362 (5282 MB/s)    884,639 (5746 MB/s)         -77,723    -8.08% 
- regexdna::subst7                         961,694 (5285 MB/s)    872,791 (5824 MB/s)         -88,903    -9.24% 
- regexdna::subst8                         963,230 (5277 MB/s)    873,833 (5817 MB/s)         -89,397    -9.28% 
- regexdna::subst9                         960,246 (5293 MB/s)    886,744 (5732 MB/s)         -73,502    -7.65% 
- regexdna::variant1                       15,553,281 (326 MB/s)  3,699,267 (1374 MB/s)   -11,854,014   -76.22% 
- regexdna::variant2                       16,563,452 (306 MB/s)  6,760,952 (751 MB/s)     -9,802,500   -59.18% 
- regexdna::variant3                       20,405,916 (249 MB/s)  8,030,646 (633 MB/s)    -12,375,270   -60.65% 
- regexdna::variant4                       19,489,291 (260 MB/s)  8,077,290 (629 MB/s)    -11,412,001   -58.56% 
- regexdna::variant5                       17,406,769 (292 MB/s)  6,787,242 (748 MB/s)    -10,619,527   -61.01% 
- regexdna::variant6                       17,412,027 (291 MB/s)  6,577,777 (772 MB/s)    -10,834,250   -62.22% 
- regexdna::variant7                       19,509,193 (260 MB/s)  6,705,580 (758 MB/s)    -12,803,613   -65.63% 
- regexdna::variant8                       24,295,734 (209 MB/s)  6,818,785 (745 MB/s)    -17,476,949   -71.93% 
- regexdna::variant9                       22,541,558 (225 MB/s)  6,821,453 (745 MB/s)    -15,720,105   -69.74% 
- sherlock::before_after_holmes            4,583,804 (129 MB/s)   1,029,866 (577 MB/s)     -3,553,938   -77.53% 
- sherlock::before_holmes                  4,640,546 (128 MB/s)   76,633 (7763 MB/s)       -4,563,913   -98.35% 
- sherlock::holmes_cochar_watson           509,088 (1168 MB/s)    144,725 (4110 MB/s)        -364,363   -71.57% 
- sherlock::ing_suffix                     1,865,631 (318 MB/s)   436,202 (1363 MB/s)      -1,429,429   -76.62% 
- sherlock::ing_suffix_limited_space       4,922,883 (120 MB/s)   1,182,943 (502 MB/s)     -3,739,940   -75.97% 
- sherlock::letters                        9,848,144 (60 MB/s)    24,390,452 (24 MB/s)     14,542,308   147.67% 
- sherlock::letters_lower                  9,723,642 (61 MB/s)    23,784,108 (25 MB/s)     14,060,466   144.60% 
- sherlock::letters_upper                  1,762,773 (337 MB/s)   1,993,838 (298 MB/s)        231,065    13.11% 
- sherlock::line_boundary_sherlock_holmes  206,367 (2882 MB/s)    999,414 (595 MB/s)          793,047   384.29% 
- sherlock::name_alt1                      485,953 (1224 MB/s)    34,298 (17345 MB/s)        -451,655   -92.94% 
- sherlock::name_alt2                      483,813 (1229 MB/s)    124,226 (4789 MB/s)        -359,587   -74.32% 
- sherlock::name_alt3                      903,013 (658 MB/s)     137,742 (4319 MB/s)        -765,271   -84.75% 
- sherlock::name_alt3_nocase               2,993,633 (198 MB/s)   1,293,763 (459 MB/s)     -1,699,870   -56.78% 
- sherlock::name_alt4                      78,831 (7546 MB/s)     164,900 (3607 MB/s)          86,069   109.18% 
- sherlock::name_alt4_nocase               1,647,202 (361 MB/s)   235,023 (2531 MB/s)      -1,412,179   -85.73% 
- sherlock::name_alt5                      678,798 (876 MB/s)     127,928 (4650 MB/s)        -550,870   -81.15% 
- sherlock::name_alt5_nocase               1,792,461 (331 MB/s)   659,591 (901 MB/s)       -1,132,870   -63.20% 
- sherlock::name_holmes                    406,138 (1464 MB/s)    40,902 (14545 MB/s)        -365,236   -89.93% 
- sherlock::name_holmes_nocase             517,884 (1148 MB/s)    198,658 (2994 MB/s)        -319,226   -61.64% 
- sherlock::name_sherlock                  282,357 (2107 MB/s)    68,924 (8631 MB/s)         -213,433   -75.59% 
- sherlock::name_sherlock_holmes           207,894 (2861 MB/s)    31,640 (18803 MB/s)        -176,254   -84.78% 
- sherlock::name_sherlock_holmes_nocase    1,122,583 (529 MB/s)   173,522 (3428 MB/s)        -949,061   -84.54% 
- sherlock::name_sherlock_nocase           1,092,252 (544 MB/s)   170,888 (3481 MB/s)        -921,364   -84.35% 
- sherlock::name_whitespace                280,360 (2122 MB/s)    84,314 (7056 MB/s)         -196,046   -69.93% 
- sherlock::no_match_common                436,303 (1363 MB/s)    20,727 (28703 MB/s)        -415,576   -95.25% 
- sherlock::no_match_really_common         417,686 (1424 MB/s)    381,476 (1559 MB/s)         -36,210    -8.67% 
- sherlock::no_match_uncommon              28,504 (20871 MB/s)    20,786 (28621 MB/s)          -7,718   -27.08% 
- sherlock::quotes                         541,513 (1098 MB/s)    531,487 (1119 MB/s)         -10,026    -1.85% 
- sherlock::repeated_class_negation        5,489,721 (108 MB/s)   85,881,944 (6 MB/s)      80,392,223  1464.41% 
- sherlock::the_lower                      680,710 (873 MB/s)     654,110 (909 MB/s)          -26,600    -3.91% 
- sherlock::the_nocase                     737,040 (807 MB/s)     474,456 (1253 MB/s)        -262,584   -35.63% 
- sherlock::the_upper                      50,026 (11892 MB/s)    43,746 (13599 MB/s)          -6,280   -12.55% 
- sherlock::the_whitespace                 885,922 (671 MB/s)     1,181,974 (503 MB/s)        296,052    33.42% 
- sherlock::word_ending_n                  5,424,773 (109 MB/s)   1,925,578 (308 MB/s)     -3,499,195   -64.50% 
- sherlock::words                          5,753,231 (103 MB/s)   9,697,201 (61 MB/s)       3,943,970    68.55% 
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/re2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/re2
deleted file mode 100644
index d1f0bea4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/re2
+++ /dev/null
@@ -1,101 +0,0 @@
-
-running 96 tests
-test misc::anchored_literal_long_match       ... bench:         102 ns/iter (+/- 3) = 3823 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          20 ns/iter (+/- 0) = 19500 MB/s
-test misc::anchored_literal_short_match      ... bench:          95 ns/iter (+/- 8) = 273 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          17 ns/iter (+/- 0) = 1529 MB/s
-test misc::easy0_1K                          ... bench:         149 ns/iter (+/- 10) = 7053 MB/s
-test misc::easy0_1MB                         ... bench:      29,234 ns/iter (+/- 886) = 35869 MB/s
-test misc::easy0_32                          ... bench:         126 ns/iter (+/- 4) = 468 MB/s
-test misc::easy0_32K                         ... bench:       1,266 ns/iter (+/- 42) = 25904 MB/s
-test misc::easy1_1K                          ... bench:         130 ns/iter (+/- 4) = 8030 MB/s
-test misc::easy1_1MB                         ... bench:      29,218 ns/iter (+/- 791) = 35888 MB/s
-test misc::easy1_32                          ... bench:         112 ns/iter (+/- 7) = 464 MB/s
-test misc::easy1_32K                         ... bench:       1,251 ns/iter (+/- 45) = 26209 MB/s
-test misc::hard_1K                           ... bench:       2,357 ns/iter (+/- 33) = 445 MB/s
-test misc::hard_1MB                          ... bench:   2,149,909 ns/iter (+/- 151,258) = 487 MB/s
-test misc::hard_32                           ... bench:         195 ns/iter (+/- 16) = 302 MB/s
-test misc::hard_32K                          ... bench:     105,137 ns/iter (+/- 6,252) = 311 MB/s
-test misc::literal                           ... bench:          89 ns/iter (+/- 3) = 573 MB/s
-test misc::long_needle1                      ... bench:     170,090 ns/iter (+/- 5,891) = 587 MB/s
-test misc::long_needle2                      ... bench:     174,341 ns/iter (+/- 7,949) = 573 MB/s
-test misc::match_class                       ... bench:         220 ns/iter (+/- 16) = 368 MB/s
-test misc::match_class_in_range              ... bench:         215 ns/iter (+/- 16) = 376 MB/s
-test misc::match_class_unicode               ... bench:         382 ns/iter (+/- 27) = 421 MB/s
-test misc::medium_1K                         ... bench:       1,939 ns/iter (+/- 153) = 542 MB/s
-test misc::medium_1MB                        ... bench:   1,775,335 ns/iter (+/- 91,241) = 590 MB/s
-test misc::medium_32                         ... bench:         190 ns/iter (+/- 12) = 315 MB/s
-test misc::medium_32K                        ... bench:      83,245 ns/iter (+/- 5,385) = 393 MB/s
-test misc::no_exponential                    ... bench:         269 ns/iter (+/- 22) = 371 MB/s
-test misc::not_literal                       ... bench:         167 ns/iter (+/- 13) = 305 MB/s
-test misc::one_pass_long_prefix              ... bench:          84 ns/iter (+/- 7) = 309 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         137 ns/iter (+/- 12) = 189 MB/s
-test misc::one_pass_short                    ... bench:         108 ns/iter (+/- 3) = 157 MB/s
-test misc::one_pass_short_not                ... bench:         105 ns/iter (+/- 6) = 161 MB/s
-test misc::reallyhard2_1K                    ... bench:       1,811 ns/iter (+/- 44) = 574 MB/s
-test misc::reallyhard_1K                     ... bench:       2,324 ns/iter (+/- 223) = 452 MB/s
-test misc::reallyhard_1MB                    ... bench:   2,033,298 ns/iter (+/- 148,939) = 515 MB/s
-test misc::reallyhard_32                     ... bench:         185 ns/iter (+/- 8) = 318 MB/s
-test misc::reallyhard_32K                    ... bench:      83,263 ns/iter (+/- 4,231) = 393 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:      13,501 ns/iter (+/- 1,380) = 592 MB/s
-test regexdna::find_new_lines                ... bench:  31,464,067 ns/iter (+/- 2,248,457) = 161 MB/s
-test regexdna::subst1                        ... bench:   5,257,629 ns/iter (+/- 142,910) = 966 MB/s
-test regexdna::subst10                       ... bench:   5,189,384 ns/iter (+/- 130,525) = 979 MB/s
-test regexdna::subst11                       ... bench:   5,261,936 ns/iter (+/- 309,355) = 966 MB/s
-test regexdna::subst2                        ... bench:   5,268,281 ns/iter (+/- 348,592) = 964 MB/s
-test regexdna::subst3                        ... bench:   5,245,664 ns/iter (+/- 403,198) = 969 MB/s
-test regexdna::subst4                        ... bench:   5,264,833 ns/iter (+/- 312,063) = 965 MB/s
-test regexdna::subst5                        ... bench:   5,181,850 ns/iter (+/- 117,306) = 981 MB/s
-test regexdna::subst6                        ... bench:   5,200,226 ns/iter (+/- 124,723) = 977 MB/s
-test regexdna::subst7                        ... bench:   5,233,678 ns/iter (+/- 367,749) = 971 MB/s
-test regexdna::subst8                        ... bench:   5,242,400 ns/iter (+/- 317,859) = 969 MB/s
-test regexdna::subst9                        ... bench:   5,325,464 ns/iter (+/- 395,485) = 954 MB/s
-test regexdna::variant1                      ... bench:  24,377,246 ns/iter (+/- 733,355) = 208 MB/s
-test regexdna::variant2                      ... bench:  26,405,686 ns/iter (+/- 771,755) = 192 MB/s
-test regexdna::variant3                      ... bench:  25,130,419 ns/iter (+/- 1,245,527) = 202 MB/s
-test regexdna::variant4                      ... bench:  32,527,780 ns/iter (+/- 5,073,721) = 156 MB/s
-test regexdna::variant5                      ... bench:  31,081,800 ns/iter (+/- 1,256,796) = 163 MB/s
-test regexdna::variant6                      ... bench:  28,744,478 ns/iter (+/- 1,243,565) = 176 MB/s
-test regexdna::variant7                      ... bench:  26,693,756 ns/iter (+/- 886,566) = 190 MB/s
-test regexdna::variant8                      ... bench:  21,478,184 ns/iter (+/- 1,374,415) = 236 MB/s
-test regexdna::variant9                      ... bench:  18,639,814 ns/iter (+/- 519,136) = 272 MB/s
-test sherlock::before_after_holmes           ... bench:   1,552,265 ns/iter (+/- 105,467) = 383 MB/s
-test sherlock::before_holmes                 ... bench:   1,360,446 ns/iter (+/- 111,123) = 437 MB/s
-test sherlock::everything_greedy             ... bench:   6,356,610 ns/iter (+/- 343,163) = 93 MB/s
-test sherlock::everything_greedy_nl          ... bench:   2,380,946 ns/iter (+/- 36,936) = 249 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   1,144,439 ns/iter (+/- 25,948) = 519 MB/s
-test sherlock::holmes_coword_watson          ... bench:   1,503,311 ns/iter (+/- 99,075) = 395 MB/s
-test sherlock::ing_suffix                    ... bench:   3,003,144 ns/iter (+/- 239,408) = 198 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,721,656 ns/iter (+/- 129,795) = 345 MB/s
-test sherlock::letters                       ... bench:  73,833,131 ns/iter (+/- 2,542,107) = 8 MB/s
-test sherlock::letters_lower                 ... bench:  72,250,289 ns/iter (+/- 1,280,826) = 8 MB/s
-test sherlock::letters_upper                 ... bench:   3,397,481 ns/iter (+/- 160,294) = 175 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   3,694,486 ns/iter (+/- 403,679) = 161 MB/s
-test sherlock::name_alt1                     ... bench:      70,121 ns/iter (+/- 3,926) = 8484 MB/s
-test sherlock::name_alt2                     ... bench:   1,120,245 ns/iter (+/- 36,040) = 531 MB/s
-test sherlock::name_alt3                     ... bench:   1,247,630 ns/iter (+/- 127,226) = 476 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,894,586 ns/iter (+/- 201,023) = 205 MB/s
-test sherlock::name_alt4                     ... bench:   1,142,872 ns/iter (+/- 82,896) = 520 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,785,266 ns/iter (+/- 166,100) = 333 MB/s
-test sherlock::name_alt5                     ... bench:   1,167,553 ns/iter (+/- 91,672) = 509 MB/s
-test sherlock::name_alt5_nocase              ... bench:   2,023,732 ns/iter (+/- 74,558) = 293 MB/s
-test sherlock::name_holmes                   ... bench:     126,480 ns/iter (+/- 6,959) = 4703 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,420,548 ns/iter (+/- 75,407) = 418 MB/s
-test sherlock::name_sherlock                 ... bench:      57,090 ns/iter (+/- 1,392) = 10420 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      57,965 ns/iter (+/- 2,996) = 10263 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,837,721 ns/iter (+/- 66,965) = 323 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,672,451 ns/iter (+/- 204,590) = 355 MB/s
-test sherlock::name_whitespace               ... bench:      60,342 ns/iter (+/- 3,290) = 9859 MB/s
-test sherlock::no_match_common               ... bench:     434,496 ns/iter (+/- 35,617) = 1369 MB/s
-test sherlock::no_match_really_common        ... bench:     431,778 ns/iter (+/- 11,799) = 1377 MB/s
-test sherlock::no_match_uncommon             ... bench:      19,313 ns/iter (+/- 1,167) = 30804 MB/s
-test sherlock::quotes                        ... bench:   1,301,485 ns/iter (+/- 92,772) = 457 MB/s
-test sherlock::the_lower                     ... bench:   1,846,403 ns/iter (+/- 39,799) = 322 MB/s
-test sherlock::the_nocase                    ... bench:   2,956,115 ns/iter (+/- 136,011) = 201 MB/s
-test sherlock::the_upper                     ... bench:     165,976 ns/iter (+/- 5,838) = 3584 MB/s
-test sherlock::the_whitespace                ... bench:   1,816,669 ns/iter (+/- 117,437) = 327 MB/s
-test sherlock::word_ending_n                 ... bench:   2,601,847 ns/iter (+/- 166,024) = 228 MB/s
-test sherlock::words                         ... bench:  21,137,049 ns/iter (+/- 750,253) = 28 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 96 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/re2-vs-rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/re2-vs-rust
deleted file mode 100644
index 180e431..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/re2-vs-rust
+++ /dev/null
@@ -1,97 +0,0 @@
- name                                     re2 ns/iter            rust ns/iter           diff ns/iter    diff % 
- misc::anchored_literal_long_match        102 (3823 MB/s)        24 (16250 MB/s)                 -78   -76.47% 
- misc::anchored_literal_long_non_match    20 (19500 MB/s)        27 (14444 MB/s)                   7    35.00% 
- misc::anchored_literal_short_match       95 (273 MB/s)          22 (1181 MB/s)                  -73   -76.84% 
- misc::anchored_literal_short_non_match   17 (1529 MB/s)         24 (1083 MB/s)                    7    41.18% 
- misc::easy0_1K                           149 (7053 MB/s)        16 (65687 MB/s)                -133   -89.26% 
- misc::easy0_1MB                          29,234 (35869 MB/s)    20 (52430150 MB/s)          -29,214   -99.93% 
- misc::easy0_32                           126 (468 MB/s)         16 (3687 MB/s)                 -110   -87.30% 
- misc::easy0_32K                          1,266 (25904 MB/s)     16 (2049687 MB/s)            -1,250   -98.74% 
- misc::easy1_1K                           130 (8030 MB/s)        48 (21750 MB/s)                 -82   -63.08% 
- misc::easy1_1MB                          29,218 (35888 MB/s)    48 (21845750 MB/s)          -29,170   -99.84% 
- misc::easy1_32                           112 (464 MB/s)         46 (1130 MB/s)                  -66   -58.93% 
- misc::easy1_32K                          1,251 (26209 MB/s)     47 (697617 MB/s)             -1,204   -96.24% 
- misc::hard_1K                            2,357 (445 MB/s)       58 (18120 MB/s)              -2,299   -97.54% 
- misc::hard_1MB                           2,149,909 (487 MB/s)   61 (17190213 MB/s)       -2,149,848  -100.00% 
- misc::hard_32                            195 (302 MB/s)         58 (1017 MB/s)                 -137   -70.26% 
- misc::hard_32K                           105,137 (311 MB/s)     56 (585625 MB/s)           -105,081   -99.95% 
- misc::literal                            89 (573 MB/s)          16 (3187 MB/s)                  -73   -82.02% 
- misc::long_needle1                       170,090 (587 MB/s)     2,226 (44924 MB/s)         -167,864   -98.69% 
- misc::long_needle2                       174,341 (573 MB/s)     576,997 (173 MB/s)          402,656   230.96% 
- misc::match_class                        220 (368 MB/s)         65 (1246 MB/s)                 -155   -70.45% 
- misc::match_class_in_range               215 (376 MB/s)         27 (3000 MB/s)                 -188   -87.44% 
- misc::match_class_unicode                382 (421 MB/s)         283 (568 MB/s)                  -99   -25.92% 
- misc::medium_1K                          1,939 (542 MB/s)       16 (65750 MB/s)              -1,923   -99.17% 
- misc::medium_1MB                         1,775,335 (590 MB/s)   21 (49933523 MB/s)       -1,775,314  -100.00% 
- misc::medium_32                          190 (315 MB/s)         17 (3529 MB/s)                 -173   -91.05% 
- misc::medium_32K                         83,245 (393 MB/s)      17 (1929176 MB/s)           -83,228   -99.98% 
- misc::no_exponential                     269 (371 MB/s)         394 (253 MB/s)                  125    46.47% 
- misc::not_literal                        167 (305 MB/s)         105 (485 MB/s)                  -62   -37.13% 
- misc::one_pass_long_prefix               84 (309 MB/s)          68 (382 MB/s)                   -16   -19.05% 
- misc::one_pass_long_prefix_not           137 (189 MB/s)         58 (448 MB/s)                   -79   -57.66% 
- misc::one_pass_short                     108 (157 MB/s)         45 (377 MB/s)                   -63   -58.33% 
- misc::one_pass_short_not                 105 (161 MB/s)         50 (340 MB/s)                   -55   -52.38% 
- misc::reallyhard2_1K                     1,811 (574 MB/s)       83 (12530 MB/s)              -1,728   -95.42% 
- misc::reallyhard_1K                      2,324 (452 MB/s)       1,822 (576 MB/s)               -502   -21.60% 
- misc::reallyhard_1MB                     2,033,298 (515 MB/s)   1,768,327 (592 MB/s)       -264,971   -13.03% 
- misc::reallyhard_32                      185 (318 MB/s)         121 (487 MB/s)                  -64   -34.59% 
- misc::reallyhard_32K                     83,263 (393 MB/s)      56,375 (581 MB/s)           -26,888   -32.29% 
- misc::reverse_suffix_no_quadratic        13,501 (592 MB/s)      5,803 (1378 MB/s)            -7,698   -57.02% 
- regexdna::find_new_lines                 31,464,067 (161 MB/s)  14,818,233 (343 MB/s)   -16,645,834   -52.90% 
- regexdna::subst1                         5,257,629 (966 MB/s)   896,790 (5668 MB/s)      -4,360,839   -82.94% 
- regexdna::subst10                        5,189,384 (979 MB/s)   957,325 (5310 MB/s)      -4,232,059   -81.55% 
- regexdna::subst11                        5,261,936 (966 MB/s)   917,248 (5542 MB/s)      -4,344,688   -82.57% 
- regexdna::subst2                         5,268,281 (964 MB/s)   892,129 (5698 MB/s)      -4,376,152   -83.07% 
- regexdna::subst3                         5,245,664 (969 MB/s)   929,250 (5470 MB/s)      -4,316,414   -82.29% 
- regexdna::subst4                         5,264,833 (965 MB/s)   872,581 (5825 MB/s)      -4,392,252   -83.43% 
- regexdna::subst5                         5,181,850 (981 MB/s)   875,804 (5804 MB/s)      -4,306,046   -83.10% 
- regexdna::subst6                         5,200,226 (977 MB/s)   884,639 (5746 MB/s)      -4,315,587   -82.99% 
- regexdna::subst7                         5,233,678 (971 MB/s)   872,791 (5824 MB/s)      -4,360,887   -83.32% 
- regexdna::subst8                         5,242,400 (969 MB/s)   873,833 (5817 MB/s)      -4,368,567   -83.33% 
- regexdna::subst9                         5,325,464 (954 MB/s)   886,744 (5732 MB/s)      -4,438,720   -83.35% 
- regexdna::variant1                       24,377,246 (208 MB/s)  3,699,267 (1374 MB/s)   -20,677,979   -84.82% 
- regexdna::variant2                       26,405,686 (192 MB/s)  6,760,952 (751 MB/s)    -19,644,734   -74.40% 
- regexdna::variant3                       25,130,419 (202 MB/s)  8,030,646 (633 MB/s)    -17,099,773   -68.04% 
- regexdna::variant4                       32,527,780 (156 MB/s)  8,077,290 (629 MB/s)    -24,450,490   -75.17% 
- regexdna::variant5                       31,081,800 (163 MB/s)  6,787,242 (748 MB/s)    -24,294,558   -78.16% 
- regexdna::variant6                       28,744,478 (176 MB/s)  6,577,777 (772 MB/s)    -22,166,701   -77.12% 
- regexdna::variant7                       26,693,756 (190 MB/s)  6,705,580 (758 MB/s)    -19,988,176   -74.88% 
- regexdna::variant8                       21,478,184 (236 MB/s)  6,818,785 (745 MB/s)    -14,659,399   -68.25% 
- regexdna::variant9                       18,639,814 (272 MB/s)  6,821,453 (745 MB/s)    -11,818,361   -63.40% 
- sherlock::before_after_holmes            1,552,265 (383 MB/s)   1,029,866 (577 MB/s)       -522,399   -33.65% 
- sherlock::before_holmes                  1,360,446 (437 MB/s)   76,633 (7763 MB/s)       -1,283,813   -94.37% 
- sherlock::everything_greedy              6,356,610 (93 MB/s)    2,375,079 (250 MB/s)     -3,981,531   -62.64% 
- sherlock::everything_greedy_nl           2,380,946 (249 MB/s)   916,250 (649 MB/s)       -1,464,696   -61.52% 
- sherlock::holmes_cochar_watson           1,144,439 (519 MB/s)   144,725 (4110 MB/s)        -999,714   -87.35% 
- sherlock::holmes_coword_watson           1,503,311 (395 MB/s)   565,247 (1052 MB/s)        -938,064   -62.40% 
- sherlock::ing_suffix                     3,003,144 (198 MB/s)   436,202 (1363 MB/s)      -2,566,942   -85.48% 
- sherlock::ing_suffix_limited_space       1,721,656 (345 MB/s)   1,182,943 (502 MB/s)       -538,713   -31.29% 
- sherlock::letters                        73,833,131 (8 MB/s)    24,390,452 (24 MB/s)    -49,442,679   -66.97% 
- sherlock::letters_lower                  72,250,289 (8 MB/s)    23,784,108 (25 MB/s)    -48,466,181   -67.08% 
- sherlock::letters_upper                  3,397,481 (175 MB/s)   1,993,838 (298 MB/s)     -1,403,643   -41.31% 
- sherlock::line_boundary_sherlock_holmes  3,694,486 (161 MB/s)   999,414 (595 MB/s)       -2,695,072   -72.95% 
- sherlock::name_alt1                      70,121 (8484 MB/s)     34,298 (17345 MB/s)         -35,823   -51.09% 
- sherlock::name_alt2                      1,120,245 (531 MB/s)   124,226 (4789 MB/s)        -996,019   -88.91% 
- sherlock::name_alt3                      1,247,630 (476 MB/s)   137,742 (4319 MB/s)      -1,109,888   -88.96% 
- sherlock::name_alt3_nocase               2,894,586 (205 MB/s)   1,293,763 (459 MB/s)     -1,600,823   -55.30% 
- sherlock::name_alt4                      1,142,872 (520 MB/s)   164,900 (3607 MB/s)        -977,972   -85.57% 
- sherlock::name_alt4_nocase               1,785,266 (333 MB/s)   235,023 (2531 MB/s)      -1,550,243   -86.84% 
- sherlock::name_alt5                      1,167,553 (509 MB/s)   127,928 (4650 MB/s)      -1,039,625   -89.04% 
- sherlock::name_alt5_nocase               2,023,732 (293 MB/s)   659,591 (901 MB/s)       -1,364,141   -67.41% 
- sherlock::name_holmes                    126,480 (4703 MB/s)    40,902 (14545 MB/s)         -85,578   -67.66% 
- sherlock::name_holmes_nocase             1,420,548 (418 MB/s)   198,658 (2994 MB/s)      -1,221,890   -86.02% 
- sherlock::name_sherlock                  57,090 (10420 MB/s)    68,924 (8631 MB/s)           11,834    20.73% 
- sherlock::name_sherlock_holmes           57,965 (10263 MB/s)    31,640 (18803 MB/s)         -26,325   -45.42% 
- sherlock::name_sherlock_holmes_nocase    1,837,721 (323 MB/s)   173,522 (3428 MB/s)      -1,664,199   -90.56% 
- sherlock::name_sherlock_nocase           1,672,451 (355 MB/s)   170,888 (3481 MB/s)      -1,501,563   -89.78% 
- sherlock::name_whitespace                60,342 (9859 MB/s)     84,314 (7056 MB/s)           23,972    39.73% 
- sherlock::no_match_common                434,496 (1369 MB/s)    20,727 (28703 MB/s)        -413,769   -95.23% 
- sherlock::no_match_really_common         431,778 (1377 MB/s)    381,476 (1559 MB/s)         -50,302   -11.65% 
- sherlock::no_match_uncommon              19,313 (30804 MB/s)    20,786 (28621 MB/s)           1,473     7.63% 
- sherlock::quotes                         1,301,485 (457 MB/s)   531,487 (1119 MB/s)        -769,998   -59.16% 
- sherlock::the_lower                      1,846,403 (322 MB/s)   654,110 (909 MB/s)       -1,192,293   -64.57% 
- sherlock::the_nocase                     2,956,115 (201 MB/s)   474,456 (1253 MB/s)      -2,481,659   -83.95% 
- sherlock::the_upper                      165,976 (3584 MB/s)    43,746 (13599 MB/s)        -122,230   -73.64% 
- sherlock::the_whitespace                 1,816,669 (327 MB/s)   1,181,974 (503 MB/s)       -634,695   -34.94% 
- sherlock::word_ending_n                  2,601,847 (228 MB/s)   1,925,578 (308 MB/s)       -676,269   -25.99% 
- sherlock::words                          21,137,049 (28 MB/s)   9,697,201 (61 MB/s)     -11,439,848   -54.12% 
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/rust
deleted file mode 100644
index 22848ccf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/rust
+++ /dev/null
@@ -1,103 +0,0 @@
-
-running 98 tests
-test misc::anchored_literal_long_match       ... bench:          24 ns/iter (+/- 0) = 16250 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          27 ns/iter (+/- 0) = 14444 MB/s
-test misc::anchored_literal_short_match      ... bench:          22 ns/iter (+/- 0) = 1181 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          24 ns/iter (+/- 1) = 1083 MB/s
-test misc::easy0_1K                          ... bench:          16 ns/iter (+/- 0) = 65687 MB/s
-test misc::easy0_1MB                         ... bench:          20 ns/iter (+/- 0) = 52430150 MB/s
-test misc::easy0_32                          ... bench:          16 ns/iter (+/- 0) = 3687 MB/s
-test misc::easy0_32K                         ... bench:          16 ns/iter (+/- 0) = 2049687 MB/s
-test misc::easy1_1K                          ... bench:          48 ns/iter (+/- 2) = 21750 MB/s
-test misc::easy1_1MB                         ... bench:          48 ns/iter (+/- 2) = 21845750 MB/s
-test misc::easy1_32                          ... bench:          46 ns/iter (+/- 0) = 1130 MB/s
-test misc::easy1_32K                         ... bench:          47 ns/iter (+/- 0) = 697617 MB/s
-test misc::hard_1K                           ... bench:          58 ns/iter (+/- 0) = 18120 MB/s
-test misc::hard_1MB                          ... bench:          61 ns/iter (+/- 0) = 17190213 MB/s
-test misc::hard_32                           ... bench:          58 ns/iter (+/- 0) = 1017 MB/s
-test misc::hard_32K                          ... bench:          56 ns/iter (+/- 2) = 585625 MB/s
-test misc::literal                           ... bench:          16 ns/iter (+/- 0) = 3187 MB/s
-test misc::long_needle1                      ... bench:       2,226 ns/iter (+/- 139) = 44924 MB/s
-test misc::long_needle2                      ... bench:     576,997 ns/iter (+/- 21,660) = 173 MB/s
-test misc::match_class                       ... bench:          65 ns/iter (+/- 3) = 1246 MB/s
-test misc::match_class_in_range              ... bench:          27 ns/iter (+/- 0) = 3000 MB/s
-test misc::match_class_unicode               ... bench:         283 ns/iter (+/- 15) = 568 MB/s
-test misc::medium_1K                         ... bench:          16 ns/iter (+/- 0) = 65750 MB/s
-test misc::medium_1MB                        ... bench:          21 ns/iter (+/- 1) = 49933523 MB/s
-test misc::medium_32                         ... bench:          17 ns/iter (+/- 0) = 3529 MB/s
-test misc::medium_32K                        ... bench:          17 ns/iter (+/- 0) = 1929176 MB/s
-test misc::no_exponential                    ... bench:         394 ns/iter (+/- 0) = 253 MB/s
-test misc::not_literal                       ... bench:         105 ns/iter (+/- 0) = 485 MB/s
-test misc::one_pass_long_prefix              ... bench:          68 ns/iter (+/- 0) = 382 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          58 ns/iter (+/- 3) = 448 MB/s
-test misc::one_pass_short                    ... bench:          45 ns/iter (+/- 2) = 377 MB/s
-test misc::one_pass_short_not                ... bench:          50 ns/iter (+/- 16) = 340 MB/s
-test misc::reallyhard2_1K                    ... bench:          83 ns/iter (+/- 4) = 12530 MB/s
-test misc::reallyhard_1K                     ... bench:       1,822 ns/iter (+/- 72) = 576 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,768,327 ns/iter (+/- 67,421) = 592 MB/s
-test misc::reallyhard_32                     ... bench:         121 ns/iter (+/- 4) = 487 MB/s
-test misc::reallyhard_32K                    ... bench:      56,375 ns/iter (+/- 1,404) = 581 MB/s
-test misc::replace_all                       ... bench:         142 ns/iter (+/- 0)
-test misc::reverse_suffix_no_quadratic       ... bench:       5,803 ns/iter (+/- 6) = 1378 MB/s
-test regexdna::find_new_lines                ... bench:  14,818,233 ns/iter (+/- 430,454) = 343 MB/s
-test regexdna::subst1                        ... bench:     896,790 ns/iter (+/- 2,273) = 5668 MB/s
-test regexdna::subst10                       ... bench:     957,325 ns/iter (+/- 7,490) = 5310 MB/s
-test regexdna::subst11                       ... bench:     917,248 ns/iter (+/- 12,886) = 5542 MB/s
-test regexdna::subst2                        ... bench:     892,129 ns/iter (+/- 36,230) = 5698 MB/s
-test regexdna::subst3                        ... bench:     929,250 ns/iter (+/- 38,312) = 5470 MB/s
-test regexdna::subst4                        ... bench:     872,581 ns/iter (+/- 27,431) = 5825 MB/s
-test regexdna::subst5                        ... bench:     875,804 ns/iter (+/- 30,611) = 5804 MB/s
-test regexdna::subst6                        ... bench:     884,639 ns/iter (+/- 44,927) = 5746 MB/s
-test regexdna::subst7                        ... bench:     872,791 ns/iter (+/- 31,810) = 5824 MB/s
-test regexdna::subst8                        ... bench:     873,833 ns/iter (+/- 37,335) = 5817 MB/s
-test regexdna::subst9                        ... bench:     886,744 ns/iter (+/- 42,880) = 5732 MB/s
-test regexdna::variant1                      ... bench:   3,699,267 ns/iter (+/- 134,945) = 1374 MB/s
-test regexdna::variant2                      ... bench:   6,760,952 ns/iter (+/- 228,082) = 751 MB/s
-test regexdna::variant3                      ... bench:   8,030,646 ns/iter (+/- 271,204) = 633 MB/s
-test regexdna::variant4                      ... bench:   8,077,290 ns/iter (+/- 266,264) = 629 MB/s
-test regexdna::variant5                      ... bench:   6,787,242 ns/iter (+/- 226,071) = 748 MB/s
-test regexdna::variant6                      ... bench:   6,577,777 ns/iter (+/- 226,332) = 772 MB/s
-test regexdna::variant7                      ... bench:   6,705,580 ns/iter (+/- 232,953) = 758 MB/s
-test regexdna::variant8                      ... bench:   6,818,785 ns/iter (+/- 241,075) = 745 MB/s
-test regexdna::variant9                      ... bench:   6,821,453 ns/iter (+/- 257,044) = 745 MB/s
-test sherlock::before_after_holmes           ... bench:   1,029,866 ns/iter (+/- 42,662) = 577 MB/s
-test sherlock::before_holmes                 ... bench:      76,633 ns/iter (+/- 1,135) = 7763 MB/s
-test sherlock::everything_greedy             ... bench:   2,375,079 ns/iter (+/- 102,532) = 250 MB/s
-test sherlock::everything_greedy_nl          ... bench:     916,250 ns/iter (+/- 37,950) = 649 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     144,725 ns/iter (+/- 8,793) = 4110 MB/s
-test sherlock::holmes_coword_watson          ... bench:     565,247 ns/iter (+/- 24,056) = 1052 MB/s
-test sherlock::ing_suffix                    ... bench:     436,202 ns/iter (+/- 19,863) = 1363 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,182,943 ns/iter (+/- 38,658) = 502 MB/s
-test sherlock::letters                       ... bench:  24,390,452 ns/iter (+/- 869,008) = 24 MB/s
-test sherlock::letters_lower                 ... bench:  23,784,108 ns/iter (+/- 796,195) = 25 MB/s
-test sherlock::letters_upper                 ... bench:   1,993,838 ns/iter (+/- 77,697) = 298 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     999,414 ns/iter (+/- 31,202) = 595 MB/s
-test sherlock::name_alt1                     ... bench:      34,298 ns/iter (+/- 1,091) = 17345 MB/s
-test sherlock::name_alt2                     ... bench:     124,226 ns/iter (+/- 5,579) = 4789 MB/s
-test sherlock::name_alt3                     ... bench:     137,742 ns/iter (+/- 6,496) = 4319 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,293,763 ns/iter (+/- 51,097) = 459 MB/s
-test sherlock::name_alt4                     ... bench:     164,900 ns/iter (+/- 10,023) = 3607 MB/s
-test sherlock::name_alt4_nocase              ... bench:     235,023 ns/iter (+/- 14,465) = 2531 MB/s
-test sherlock::name_alt5                     ... bench:     127,928 ns/iter (+/- 6,882) = 4650 MB/s
-test sherlock::name_alt5_nocase              ... bench:     659,591 ns/iter (+/- 20,587) = 901 MB/s
-test sherlock::name_holmes                   ... bench:      40,902 ns/iter (+/- 402) = 14545 MB/s
-test sherlock::name_holmes_nocase            ... bench:     198,658 ns/iter (+/- 3,782) = 2994 MB/s
-test sherlock::name_sherlock                 ... bench:      68,924 ns/iter (+/- 1,456) = 8631 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      31,640 ns/iter (+/- 383) = 18803 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     173,522 ns/iter (+/- 7,812) = 3428 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     170,888 ns/iter (+/- 612) = 3481 MB/s
-test sherlock::name_whitespace               ... bench:      84,314 ns/iter (+/- 508) = 7056 MB/s
-test sherlock::no_match_common               ... bench:      20,727 ns/iter (+/- 565) = 28703 MB/s
-test sherlock::no_match_really_common        ... bench:     381,476 ns/iter (+/- 2,338) = 1559 MB/s
-test sherlock::no_match_uncommon             ... bench:      20,786 ns/iter (+/- 717) = 28621 MB/s
-test sherlock::quotes                        ... bench:     531,487 ns/iter (+/- 5,517) = 1119 MB/s
-test sherlock::repeated_class_negation       ... bench:  85,881,944 ns/iter (+/- 4,906,514) = 6 MB/s
-test sherlock::the_lower                     ... bench:     654,110 ns/iter (+/- 34,542) = 909 MB/s
-test sherlock::the_nocase                    ... bench:     474,456 ns/iter (+/- 16,549) = 1253 MB/s
-test sherlock::the_upper                     ... bench:      43,746 ns/iter (+/- 579) = 13599 MB/s
-test sherlock::the_whitespace                ... bench:   1,181,974 ns/iter (+/- 3,005) = 503 MB/s
-test sherlock::word_ending_n                 ... bench:   1,925,578 ns/iter (+/- 3,811) = 308 MB/s
-test sherlock::words                         ... bench:   9,697,201 ns/iter (+/- 156,772) = 61 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 98 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/tcl b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/tcl
deleted file mode 100644
index 3e1778b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/tcl
+++ /dev/null
@@ -1,94 +0,0 @@
-
-running 89 tests
-test misc::anchored_literal_long_match       ... bench:         662 ns/iter (+/- 12) = 589 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         133 ns/iter (+/- 1) = 2932 MB/s
-test misc::anchored_literal_short_match      ... bench:         616 ns/iter (+/- 18) = 42 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         122 ns/iter (+/- 1) = 213 MB/s
-test misc::easy0_1K                          ... bench:      11,816 ns/iter (+/- 92) = 88 MB/s
-test misc::easy0_1MB                         ... bench:   3,409,439 ns/iter (+/- 94,972) = 307 MB/s
-test misc::easy0_32                          ... bench:       8,785 ns/iter (+/- 183) = 6 MB/s
-test misc::easy0_32K                         ... bench:     115,371 ns/iter (+/- 2,279) = 284 MB/s
-test misc::easy1_1K                          ... bench:       7,038 ns/iter (+/- 145) = 148 MB/s
-test misc::easy1_1MB                         ... bench:   3,396,028 ns/iter (+/- 100,173) = 308 MB/s
-test misc::easy1_32                          ... bench:       3,687 ns/iter (+/- 44) = 14 MB/s
-test misc::easy1_32K                         ... bench:     109,689 ns/iter (+/- 3,757) = 298 MB/s
-test misc::hard_1K                           ... bench:      14,836 ns/iter (+/- 518) = 70 MB/s
-test misc::hard_1MB                          ... bench:   3,376,015 ns/iter (+/- 95,045) = 310 MB/s
-test misc::hard_32                           ... bench:      11,278 ns/iter (+/- 389) = 5 MB/s
-test misc::hard_32K                          ... bench:     115,400 ns/iter (+/- 4,738) = 284 MB/s
-test misc::literal                           ... bench:         511 ns/iter (+/- 11) = 99 MB/s
-test misc::long_needle1                      ... bench:  18,076,901 ns/iter (+/- 523,761) = 5 MB/s
-test misc::long_needle2                      ... bench:  18,497,725 ns/iter (+/- 465,516) = 5 MB/s
-test misc::match_class                       ... bench:         620 ns/iter (+/- 23) = 130 MB/s
-test misc::match_class_in_range              ... bench:         605 ns/iter (+/- 26) = 133 MB/s
-test misc::medium_1K                         ... bench:      12,355 ns/iter (+/- 390) = 85 MB/s
-test misc::medium_1MB                        ... bench:   3,410,978 ns/iter (+/- 112,021) = 307 MB/s
-test misc::medium_32                         ... bench:       9,086 ns/iter (+/- 287) = 6 MB/s
-test misc::medium_32K                        ... bench:     116,944 ns/iter (+/- 5,654) = 280 MB/s
-test misc::no_exponential                    ... bench:   2,379,518 ns/iter (+/- 92,628)
-test misc::not_literal                       ... bench:       1,979 ns/iter (+/- 116) = 25 MB/s
-test misc::one_pass_long_prefix              ... bench:       6,932 ns/iter (+/- 464) = 3 MB/s
-test misc::one_pass_long_prefix_not          ... bench:       6,242 ns/iter (+/- 384) = 4 MB/s
-test misc::one_pass_short                    ... bench:         630 ns/iter (+/- 42) = 26 MB/s
-test misc::one_pass_short_not                ... bench:         718 ns/iter (+/- 64) = 23 MB/s
-test misc::reallyhard2_1K                    ... bench:     108,421 ns/iter (+/- 6,489) = 9 MB/s
-test misc::reallyhard_1K                     ... bench:      14,330 ns/iter (+/- 814) = 73 MB/s
-test misc::reallyhard_1MB                    ... bench:   3,287,965 ns/iter (+/- 203,546) = 318 MB/s
-test misc::reallyhard_32                     ... bench:      11,193 ns/iter (+/- 683) = 5 MB/s
-test misc::reallyhard_32K                    ... bench:     112,731 ns/iter (+/- 5,966) = 290 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:      26,907 ns/iter (+/- 2,396) = 297 MB/s
-test regexdna::find_new_lines                ... bench:  48,223,361 ns/iter (+/- 2,855,654) = 105 MB/s
-test regexdna::subst1                        ... bench:  27,177,359 ns/iter (+/- 1,359,987) = 187 MB/s
-test regexdna::subst10                       ... bench:  26,722,144 ns/iter (+/- 1,090,216) = 190 MB/s
-test regexdna::subst11                       ... bench:  27,382,875 ns/iter (+/- 1,656,754) = 185 MB/s
-test regexdna::subst2                        ... bench:  26,957,766 ns/iter (+/- 1,433,630) = 188 MB/s
-test regexdna::subst3                        ... bench:  27,195,925 ns/iter (+/- 1,828,460) = 186 MB/s
-test regexdna::subst4                        ... bench:  26,342,249 ns/iter (+/- 1,949,172) = 192 MB/s
-test regexdna::subst5                        ... bench:  26,543,675 ns/iter (+/- 2,143,336) = 191 MB/s
-test regexdna::subst6                        ... bench:  26,185,452 ns/iter (+/- 2,199,220) = 194 MB/s
-test regexdna::subst7                        ... bench:  26,338,573 ns/iter (+/- 2,124,778) = 193 MB/s
-test regexdna::subst8                        ... bench:  26,468,652 ns/iter (+/- 1,923,567) = 192 MB/s
-test regexdna::subst9                        ... bench:  26,487,784 ns/iter (+/- 1,250,319) = 191 MB/s
-test regexdna::variant1                      ... bench:  16,325,983 ns/iter (+/- 491,000) = 311 MB/s
-test regexdna::variant2                      ... bench:  16,845,952 ns/iter (+/- 470,062) = 301 MB/s
-test regexdna::variant3                      ... bench:  19,258,030 ns/iter (+/- 525,045) = 263 MB/s
-test regexdna::variant4                      ... bench:  18,018,713 ns/iter (+/- 1,235,670) = 282 MB/s
-test regexdna::variant5                      ... bench:  19,583,528 ns/iter (+/- 1,756,762) = 259 MB/s
-test regexdna::variant6                      ... bench:  17,630,308 ns/iter (+/- 973,191) = 288 MB/s
-test regexdna::variant7                      ... bench:  17,121,666 ns/iter (+/- 1,274,478) = 296 MB/s
-test regexdna::variant8                      ... bench:  17,154,863 ns/iter (+/- 425,504) = 296 MB/s
-test regexdna::variant9                      ... bench:  17,930,482 ns/iter (+/- 587,712) = 283 MB/s
-test sherlock::before_after_holmes           ... bench:   2,600,503 ns/iter (+/- 383,440) = 228 MB/s
-test sherlock::before_holmes                 ... bench:   3,145,648 ns/iter (+/- 37,316) = 189 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   2,668,355 ns/iter (+/- 193,724) = 222 MB/s
-test sherlock::ing_suffix                    ... bench:   5,638,296 ns/iter (+/- 69,345) = 105 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  22,466,946 ns/iter (+/- 659,956) = 26 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,251,996 ns/iter (+/- 66,639) = 264 MB/s
-test sherlock::name_alt1                     ... bench:   2,276,056 ns/iter (+/- 64,088) = 261 MB/s
-test sherlock::name_alt2                     ... bench:   3,196,348 ns/iter (+/- 202,979) = 186 MB/s
-test sherlock::name_alt3                     ... bench:   5,260,374 ns/iter (+/- 426,028) = 113 MB/s
-test sherlock::name_alt3_nocase              ... bench:   8,529,394 ns/iter (+/- 558,731) = 69 MB/s
-test sherlock::name_alt4                     ... bench:   2,787,972 ns/iter (+/- 153,839) = 213 MB/s
-test sherlock::name_alt4_nocase              ... bench:   3,370,452 ns/iter (+/- 140,385) = 176 MB/s
-test sherlock::name_alt5                     ... bench:   3,795,793 ns/iter (+/- 182,240) = 156 MB/s
-test sherlock::name_alt5_nocase              ... bench:   4,691,422 ns/iter (+/- 161,515) = 126 MB/s
-test sherlock::name_holmes                   ... bench:   2,513,139 ns/iter (+/- 72,157) = 236 MB/s
-test sherlock::name_holmes_nocase            ... bench:   2,636,441 ns/iter (+/- 78,402) = 225 MB/s
-test sherlock::name_sherlock                 ... bench:   2,015,753 ns/iter (+/- 104,000) = 295 MB/s
-test sherlock::name_sherlock_holmes          ... bench:   2,180,684 ns/iter (+/- 162,201) = 272 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   2,306,664 ns/iter (+/- 165,960) = 257 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   2,065,630 ns/iter (+/- 155,223) = 288 MB/s
-test sherlock::name_whitespace               ... bench:   2,266,188 ns/iter (+/- 173,380) = 262 MB/s
-test sherlock::no_match_common               ... bench:   1,881,887 ns/iter (+/- 123,883) = 316 MB/s
-test sherlock::no_match_really_common        ... bench:   1,804,352 ns/iter (+/- 33,396) = 329 MB/s
-test sherlock::no_match_uncommon             ... bench:   1,809,300 ns/iter (+/- 123,888) = 328 MB/s
-test sherlock::quotes                        ... bench:   9,682,507 ns/iter (+/- 1,200,909) = 61 MB/s
-test sherlock::repeated_class_negation       ... bench:  68,600,251 ns/iter (+/- 2,043,582) = 8 MB/s
-test sherlock::the_lower                     ... bench:   6,849,558 ns/iter (+/- 517,709) = 86 MB/s
-test sherlock::the_nocase                    ... bench:   7,354,742 ns/iter (+/- 390,834) = 80 MB/s
-test sherlock::the_upper                     ... bench:   2,442,364 ns/iter (+/- 174,452) = 243 MB/s
-test sherlock::the_whitespace                ... bench:   9,210,338 ns/iter (+/- 651,675) = 64 MB/s
-test sherlock::words                         ... bench:  47,863,652 ns/iter (+/- 3,536,998) = 12 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 89 measured
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/tcl-vs-rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/tcl-vs-rust
deleted file mode 100644
index 0faefe97..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/05/tcl-vs-rust
+++ /dev/null
@@ -1,90 +0,0 @@
- name                                     tcl ns/iter            rust ns/iter           diff ns/iter    diff % 
- misc::anchored_literal_long_match        662 (589 MB/s)         24 (16250 MB/s)                -638   -96.37% 
- misc::anchored_literal_long_non_match    133 (2932 MB/s)        27 (14444 MB/s)                -106   -79.70% 
- misc::anchored_literal_short_match       616 (42 MB/s)          22 (1181 MB/s)                 -594   -96.43% 
- misc::anchored_literal_short_non_match   122 (213 MB/s)         24 (1083 MB/s)                  -98   -80.33% 
- misc::easy0_1K                           11,816 (88 MB/s)       16 (65687 MB/s)             -11,800   -99.86% 
- misc::easy0_1MB                          3,409,439 (307 MB/s)   20 (52430150 MB/s)       -3,409,419  -100.00% 
- misc::easy0_32                           8,785 (6 MB/s)         16 (3687 MB/s)               -8,769   -99.82% 
- misc::easy0_32K                          115,371 (284 MB/s)     16 (2049687 MB/s)          -115,355   -99.99% 
- misc::easy1_1K                           7,038 (148 MB/s)       48 (21750 MB/s)              -6,990   -99.32% 
- misc::easy1_1MB                          3,396,028 (308 MB/s)   48 (21845750 MB/s)       -3,395,980  -100.00% 
- misc::easy1_32                           3,687 (14 MB/s)        46 (1130 MB/s)               -3,641   -98.75% 
- misc::easy1_32K                          109,689 (298 MB/s)     47 (697617 MB/s)           -109,642   -99.96% 
- misc::hard_1K                            14,836 (70 MB/s)       58 (18120 MB/s)             -14,778   -99.61% 
- misc::hard_1MB                           3,376,015 (310 MB/s)   61 (17190213 MB/s)       -3,375,954  -100.00% 
- misc::hard_32                            11,278 (5 MB/s)        58 (1017 MB/s)              -11,220   -99.49% 
- misc::hard_32K                           115,400 (284 MB/s)     56 (585625 MB/s)           -115,344   -99.95% 
- misc::literal                            511 (99 MB/s)          16 (3187 MB/s)                 -495   -96.87% 
- misc::long_needle1                       18,076,901 (5 MB/s)    2,226 (44924 MB/s)      -18,074,675   -99.99% 
- misc::long_needle2                       18,497,725 (5 MB/s)    576,997 (173 MB/s)      -17,920,728   -96.88% 
- misc::match_class                        620 (130 MB/s)         65 (1246 MB/s)                 -555   -89.52% 
- misc::match_class_in_range               605 (133 MB/s)         27 (3000 MB/s)                 -578   -95.54% 
- misc::medium_1K                          12,355 (85 MB/s)       16 (65750 MB/s)             -12,339   -99.87% 
- misc::medium_1MB                         3,410,978 (307 MB/s)   21 (49933523 MB/s)       -3,410,957  -100.00% 
- misc::medium_32                          9,086 (6 MB/s)         17 (3529 MB/s)               -9,069   -99.81% 
- misc::medium_32K                         116,944 (280 MB/s)     17 (1929176 MB/s)          -116,927   -99.99% 
- misc::no_exponential                     2,379,518              394 (253 MB/s)           -2,379,124   -99.98% 
- misc::not_literal                        1,979 (25 MB/s)        105 (485 MB/s)               -1,874   -94.69% 
- misc::one_pass_long_prefix               6,932 (3 MB/s)         68 (382 MB/s)                -6,864   -99.02% 
- misc::one_pass_long_prefix_not           6,242 (4 MB/s)         58 (448 MB/s)                -6,184   -99.07% 
- misc::one_pass_short                     630 (26 MB/s)          45 (377 MB/s)                  -585   -92.86% 
- misc::one_pass_short_not                 718 (23 MB/s)          50 (340 MB/s)                  -668   -93.04% 
- misc::reallyhard2_1K                     108,421 (9 MB/s)       83 (12530 MB/s)            -108,338   -99.92% 
- misc::reallyhard_1K                      14,330 (73 MB/s)       1,822 (576 MB/s)            -12,508   -87.29% 
- misc::reallyhard_1MB                     3,287,965 (318 MB/s)   1,768,327 (592 MB/s)     -1,519,638   -46.22% 
- misc::reallyhard_32                      11,193 (5 MB/s)        121 (487 MB/s)              -11,072   -98.92% 
- misc::reallyhard_32K                     112,731 (290 MB/s)     56,375 (581 MB/s)           -56,356   -49.99% 
- misc::reverse_suffix_no_quadratic        26,907 (297 MB/s)      5,803 (1378 MB/s)           -21,104   -78.43% 
- regexdna::find_new_lines                 48,223,361 (105 MB/s)  14,818,233 (343 MB/s)   -33,405,128   -69.27% 
- regexdna::subst1                         27,177,359 (187 MB/s)  896,790 (5668 MB/s)     -26,280,569   -96.70% 
- regexdna::subst10                        26,722,144 (190 MB/s)  957,325 (5310 MB/s)     -25,764,819   -96.42% 
- regexdna::subst11                        27,382,875 (185 MB/s)  917,248 (5542 MB/s)     -26,465,627   -96.65% 
- regexdna::subst2                         26,957,766 (188 MB/s)  892,129 (5698 MB/s)     -26,065,637   -96.69% 
- regexdna::subst3                         27,195,925 (186 MB/s)  929,250 (5470 MB/s)     -26,266,675   -96.58% 
- regexdna::subst4                         26,342,249 (192 MB/s)  872,581 (5825 MB/s)     -25,469,668   -96.69% 
- regexdna::subst5                         26,543,675 (191 MB/s)  875,804 (5804 MB/s)     -25,667,871   -96.70% 
- regexdna::subst6                         26,185,452 (194 MB/s)  884,639 (5746 MB/s)     -25,300,813   -96.62% 
- regexdna::subst7                         26,338,573 (193 MB/s)  872,791 (5824 MB/s)     -25,465,782   -96.69% 
- regexdna::subst8                         26,468,652 (192 MB/s)  873,833 (5817 MB/s)     -25,594,819   -96.70% 
- regexdna::subst9                         26,487,784 (191 MB/s)  886,744 (5732 MB/s)     -25,601,040   -96.65% 
- regexdna::variant1                       16,325,983 (311 MB/s)  3,699,267 (1374 MB/s)   -12,626,716   -77.34% 
- regexdna::variant2                       16,845,952 (301 MB/s)  6,760,952 (751 MB/s)    -10,085,000   -59.87% 
- regexdna::variant3                       19,258,030 (263 MB/s)  8,030,646 (633 MB/s)    -11,227,384   -58.30% 
- regexdna::variant4                       18,018,713 (282 MB/s)  8,077,290 (629 MB/s)     -9,941,423   -55.17% 
- regexdna::variant5                       19,583,528 (259 MB/s)  6,787,242 (748 MB/s)    -12,796,286   -65.34% 
- regexdna::variant6                       17,630,308 (288 MB/s)  6,577,777 (772 MB/s)    -11,052,531   -62.69% 
- regexdna::variant7                       17,121,666 (296 MB/s)  6,705,580 (758 MB/s)    -10,416,086   -60.84% 
- regexdna::variant8                       17,154,863 (296 MB/s)  6,818,785 (745 MB/s)    -10,336,078   -60.25% 
- regexdna::variant9                       17,930,482 (283 MB/s)  6,821,453 (745 MB/s)    -11,109,029   -61.96% 
- sherlock::before_after_holmes            2,600,503 (228 MB/s)   1,029,866 (577 MB/s)     -1,570,637   -60.40% 
- sherlock::before_holmes                  3,145,648 (189 MB/s)   76,633 (7763 MB/s)       -3,069,015   -97.56% 
- sherlock::holmes_cochar_watson           2,668,355 (222 MB/s)   144,725 (4110 MB/s)      -2,523,630   -94.58% 
- sherlock::ing_suffix                     5,638,296 (105 MB/s)   436,202 (1363 MB/s)      -5,202,094   -92.26% 
- sherlock::ing_suffix_limited_space       22,466,946 (26 MB/s)   1,182,943 (502 MB/s)    -21,284,003   -94.73% 
- sherlock::line_boundary_sherlock_holmes  2,251,996 (264 MB/s)   999,414 (595 MB/s)       -1,252,582   -55.62% 
- sherlock::name_alt1                      2,276,056 (261 MB/s)   34,298 (17345 MB/s)      -2,241,758   -98.49% 
- sherlock::name_alt2                      3,196,348 (186 MB/s)   124,226 (4789 MB/s)      -3,072,122   -96.11% 
- sherlock::name_alt3                      5,260,374 (113 MB/s)   137,742 (4319 MB/s)      -5,122,632   -97.38% 
- sherlock::name_alt3_nocase               8,529,394 (69 MB/s)    1,293,763 (459 MB/s)     -7,235,631   -84.83% 
- sherlock::name_alt4                      2,787,972 (213 MB/s)   164,900 (3607 MB/s)      -2,623,072   -94.09% 
- sherlock::name_alt4_nocase               3,370,452 (176 MB/s)   235,023 (2531 MB/s)      -3,135,429   -93.03% 
- sherlock::name_alt5                      3,795,793 (156 MB/s)   127,928 (4650 MB/s)      -3,667,865   -96.63% 
- sherlock::name_alt5_nocase               4,691,422 (126 MB/s)   659,591 (901 MB/s)       -4,031,831   -85.94% 
- sherlock::name_holmes                    2,513,139 (236 MB/s)   40,902 (14545 MB/s)      -2,472,237   -98.37% 
- sherlock::name_holmes_nocase             2,636,441 (225 MB/s)   198,658 (2994 MB/s)      -2,437,783   -92.46% 
- sherlock::name_sherlock                  2,015,753 (295 MB/s)   68,924 (8631 MB/s)       -1,946,829   -96.58% 
- sherlock::name_sherlock_holmes           2,180,684 (272 MB/s)   31,640 (18803 MB/s)      -2,149,044   -98.55% 
- sherlock::name_sherlock_holmes_nocase    2,306,664 (257 MB/s)   173,522 (3428 MB/s)      -2,133,142   -92.48% 
- sherlock::name_sherlock_nocase           2,065,630 (288 MB/s)   170,888 (3481 MB/s)      -1,894,742   -91.73% 
- sherlock::name_whitespace                2,266,188 (262 MB/s)   84,314 (7056 MB/s)       -2,181,874   -96.28% 
- sherlock::no_match_common                1,881,887 (316 MB/s)   20,727 (28703 MB/s)      -1,861,160   -98.90% 
- sherlock::no_match_really_common         1,804,352 (329 MB/s)   381,476 (1559 MB/s)      -1,422,876   -78.86% 
- sherlock::no_match_uncommon              1,809,300 (328 MB/s)   20,786 (28621 MB/s)      -1,788,514   -98.85% 
- sherlock::quotes                         9,682,507 (61 MB/s)    531,487 (1119 MB/s)      -9,151,020   -94.51% 
- sherlock::repeated_class_negation        68,600,251 (8 MB/s)    85,881,944 (6 MB/s)      17,281,693    25.19% 
- sherlock::the_lower                      6,849,558 (86 MB/s)    654,110 (909 MB/s)       -6,195,448   -90.45% 
- sherlock::the_nocase                     7,354,742 (80 MB/s)    474,456 (1253 MB/s)      -6,880,286   -93.55% 
- sherlock::the_upper                      2,442,364 (243 MB/s)   43,746 (13599 MB/s)      -2,398,618   -98.21% 
- sherlock::the_whitespace                 9,210,338 (64 MB/s)    1,181,974 (503 MB/s)     -8,028,364   -87.17% 
- sherlock::words                          47,863,652 (12 MB/s)   9,697,201 (61 MB/s)     -38,166,451   -79.74% 
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-dmd b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-dmd
deleted file mode 100644
index bffdd29..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-dmd
+++ /dev/null
@@ -1,98 +0,0 @@
-running 95 tests
-test misc::anchored_literal_long_match       ... bench:         356 ns/iter (+/- 4) = 1095 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         280 ns/iter (+/- 18) = 1392 MB/s
-test misc::anchored_literal_short_match      ... bench:         351 ns/iter (+/- 16) = 74 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         274 ns/iter (+/- 17) = 94 MB/s
-test misc::easy0_1K                          ... bench:         810 ns/iter (+/- 38) = 1297 MB/s
-test misc::easy0_1MB                         ... bench:      25,296 ns/iter (+/- 3,592) = 41453 MB/s
-test misc::easy0_32                          ... bench:         745 ns/iter (+/- 60) = 79 MB/s
-test misc::easy0_32K                         ... bench:       1,111 ns/iter (+/- 82) = 29518 MB/s
-test misc::easy1_1K                          ... bench:         730 ns/iter (+/- 20) = 1430 MB/s
-test misc::easy1_1MB                         ... bench:      25,442 ns/iter (+/- 2,076) = 41215 MB/s
-test misc::easy1_32                          ... bench:         730 ns/iter (+/- 79) = 71 MB/s
-test misc::easy1_32K                         ... bench:       1,104 ns/iter (+/- 93) = 29699 MB/s
-test misc::hard_1K                           ... bench:      18,238 ns/iter (+/- 1,173) = 57 MB/s
-test misc::hard_1MB                          ... bench:  19,302,344 ns/iter (+/- 2,039,538) = 54 MB/s
-test misc::hard_32                           ... bench:       2,508 ns/iter (+/- 119) = 23 MB/s
-test misc::hard_32K                          ... bench:     666,948 ns/iter (+/- 58,067) = 49 MB/s
-test misc::literal                           ... bench:         196 ns/iter (+/- 17) = 260 MB/s
-test misc::long_needle1                      ... bench:      82,532 ns/iter (+/- 4,618) = 1211 MB/s
-test misc::long_needle2                      ... bench:      84,079 ns/iter (+/- 5,930) = 1189 MB/s
-test misc::match_class                       ... bench:         300 ns/iter (+/- 41) = 270 MB/s
-test misc::match_class_in_range              ... bench:         258 ns/iter (+/- 16) = 313 MB/s
-test misc::match_class_unicode               ... bench:       1,563 ns/iter (+/- 171) = 103 MB/s
-test misc::medium_1K                         ... bench:       1,541 ns/iter (+/- 127) = 682 MB/s
-test misc::medium_1MB                        ... bench:     617,650 ns/iter (+/- 59,618) = 1697 MB/s
-test misc::medium_32                         ... bench:         985 ns/iter (+/- 62) = 60 MB/s
-test misc::medium_32K                        ... bench:      19,948 ns/iter (+/- 1,388) = 1644 MB/s
-test misc::no_exponential                    ... bench:     430,777 ns/iter (+/- 52,435)
-test misc::not_literal                       ... bench:       1,202 ns/iter (+/- 60) = 42 MB/s
-test misc::one_pass_long_prefix              ... bench:         630 ns/iter (+/- 45) = 41 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         617 ns/iter (+/- 60) = 42 MB/s
-test misc::one_pass_short                    ... bench:       1,102 ns/iter (+/- 38) = 15 MB/s
-test misc::one_pass_short_not                ... bench:       1,481 ns/iter (+/- 44) = 11 MB/s
-test misc::reallyhard2_1K                    ... bench:      40,749 ns/iter (+/- 2,027) = 25 MB/s
-test misc::reallyhard_1K                     ... bench:      18,987 ns/iter (+/- 1,419) = 55 MB/s
-test misc::reallyhard_1MB                    ... bench:  19,923,786 ns/iter (+/- 1,499,750) = 52 MB/s
-test misc::reallyhard_32                     ... bench:       2,369 ns/iter (+/- 115) = 24 MB/s
-test misc::reallyhard_32K                    ... bench:     627,664 ns/iter (+/- 30,507) = 52 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,933 ns/iter (+/- 700) = 1621 MB/s
-test regexdna::find_new_lines                ... bench:  28,886,666 ns/iter (+/- 1,969,669) = 175 MB/s
-test regexdna::subst1                        ... bench:   6,722,884 ns/iter (+/- 431,722) = 756 MB/s
-test regexdna::subst10                       ... bench:   6,923,833 ns/iter (+/- 677,840) = 734 MB/s
-test regexdna::subst11                       ... bench:   6,917,738 ns/iter (+/- 306,829) = 734 MB/s
-test regexdna::subst2                        ... bench:   6,914,593 ns/iter (+/- 625,342) = 735 MB/s
-test regexdna::subst3                        ... bench:   6,582,793 ns/iter (+/- 297,052) = 772 MB/s
-test regexdna::subst4                        ... bench:   6,528,804 ns/iter (+/- 463,331) = 778 MB/s
-test regexdna::subst5                        ... bench:   6,886,457 ns/iter (+/- 1,015,943) = 738 MB/s
-test regexdna::subst6                        ... bench:   6,789,493 ns/iter (+/- 573,137) = 748 MB/s
-test regexdna::subst7                        ... bench:   6,533,609 ns/iter (+/- 372,293) = 778 MB/s
-test regexdna::subst8                        ... bench:   6,536,845 ns/iter (+/- 290,249) = 777 MB/s
-test regexdna::subst9                        ... bench:   6,509,834 ns/iter (+/- 402,426) = 780 MB/s
-test regexdna::variant1                      ... bench:   5,746,639 ns/iter (+/- 205,103) = 884 MB/s
-test regexdna::variant2                      ... bench:   7,661,372 ns/iter (+/- 145,811) = 663 MB/s
-test regexdna::variant3                      ... bench:  12,801,668 ns/iter (+/- 337,572) = 397 MB/s
-test regexdna::variant4                      ... bench:  11,109,679 ns/iter (+/- 357,680) = 457 MB/s
-test regexdna::variant5                      ... bench:  11,238,093 ns/iter (+/- 1,571,929) = 452 MB/s
-test regexdna::variant6                      ... bench:   8,453,224 ns/iter (+/- 185,044) = 601 MB/s
-test regexdna::variant7                      ... bench:   8,784,446 ns/iter (+/- 153,626) = 578 MB/s
-test regexdna::variant8                      ... bench:  11,151,797 ns/iter (+/- 366,593) = 455 MB/s
-test regexdna::variant9                      ... bench:  22,206,248 ns/iter (+/- 1,143,965) = 228 MB/s
-test sherlock::before_after_holmes           ... bench:  23,458,512 ns/iter (+/- 1,982,069) = 25 MB/s
-test sherlock::before_holmes                 ... bench:  23,040,796 ns/iter (+/- 688,881) = 25 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   1,035,156 ns/iter (+/- 113,881) = 574 MB/s
-test sherlock::holmes_coword_watson          ... bench: 118,126,447 ns/iter (+/- 8,394,250) = 5 MB/s
-test sherlock::ing_suffix                    ... bench:  16,122,434 ns/iter (+/- 236,636) = 36 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  22,239,435 ns/iter (+/- 364,604) = 26 MB/s
-test sherlock::letters                       ... bench:  92,002,273 ns/iter (+/- 2,056,908) = 6 MB/s
-test sherlock::letters_lower                 ... bench:  90,778,580 ns/iter (+/- 4,179,255) = 6 MB/s
-test sherlock::letters_upper                 ... bench:   3,392,415 ns/iter (+/- 143,338) = 175 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     428,636 ns/iter (+/- 14,993) = 1387 MB/s
-test sherlock::name_alt1                     ... bench:     432,574 ns/iter (+/- 13,731) = 1375 MB/s
-test sherlock::name_alt2                     ... bench:     644,165 ns/iter (+/- 15,049) = 923 MB/s
-test sherlock::name_alt3                     ... bench:   1,176,979 ns/iter (+/- 105,694) = 505 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,054,990 ns/iter (+/- 91,909) = 289 MB/s
-test sherlock::name_alt4                     ... bench:     712,039 ns/iter (+/- 36,911) = 835 MB/s
-test sherlock::name_alt4_nocase              ... bench:     993,415 ns/iter (+/- 27,355) = 598 MB/s
-test sherlock::name_alt5                     ... bench:     757,045 ns/iter (+/- 29,126) = 785 MB/s
-test sherlock::name_alt5_nocase              ... bench:     953,821 ns/iter (+/- 37,252) = 623 MB/s
-test sherlock::name_holmes                   ... bench:     186,801 ns/iter (+/- 6,676) = 3184 MB/s
-test sherlock::name_holmes_nocase            ... bench:     539,857 ns/iter (+/- 40,614) = 1102 MB/s
-test sherlock::name_sherlock                 ... bench:      56,113 ns/iter (+/- 4,566) = 10602 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      67,558 ns/iter (+/- 6,746) = 8806 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     429,123 ns/iter (+/- 51,647) = 1386 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     396,070 ns/iter (+/- 33,934) = 1502 MB/s
-test sherlock::name_whitespace               ... bench:      84,630 ns/iter (+/- 6,285) = 7029 MB/s
-test sherlock::no_match_common               ... bench:     292,844 ns/iter (+/- 24,013) = 2031 MB/s
-test sherlock::no_match_really_common        ... bench:     290,986 ns/iter (+/- 10,163) = 2044 MB/s
-test sherlock::no_match_uncommon             ... bench:      14,041 ns/iter (+/- 599) = 42371 MB/s
-test sherlock::quotes                        ... bench:   6,489,945 ns/iter (+/- 132,983) = 91 MB/s
-test sherlock::repeated_class_negation       ... bench:  49,479,000 ns/iter (+/- 965,144) = 12 MB/s
-test sherlock::the_lower                     ... bench:   2,268,881 ns/iter (+/- 134,889) = 262 MB/s
-test sherlock::the_nocase                    ... bench:   2,906,824 ns/iter (+/- 72,615) = 204 MB/s
-test sherlock::the_upper                     ... bench:     211,138 ns/iter (+/- 9,935) = 2817 MB/s
-test sherlock::the_whitespace                ... bench:   3,488,249 ns/iter (+/- 254,294) = 170 MB/s
-test sherlock::word_ending_n                 ... bench:  30,917,395 ns/iter (+/- 2,298,620) = 19 MB/s
-test sherlock::words                         ... bench:  39,830,572 ns/iter (+/- 2,662,348) = 14 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 95 measured; 0 filtered out
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-dmd-ct b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-dmd-ct
deleted file mode 100644
index 426fa6c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-dmd-ct
+++ /dev/null
@@ -1,99 +0,0 @@
-
-running 94 tests
-test misc::anchored_literal_long_match       ... bench:         336 ns/iter (+/- 145) = 1160 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         246 ns/iter (+/- 27) = 1585 MB/s
-test misc::anchored_literal_short_match      ... bench:         313 ns/iter (+/- 32) = 83 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         248 ns/iter (+/- 31) = 104 MB/s
-test misc::easy0_1K                          ... bench:         792 ns/iter (+/- 109) = 1327 MB/s
-test misc::easy0_1MB                         ... bench:      24,706 ns/iter (+/- 812) = 42443 MB/s
-test misc::easy0_32                          ... bench:         793 ns/iter (+/- 77) = 74 MB/s
-test misc::easy0_32K                         ... bench:       1,179 ns/iter (+/- 55) = 27815 MB/s
-test misc::easy1_1K                          ... bench:         720 ns/iter (+/- 85) = 1450 MB/s
-test misc::easy1_1MB                         ... bench:      24,647 ns/iter (+/- 761) = 42544 MB/s
-test misc::easy1_32                          ... bench:         717 ns/iter (+/- 28) = 72 MB/s
-test misc::easy1_32K                         ... bench:       1,140 ns/iter (+/- 116) = 28761 MB/s
-test misc::hard_1K                           ... bench:      19,153 ns/iter (+/- 2,063) = 54 MB/s
-test misc::hard_1MB                          ... bench:  19,966,822 ns/iter (+/- 1,979,640) = 52 MB/s
-test misc::hard_32                           ... bench:       2,617 ns/iter (+/- 354) = 22 MB/s
-test misc::hard_32K                          ... bench:     621,150 ns/iter (+/- 24,244) = 52 MB/s
-test misc::literal                           ... bench:         194 ns/iter (+/- 28) = 262 MB/s
-test misc::long_needle1                      ... bench:      83,293 ns/iter (+/- 3,287) = 1200 MB/s
-test misc::long_needle2                      ... bench:      83,214 ns/iter (+/- 3,344) = 1201 MB/s
-test misc::match_class                       ... bench:         301 ns/iter (+/- 38) = 269 MB/s
-test misc::match_class_in_range              ... bench:         258 ns/iter (+/- 27) = 313 MB/s
-test misc::match_class_unicode               ... bench:       1,565 ns/iter (+/- 187) = 102 MB/s
-test misc::medium_1K                         ... bench:       1,572 ns/iter (+/- 230) = 669 MB/s
-test misc::medium_1MB                        ... bench:     609,944 ns/iter (+/- 23,088) = 1719 MB/s
-test misc::medium_32                         ... bench:         980 ns/iter (+/- 112) = 61 MB/s
-test misc::medium_32K                        ... bench:      20,058 ns/iter (+/- 884) = 1635 MB/s
-test misc::not_literal                       ... bench:       1,218 ns/iter (+/- 67) = 41 MB/s
-test misc::one_pass_long_prefix              ... bench:         588 ns/iter (+/- 93) = 44 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         595 ns/iter (+/- 77) = 43 MB/s
-test misc::one_pass_short                    ... bench:       1,114 ns/iter (+/- 52) = 15 MB/s
-test misc::one_pass_short_not                ... bench:       1,481 ns/iter (+/- 183) = 11 MB/s
-test misc::reallyhard2_1K                    ... bench:      40,858 ns/iter (+/- 1,860) = 25 MB/s
-test misc::reallyhard_1K                     ... bench:      18,678 ns/iter (+/- 835) = 56 MB/s
-test misc::reallyhard_1MB                    ... bench:  19,824,750 ns/iter (+/- 354,159) = 52 MB/s
-test misc::reallyhard_32                     ... bench:       2,340 ns/iter (+/- 68) = 25 MB/s
-test misc::reallyhard_32K                    ... bench:     621,351 ns/iter (+/- 21,369) = 52 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,919 ns/iter (+/- 224) = 1626 MB/s
-test regexdna::find_new_lines                ... bench:  27,265,128 ns/iter (+/- 1,416,486) = 186 MB/s
-test regexdna::subst1                        ... bench:   6,414,636 ns/iter (+/- 696,943) = 792 MB/s
-test regexdna::subst10                       ... bench:   6,426,829 ns/iter (+/- 206,773) = 790 MB/s
-test regexdna::subst11                       ... bench:   6,435,800 ns/iter (+/- 439,175) = 789 MB/s
-test regexdna::subst2                        ... bench:   6,428,455 ns/iter (+/- 214,961) = 790 MB/s
-test regexdna::subst3                        ... bench:   6,428,692 ns/iter (+/- 681,910) = 790 MB/s
-test regexdna::subst4                        ... bench:   6,425,781 ns/iter (+/- 129,718) = 791 MB/s
-test regexdna::subst5                        ... bench:   6,414,376 ns/iter (+/- 151,827) = 792 MB/s
-test regexdna::subst6                        ... bench:   6,455,032 ns/iter (+/- 423,915) = 787 MB/s
-test regexdna::subst7                        ... bench:   6,668,649 ns/iter (+/- 686,734) = 762 MB/s
-test regexdna::subst8                        ... bench:   6,393,791 ns/iter (+/- 172,533) = 795 MB/s
-test regexdna::subst9                        ... bench:   6,426,100 ns/iter (+/- 175,951) = 791 MB/s
-test regexdna::variant1                      ... bench:   5,612,507 ns/iter (+/- 128,406) = 905 MB/s
-test regexdna::variant2                      ... bench:   7,572,661 ns/iter (+/- 159,047) = 671 MB/s
-test regexdna::variant3                      ... bench:  12,287,183 ns/iter (+/- 378,305) = 413 MB/s
-test regexdna::variant4                      ... bench:  11,223,976 ns/iter (+/- 1,191,250) = 452 MB/s
-test regexdna::variant5                      ... bench:  11,016,081 ns/iter (+/- 714,537) = 461 MB/s
-test regexdna::variant6                      ... bench:   8,198,798 ns/iter (+/- 471,338) = 620 MB/s
-test regexdna::variant7                      ... bench:   8,895,886 ns/iter (+/- 885,690) = 571 MB/s
-test regexdna::variant8                      ... bench:  11,000,942 ns/iter (+/- 886,538) = 462 MB/s
-test regexdna::variant9                      ... bench:  20,761,109 ns/iter (+/- 629,876) = 244 MB/s
-test sherlock::before_after_holmes           ... bench:  24,417,513 ns/iter (+/- 2,359,425) = 24 MB/s
-test sherlock::before_holmes                 ... bench:  24,435,196 ns/iter (+/- 2,164,187) = 24 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   1,025,780 ns/iter (+/- 121,876) = 579 MB/s
-test sherlock::holmes_coword_watson          ... bench: 122,988,753 ns/iter (+/- 7,606,302) = 4 MB/s
-test sherlock::ing_suffix                    ... bench:  16,322,427 ns/iter (+/- 321,746) = 36 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  21,993,282 ns/iter (+/- 434,365) = 27 MB/s
-test sherlock::letters                       ... bench:  88,877,258 ns/iter (+/- 504,024) = 6 MB/s
-test sherlock::letters_lower                 ... bench:  87,709,419 ns/iter (+/- 659,859) = 6 MB/s
-test sherlock::letters_upper                 ... bench:   3,299,811 ns/iter (+/- 78,850) = 180 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     431,548 ns/iter (+/- 54,716) = 1378 MB/s
-test sherlock::name_alt1                     ... bench:     470,052 ns/iter (+/- 49,985) = 1265 MB/s
-test sherlock::name_alt2                     ... bench:     705,694 ns/iter (+/- 45,724) = 843 MB/s
-test sherlock::name_alt3                     ... bench:   1,148,456 ns/iter (+/- 51,018) = 518 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,026,355 ns/iter (+/- 220,043) = 293 MB/s
-test sherlock::name_alt4                     ... bench:     699,625 ns/iter (+/- 40,361) = 850 MB/s
-test sherlock::name_alt4_nocase              ... bench:     979,151 ns/iter (+/- 41,460) = 607 MB/s
-test sherlock::name_alt5                     ... bench:     751,646 ns/iter (+/- 31,601) = 791 MB/s
-test sherlock::name_alt5_nocase              ... bench:     950,701 ns/iter (+/- 102,078) = 625 MB/s
-test sherlock::name_holmes                   ... bench:     184,935 ns/iter (+/- 6,633) = 3216 MB/s
-test sherlock::name_holmes_nocase            ... bench:     532,703 ns/iter (+/- 33,919) = 1116 MB/s
-test sherlock::name_sherlock                 ... bench:      55,468 ns/iter (+/- 1,776) = 10725 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      67,327 ns/iter (+/- 5,464) = 8836 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     417,724 ns/iter (+/- 45,520) = 1424 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     392,285 ns/iter (+/- 14,778) = 1516 MB/s
-test sherlock::name_whitespace               ... bench:      77,112 ns/iter (+/- 2,785) = 7715 MB/s
-test sherlock::no_match_common               ... bench:     291,222 ns/iter (+/- 10,477) = 2042 MB/s
-test sherlock::no_match_really_common        ... bench:     291,393 ns/iter (+/- 10,834) = 2041 MB/s
-test sherlock::no_match_uncommon             ... bench:      14,016 ns/iter (+/- 376) = 42446 MB/s
-test sherlock::quotes                        ... bench:   6,557,639 ns/iter (+/- 158,929) = 90 MB/s
-test sherlock::repeated_class_negation       ... bench:  49,697,910 ns/iter (+/- 773,749) = 11 MB/s
-test sherlock::the_lower                     ... bench:   2,236,055 ns/iter (+/- 72,024) = 266 MB/s
-test sherlock::the_nocase                    ... bench:   2,892,430 ns/iter (+/- 89,222) = 205 MB/s
-test sherlock::the_upper                     ... bench:     207,035 ns/iter (+/- 8,624) = 2873 MB/s
-test sherlock::the_whitespace                ... bench:   3,435,267 ns/iter (+/- 416,560) = 173 MB/s
-test sherlock::word_ending_n                 ... bench:  31,751,871 ns/iter (+/- 374,472) = 18 MB/s
-test sherlock::words                         ... bench:  38,793,659 ns/iter (+/- 3,022,370) = 15 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 94 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-ldc b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-ldc
deleted file mode 100644
index 29f5595c7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-ldc
+++ /dev/null
@@ -1,100 +0,0 @@
-
-running 95 tests
-test misc::anchored_literal_long_match       ... bench:         203 ns/iter (+/- 13) = 1921 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         126 ns/iter (+/- 5) = 3095 MB/s
-test misc::anchored_literal_short_match      ... bench:         204 ns/iter (+/- 4) = 127 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         127 ns/iter (+/- 8) = 204 MB/s
-test misc::easy0_1K                          ... bench:         571 ns/iter (+/- 44) = 1840 MB/s
-test misc::easy0_1MB                         ... bench:      25,321 ns/iter (+/- 421) = 41412 MB/s
-test misc::easy0_32                          ... bench:         553 ns/iter (+/- 9) = 106 MB/s
-test misc::easy0_32K                         ... bench:         971 ns/iter (+/- 29) = 33774 MB/s
-test misc::easy1_1K                          ... bench:         508 ns/iter (+/- 22) = 2055 MB/s
-test misc::easy1_1MB                         ... bench:      24,181 ns/iter (+/- 704) = 43364 MB/s
-test misc::easy1_32                          ... bench:         494 ns/iter (+/- 14) = 105 MB/s
-test misc::easy1_32K                         ... bench:         892 ns/iter (+/- 82) = 36757 MB/s
-test misc::hard_1K                           ... bench:      15,335 ns/iter (+/- 1,224) = 68 MB/s
-test misc::hard_1MB                          ... bench:  16,105,838 ns/iter (+/- 319,567) = 65 MB/s
-test misc::hard_32                           ... bench:       1,798 ns/iter (+/- 79) = 32 MB/s
-test misc::hard_32K                          ... bench:     504,123 ns/iter (+/- 44,829) = 65 MB/s
-test misc::literal                           ... bench:          74 ns/iter (+/- 9) = 689 MB/s
-test misc::long_needle1                      ... bench:      56,853 ns/iter (+/- 3,662) = 1758 MB/s
-test misc::long_needle2                      ... bench:      57,038 ns/iter (+/- 2,532) = 1753 MB/s
-test misc::match_class                       ... bench:         140 ns/iter (+/- 15) = 578 MB/s
-test misc::match_class_in_range              ... bench:         126 ns/iter (+/- 17) = 642 MB/s
-test misc::match_class_unicode               ... bench:       1,407 ns/iter (+/- 122) = 114 MB/s
-test misc::medium_1K                         ... bench:       1,199 ns/iter (+/- 80) = 877 MB/s
-test misc::medium_1MB                        ... bench:     558,323 ns/iter (+/- 20,908) = 1878 MB/s
-test misc::medium_32                         ... bench:         661 ns/iter (+/- 30) = 90 MB/s
-test misc::medium_32K                        ... bench:      18,148 ns/iter (+/- 1,038) = 1807 MB/s
-test misc::no_exponential                    ... bench:     334,786 ns/iter (+/- 18,234)
-test misc::not_literal                       ... bench:       1,347 ns/iter (+/- 49) = 37 MB/s
-test misc::one_pass_long_prefix              ... bench:         499 ns/iter (+/- 59) = 52 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         522 ns/iter (+/- 64) = 49 MB/s
-test misc::one_pass_short                    ... bench:         804 ns/iter (+/- 37) = 21 MB/s
-test misc::one_pass_short_not                ... bench:       1,260 ns/iter (+/- 130) = 13 MB/s
-test misc::reallyhard2_1K                    ... bench:      37,726 ns/iter (+/- 1,284) = 27 MB/s
-test misc::reallyhard_1K                     ... bench:      15,246 ns/iter (+/- 901) = 68 MB/s
-test misc::reallyhard_1MB                    ... bench:  16,187,692 ns/iter (+/- 1,552,760) = 64 MB/s
-test misc::reallyhard_32                     ... bench:       1,882 ns/iter (+/- 237) = 31 MB/s
-test misc::reallyhard_32K                    ... bench:     541,567 ns/iter (+/- 64,929) = 60 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,576 ns/iter (+/- 185) = 1748 MB/s
-test regexdna::find_new_lines                ... bench:  14,744,849 ns/iter (+/- 1,141,621) = 344 MB/s
-test regexdna::subst1                        ... bench:   2,801,370 ns/iter (+/- 105,875) = 1814 MB/s
-test regexdna::subst10                       ... bench:   3,015,410 ns/iter (+/- 446,982) = 1685 MB/s
-test regexdna::subst11                       ... bench:   2,923,557 ns/iter (+/- 193,230) = 1738 MB/s
-test regexdna::subst2                        ... bench:   2,948,002 ns/iter (+/- 306,203) = 1724 MB/s
-test regexdna::subst3                        ... bench:   2,899,076 ns/iter (+/- 174,958) = 1753 MB/s
-test regexdna::subst4                        ... bench:   2,908,685 ns/iter (+/- 221,436) = 1747 MB/s
-test regexdna::subst5                        ... bench:   3,780,044 ns/iter (+/- 150,740) = 1344 MB/s
-test regexdna::subst6                        ... bench:   2,920,193 ns/iter (+/- 142,191) = 1740 MB/s
-test regexdna::subst7                        ... bench:   2,918,785 ns/iter (+/- 175,109) = 1741 MB/s
-test regexdna::subst8                        ... bench:   2,932,075 ns/iter (+/- 152,745) = 1733 MB/s
-test regexdna::subst9                        ... bench:   2,914,694 ns/iter (+/- 176,327) = 1744 MB/s
-test regexdna::variant1                      ... bench:   5,172,617 ns/iter (+/- 269,855) = 982 MB/s
-test regexdna::variant2                      ... bench:   6,770,702 ns/iter (+/- 474,076) = 750 MB/s
-test regexdna::variant3                      ... bench:  11,124,754 ns/iter (+/- 649,591) = 456 MB/s
-test regexdna::variant4                      ... bench:   9,751,982 ns/iter (+/- 460,679) = 521 MB/s
-test regexdna::variant5                      ... bench:   9,791,229 ns/iter (+/- 461,486) = 519 MB/s
-test regexdna::variant6                      ... bench:   7,417,031 ns/iter (+/- 275,225) = 685 MB/s
-test regexdna::variant7                      ... bench:   7,873,097 ns/iter (+/- 451,115) = 645 MB/s
-test regexdna::variant8                      ... bench:   9,707,683 ns/iter (+/- 418,865) = 523 MB/s
-test regexdna::variant9                      ... bench:  18,696,520 ns/iter (+/- 742,018) = 271 MB/s
-test sherlock::before_after_holmes           ... bench:  22,314,084 ns/iter (+/- 888,249) = 26 MB/s
-test sherlock::before_holmes                 ... bench:  22,501,540 ns/iter (+/- 892,027) = 26 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     929,372 ns/iter (+/- 46,859) = 640 MB/s
-test sherlock::holmes_coword_watson          ... bench: 125,548,613 ns/iter (+/- 3,297,687) = 4 MB/s
-test sherlock::ing_suffix                    ... bench:  18,023,803 ns/iter (+/- 1,079,960) = 33 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  21,809,497 ns/iter (+/- 1,259,989) = 27 MB/s
-test sherlock::letters                       ... bench:  39,512,315 ns/iter (+/- 3,309,084) = 15 MB/s
-test sherlock::letters_lower                 ... bench:  37,160,354 ns/iter (+/- 3,084,525) = 16 MB/s
-test sherlock::letters_upper                 ... bench:   1,721,867 ns/iter (+/- 66,812) = 345 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     378,307 ns/iter (+/- 30,914) = 1572 MB/s
-test sherlock::name_alt1                     ... bench:     381,242 ns/iter (+/- 41,954) = 1560 MB/s
-test sherlock::name_alt2                     ... bench:     503,558 ns/iter (+/- 46,044) = 1181 MB/s
-test sherlock::name_alt3                     ... bench:     912,340 ns/iter (+/- 79,787) = 652 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,515,048 ns/iter (+/- 74,623) = 392 MB/s
-test sherlock::name_alt4                     ... bench:     580,652 ns/iter (+/- 60,407) = 1024 MB/s
-test sherlock::name_alt4_nocase              ... bench:     826,866 ns/iter (+/- 58,485) = 719 MB/s
-test sherlock::name_alt5                     ... bench:     651,281 ns/iter (+/- 64,134) = 913 MB/s
-test sherlock::name_alt5_nocase              ... bench:     808,974 ns/iter (+/- 49,119) = 735 MB/s
-test sherlock::name_holmes                   ... bench:     120,010 ns/iter (+/- 9,458) = 4957 MB/s
-test sherlock::name_holmes_nocase            ... bench:     441,316 ns/iter (+/- 56,990) = 1348 MB/s
-test sherlock::name_sherlock                 ... bench:      39,935 ns/iter (+/- 4,078) = 14897 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      49,126 ns/iter (+/- 3,082) = 12110 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     366,865 ns/iter (+/- 18,520) = 1621 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     349,337 ns/iter (+/- 18,365) = 1703 MB/s
-test sherlock::name_whitespace               ... bench:      57,076 ns/iter (+/- 6,314) = 10423 MB/s
-test sherlock::no_match_common               ... bench:     291,022 ns/iter (+/- 30,143) = 2044 MB/s
-test sherlock::no_match_really_common        ... bench:     286,214 ns/iter (+/- 15,722) = 2078 MB/s
-test sherlock::no_match_uncommon             ... bench:      13,963 ns/iter (+/- 759) = 42607 MB/s
-test sherlock::quotes                        ... bench:   5,580,378 ns/iter (+/- 295,941) = 106 MB/s
-test sherlock::repeated_class_negation       ... bench:  52,797,981 ns/iter (+/- 2,731,805) = 11 MB/s
-test sherlock::the_lower                     ... bench:   1,295,105 ns/iter (+/- 62,365) = 459 MB/s
-test sherlock::the_nocase                    ... bench:   1,620,713 ns/iter (+/- 73,503) = 367 MB/s
-test sherlock::the_upper                     ... bench:     112,911 ns/iter (+/- 5,843) = 5269 MB/s
-test sherlock::the_whitespace                ... bench:   2,441,986 ns/iter (+/- 133,012) = 243 MB/s
-test sherlock::word_ending_n                 ... bench:  26,478,327 ns/iter (+/- 1,361,757) = 22 MB/s
-test sherlock::words                         ... bench:  23,948,872 ns/iter (+/- 2,323,993) = 24 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 95 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-ldc-ct b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-ldc-ct
deleted file mode 100644
index 6aaa5de7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/dphobos-ldc-ct
+++ /dev/null
@@ -1,99 +0,0 @@
-
-running 94 tests
-test misc::anchored_literal_long_match       ... bench:         189 ns/iter (+/- 23) = 2063 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         128 ns/iter (+/- 14) = 3046 MB/s
-test misc::anchored_literal_short_match      ... bench:         191 ns/iter (+/- 20) = 136 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         120 ns/iter (+/- 13) = 216 MB/s
-test misc::easy0_1K                          ... bench:         536 ns/iter (+/- 49) = 1960 MB/s
-test misc::easy0_1MB                         ... bench:      24,516 ns/iter (+/- 2,181) = 42772 MB/s
-test misc::easy0_32                          ... bench:         551 ns/iter (+/- 36) = 107 MB/s
-test misc::easy0_32K                         ... bench:         961 ns/iter (+/- 105) = 34125 MB/s
-test misc::easy1_1K                          ... bench:         518 ns/iter (+/- 59) = 2015 MB/s
-test misc::easy1_1MB                         ... bench:      25,352 ns/iter (+/- 2,847) = 41361 MB/s
-test misc::easy1_32                          ... bench:         501 ns/iter (+/- 42) = 103 MB/s
-test misc::easy1_32K                         ... bench:         919 ns/iter (+/- 69) = 35677 MB/s
-test misc::hard_1K                           ... bench:      16,146 ns/iter (+/- 1,124) = 65 MB/s
-test misc::hard_1MB                          ... bench:  16,482,695 ns/iter (+/- 805,077) = 63 MB/s
-test misc::hard_32                           ... bench:       1,807 ns/iter (+/- 173) = 32 MB/s
-test misc::hard_32K                          ... bench:     516,772 ns/iter (+/- 33,884) = 63 MB/s
-test misc::literal                           ... bench:          77 ns/iter (+/- 9) = 662 MB/s
-test misc::long_needle1                      ... bench:      56,900 ns/iter (+/- 3,087) = 1757 MB/s
-test misc::long_needle2                      ... bench:      57,364 ns/iter (+/- 4,166) = 1743 MB/s
-test misc::match_class                       ... bench:         156 ns/iter (+/- 21) = 519 MB/s
-test misc::match_class_in_range              ... bench:         121 ns/iter (+/- 12) = 669 MB/s
-test misc::match_class_unicode               ... bench:       1,515 ns/iter (+/- 207) = 106 MB/s
-test misc::medium_1K                         ... bench:       1,186 ns/iter (+/- 120) = 887 MB/s
-test misc::medium_1MB                        ... bench:     559,677 ns/iter (+/- 59,284) = 1873 MB/s
-test misc::medium_32                         ... bench:         657 ns/iter (+/- 86) = 91 MB/s
-test misc::medium_32K                        ... bench:      18,142 ns/iter (+/- 915) = 1807 MB/s
-test misc::not_literal                       ... bench:       1,319 ns/iter (+/- 128) = 38 MB/s
-test misc::one_pass_long_prefix              ... bench:         509 ns/iter (+/- 56) = 51 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         517 ns/iter (+/- 38) = 50 MB/s
-test misc::one_pass_short                    ... bench:         783 ns/iter (+/- 83) = 21 MB/s
-test misc::one_pass_short_not                ... bench:       1,239 ns/iter (+/- 98) = 13 MB/s
-test misc::reallyhard2_1K                    ... bench:      40,580 ns/iter (+/- 3,041) = 25 MB/s
-test misc::reallyhard_1K                     ... bench:      15,162 ns/iter (+/- 652) = 69 MB/s
-test misc::reallyhard_1MB                    ... bench:  16,065,920 ns/iter (+/- 886,245) = 65 MB/s
-test misc::reallyhard_32                     ... bench:       1,829 ns/iter (+/- 90) = 32 MB/s
-test misc::reallyhard_32K                    ... bench:     520,572 ns/iter (+/- 88,290) = 62 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,423 ns/iter (+/- 493) = 1808 MB/s
-test regexdna::find_new_lines                ... bench:  14,658,357 ns/iter (+/- 1,784,941) = 346 MB/s
-test regexdna::subst1                        ... bench:   2,984,959 ns/iter (+/- 422,186) = 1703 MB/s
-test regexdna::subst10                       ... bench:   2,836,747 ns/iter (+/- 274,300) = 1791 MB/s
-test regexdna::subst11                       ... bench:   2,809,880 ns/iter (+/- 309,516) = 1809 MB/s
-test regexdna::subst2                        ... bench:   2,868,765 ns/iter (+/- 435,511) = 1771 MB/s
-test regexdna::subst3                        ... bench:   2,837,000 ns/iter (+/- 319,135) = 1791 MB/s
-test regexdna::subst4                        ... bench:   2,856,540 ns/iter (+/- 320,458) = 1779 MB/s
-test regexdna::subst5                        ... bench:   2,820,953 ns/iter (+/- 340,996) = 1802 MB/s
-test regexdna::subst6                        ... bench:   3,588,607 ns/iter (+/- 462,158) = 1416 MB/s
-test regexdna::subst7                        ... bench:   2,896,235 ns/iter (+/- 165,525) = 1755 MB/s
-test regexdna::subst8                        ... bench:   2,982,961 ns/iter (+/- 315,768) = 1704 MB/s
-test regexdna::subst9                        ... bench:   3,024,311 ns/iter (+/- 300,274) = 1680 MB/s
-test regexdna::variant1                      ... bench:   5,234,342 ns/iter (+/- 269,577) = 971 MB/s
-test regexdna::variant2                      ... bench:   6,463,683 ns/iter (+/- 532,663) = 786 MB/s
-test regexdna::variant3                      ... bench:  10,720,523 ns/iter (+/- 414,684) = 474 MB/s
-test regexdna::variant4                      ... bench:   9,882,647 ns/iter (+/- 297,904) = 514 MB/s
-test regexdna::variant5                      ... bench:   9,664,151 ns/iter (+/- 659,587) = 526 MB/s
-test regexdna::variant6                      ... bench:   7,174,368 ns/iter (+/- 322,025) = 708 MB/s
-test regexdna::variant7                      ... bench:   7,605,668 ns/iter (+/- 411,605) = 668 MB/s
-test regexdna::variant8                      ... bench:   9,580,481 ns/iter (+/- 373,332) = 530 MB/s
-test regexdna::variant9                      ... bench:  18,270,186 ns/iter (+/- 986,510) = 278 MB/s
-test sherlock::before_after_holmes           ... bench:  21,982,853 ns/iter (+/- 1,032,853) = 27 MB/s
-test sherlock::before_holmes                 ... bench:  21,947,949 ns/iter (+/- 848,014) = 27 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     909,691 ns/iter (+/- 48,847) = 653 MB/s
-test sherlock::holmes_coword_watson          ... bench: 124,771,191 ns/iter (+/- 8,084,768) = 4 MB/s
-test sherlock::ing_suffix                    ... bench:  17,864,129 ns/iter (+/- 1,343,114) = 33 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  21,009,249 ns/iter (+/- 452,676) = 28 MB/s
-test sherlock::letters                       ... bench:  37,888,421 ns/iter (+/- 2,482,541) = 15 MB/s
-test sherlock::letters_lower                 ... bench:  37,029,883 ns/iter (+/- 481,280) = 16 MB/s
-test sherlock::letters_upper                 ... bench:   1,627,107 ns/iter (+/- 51,063) = 365 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     370,850 ns/iter (+/- 15,384) = 1604 MB/s
-test sherlock::name_alt1                     ... bench:     371,780 ns/iter (+/- 28,486) = 1600 MB/s
-test sherlock::name_alt2                     ... bench:     506,859 ns/iter (+/- 17,553) = 1173 MB/s
-test sherlock::name_alt3                     ... bench:     915,729 ns/iter (+/- 99,429) = 649 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,512,050 ns/iter (+/- 186,130) = 393 MB/s
-test sherlock::name_alt4                     ... bench:     578,710 ns/iter (+/- 18,089) = 1028 MB/s
-test sherlock::name_alt4_nocase              ... bench:     752,912 ns/iter (+/- 51,342) = 790 MB/s
-test sherlock::name_alt5                     ... bench:     595,803 ns/iter (+/- 15,053) = 998 MB/s
-test sherlock::name_alt5_nocase              ... bench:     730,149 ns/iter (+/- 40,662) = 814 MB/s
-test sherlock::name_holmes                   ... bench:     115,596 ns/iter (+/- 4,597) = 5146 MB/s
-test sherlock::name_holmes_nocase            ... bench:     429,765 ns/iter (+/- 16,685) = 1384 MB/s
-test sherlock::name_sherlock                 ... bench:      38,985 ns/iter (+/- 2,195) = 15260 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      49,610 ns/iter (+/- 2,005) = 11992 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     374,335 ns/iter (+/- 37,062) = 1589 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     355,559 ns/iter (+/- 40,873) = 1673 MB/s
-test sherlock::name_whitespace               ... bench:      57,616 ns/iter (+/- 5,124) = 10325 MB/s
-test sherlock::no_match_common               ... bench:     284,228 ns/iter (+/- 29,087) = 2093 MB/s
-test sherlock::no_match_really_common        ... bench:     287,263 ns/iter (+/- 22,755) = 2071 MB/s
-test sherlock::no_match_uncommon             ... bench:      14,030 ns/iter (+/- 526) = 42404 MB/s
-test sherlock::quotes                        ... bench:   5,563,019 ns/iter (+/- 537,611) = 106 MB/s
-test sherlock::repeated_class_negation       ... bench:  54,831,275 ns/iter (+/- 5,982,214) = 10 MB/s
-test sherlock::the_lower                     ... bench:   1,298,205 ns/iter (+/- 73,265) = 458 MB/s
-test sherlock::the_nocase                    ... bench:   1,572,579 ns/iter (+/- 63,536) = 378 MB/s
-test sherlock::the_upper                     ... bench:     112,795 ns/iter (+/- 4,179) = 5274 MB/s
-test sherlock::the_whitespace                ... bench:   2,630,026 ns/iter (+/- 227,760) = 226 MB/s
-test sherlock::word_ending_n                 ... bench:  26,975,356 ns/iter (+/- 2,531,982) = 22 MB/s
-test sherlock::words                         ... bench:  23,116,326 ns/iter (+/- 458,721) = 25 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 94 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/pcre1 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/pcre1
deleted file mode 100644
index f8a9100..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/pcre1
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:          32 ns/iter (+/- 38) = 12187 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          23 ns/iter (+/- 1) = 16956 MB/s
-test misc::anchored_literal_short_match      ... bench:          30 ns/iter (+/- 1) = 866 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          23 ns/iter (+/- 0) = 1130 MB/s
-test misc::easy0_1K                          ... bench:         261 ns/iter (+/- 21) = 4026 MB/s
-test misc::easy0_1MB                         ... bench:     202,218 ns/iter (+/- 16,050) = 5185 MB/s
-test misc::easy0_32                          ... bench:          49 ns/iter (+/- 3) = 1204 MB/s
-test misc::easy0_32K                         ... bench:       6,305 ns/iter (+/- 448) = 5201 MB/s
-test misc::easy1_1K                          ... bench:         245 ns/iter (+/- 5) = 4261 MB/s
-test misc::easy1_1MB                         ... bench:     198,215 ns/iter (+/- 10,461) = 5290 MB/s
-test misc::easy1_32                          ... bench:          49 ns/iter (+/- 1) = 1061 MB/s
-test misc::easy1_32K                         ... bench:       6,309 ns/iter (+/- 358) = 5197 MB/s
-test misc::hard_1K                           ... bench:       1,306 ns/iter (+/- 50) = 804 MB/s
-test misc::hard_1MB                          ... bench:   1,219,034 ns/iter (+/- 92,693) = 860 MB/s
-test misc::hard_32                           ... bench:          95 ns/iter (+/- 7) = 621 MB/s
-test misc::hard_32K                          ... bench:      37,713 ns/iter (+/- 948) = 869 MB/s
-test misc::literal                           ... bench:          29 ns/iter (+/- 1) = 1758 MB/s
-test misc::long_needle1                      ... bench:     548,012 ns/iter (+/- 26,029) = 182 MB/s
-test misc::long_needle2                      ... bench:     538,536 ns/iter (+/- 54,612) = 185 MB/s
-test misc::match_class                       ... bench:          94 ns/iter (+/- 3) = 861 MB/s
-test misc::match_class_in_range              ... bench:          29 ns/iter (+/- 1) = 2793 MB/s
-test misc::match_class_unicode               ... bench:         370 ns/iter (+/- 19) = 435 MB/s
-test misc::medium_1K                         ... bench:         256 ns/iter (+/- 13) = 4109 MB/s
-test misc::medium_1MB                        ... bench:     207,655 ns/iter (+/- 9,168) = 5049 MB/s
-test misc::medium_32                         ... bench:          51 ns/iter (+/- 5) = 1176 MB/s
-test misc::medium_32K                        ... bench:       6,144 ns/iter (+/- 327) = 5337 MB/s
-test misc::not_literal                       ... bench:         166 ns/iter (+/- 14) = 307 MB/s
-test misc::one_pass_long_prefix              ... bench:          27 ns/iter (+/- 2) = 962 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          29 ns/iter (+/- 1) = 896 MB/s
-test misc::one_pass_short                    ... bench:          55 ns/iter (+/- 2) = 309 MB/s
-test misc::one_pass_short_not                ... bench:          55 ns/iter (+/- 3) = 309 MB/s
-test misc::reallyhard2_1K                    ... bench:       4,404 ns/iter (+/- 346) = 236 MB/s
-test misc::reallyhard_1K                     ... bench:       1,365 ns/iter (+/- 52) = 769 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,118,777 ns/iter (+/- 72,209) = 937 MB/s
-test misc::reallyhard_32                     ... bench:         112 ns/iter (+/- 4) = 526 MB/s
-test misc::reallyhard_32K                    ... bench:      41,164 ns/iter (+/- 2,351) = 796 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,413 ns/iter (+/- 304) = 1812 MB/s
-test regexdna::find_new_lines                ... bench:   2,802,109 ns/iter (+/- 129,768) = 1814 MB/s
-test regexdna::subst1                        ... bench:   1,263,401 ns/iter (+/- 54,374) = 4023 MB/s
-test regexdna::subst10                       ... bench:   1,254,544 ns/iter (+/- 101,656) = 4051 MB/s
-test regexdna::subst11                       ... bench:   1,408,321 ns/iter (+/- 121,522) = 3609 MB/s
-test regexdna::subst2                        ... bench:   1,364,704 ns/iter (+/- 106,508) = 3724 MB/s
-test regexdna::subst3                        ... bench:   1,258,687 ns/iter (+/- 84,504) = 4038 MB/s
-test regexdna::subst4                        ... bench:   1,301,822 ns/iter (+/- 62,866) = 3904 MB/s
-test regexdna::subst5                        ... bench:   1,338,338 ns/iter (+/- 313,996) = 3798 MB/s
-test regexdna::subst6                        ... bench:   1,349,310 ns/iter (+/- 117,181) = 3767 MB/s
-test regexdna::subst7                        ... bench:   1,390,090 ns/iter (+/- 210,430) = 3656 MB/s
-test regexdna::subst8                        ... bench:   1,293,481 ns/iter (+/- 38,532) = 3930 MB/s
-test regexdna::subst9                        ... bench:   1,245,652 ns/iter (+/- 58,026) = 4080 MB/s
-test regexdna::variant1                      ... bench:  15,239,324 ns/iter (+/- 414,621) = 333 MB/s
-test regexdna::variant2                      ... bench:  16,489,922 ns/iter (+/- 825,229) = 308 MB/s
-test regexdna::variant3                      ... bench:  19,945,871 ns/iter (+/- 665,046) = 254 MB/s
-test regexdna::variant4                      ... bench:  18,604,011 ns/iter (+/- 712,670) = 273 MB/s
-test regexdna::variant5                      ... bench:  17,084,919 ns/iter (+/- 1,379,879) = 297 MB/s
-test regexdna::variant6                      ... bench:  16,918,130 ns/iter (+/- 975,620) = 300 MB/s
-test regexdna::variant7                      ... bench:  19,114,194 ns/iter (+/- 857,330) = 265 MB/s
-test regexdna::variant8                      ... bench:  23,831,138 ns/iter (+/- 878,576) = 213 MB/s
-test regexdna::variant9                      ... bench:  21,835,777 ns/iter (+/- 1,339,143) = 232 MB/s
-test sherlock::before_after_holmes           ... bench:   4,401,834 ns/iter (+/- 218,696) = 135 MB/s
-test sherlock::before_holmes                 ... bench:   4,436,717 ns/iter (+/- 109,324) = 134 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     497,667 ns/iter (+/- 19,212) = 1195 MB/s
-test sherlock::ing_suffix                    ... bench:   1,852,390 ns/iter (+/- 77,888) = 321 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   4,775,078 ns/iter (+/- 152,556) = 124 MB/s
-test sherlock::letters                       ... bench:  13,888,750 ns/iter (+/- 668,831) = 42 MB/s
-test sherlock::letters_lower                 ... bench:  13,452,405 ns/iter (+/- 453,184) = 44 MB/s
-test sherlock::letters_upper                 ... bench:   1,870,502 ns/iter (+/- 57,825) = 318 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     198,649 ns/iter (+/- 9,080) = 2994 MB/s
-test sherlock::name_alt1                     ... bench:     464,513 ns/iter (+/- 29,935) = 1280 MB/s
-test sherlock::name_alt2                     ... bench:     470,746 ns/iter (+/- 12,931) = 1263 MB/s
-test sherlock::name_alt3                     ... bench:     874,352 ns/iter (+/- 38,618) = 680 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,821,106 ns/iter (+/- 113,055) = 210 MB/s
-test sherlock::name_alt4                     ... bench:      78,753 ns/iter (+/- 3,111) = 7554 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,596,406 ns/iter (+/- 62,919) = 372 MB/s
-test sherlock::name_alt5                     ... bench:     655,870 ns/iter (+/- 32,597) = 907 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,732,595 ns/iter (+/- 75,827) = 343 MB/s
-test sherlock::name_holmes                   ... bench:     400,037 ns/iter (+/- 16,935) = 1487 MB/s
-test sherlock::name_holmes_nocase            ... bench:     501,467 ns/iter (+/- 20,805) = 1186 MB/s
-test sherlock::name_sherlock                 ... bench:     267,873 ns/iter (+/- 10,199) = 2220 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     202,107 ns/iter (+/- 10,314) = 2943 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,070,780 ns/iter (+/- 43,144) = 555 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,074,139 ns/iter (+/- 48,270) = 553 MB/s
-test sherlock::name_whitespace               ... bench:     271,978 ns/iter (+/- 10,137) = 2187 MB/s
-test sherlock::no_match_common               ... bench:     411,484 ns/iter (+/- 13,213) = 1445 MB/s
-test sherlock::no_match_really_common        ... bench:     403,709 ns/iter (+/- 12,415) = 1473 MB/s
-test sherlock::no_match_uncommon             ... bench:      27,730 ns/iter (+/- 928) = 21454 MB/s
-test sherlock::quotes                        ... bench:     515,141 ns/iter (+/- 17,799) = 1154 MB/s
-test sherlock::repeated_class_negation       ... bench:   5,842,243 ns/iter (+/- 282,478) = 101 MB/s
-test sherlock::the_lower                     ... bench:     725,059 ns/iter (+/- 36,233) = 820 MB/s
-test sherlock::the_nocase                    ... bench:     812,888 ns/iter (+/- 34,200) = 731 MB/s
-test sherlock::the_upper                     ... bench:      56,746 ns/iter (+/- 2,186) = 10484 MB/s
-test sherlock::the_whitespace                ... bench:     920,705 ns/iter (+/- 37,325) = 646 MB/s
-test sherlock::word_ending_n                 ... bench:   5,625,614 ns/iter (+/- 199,408) = 105 MB/s
-test sherlock::words                         ... bench:   7,122,561 ns/iter (+/- 161,013) = 83 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/pcre2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/pcre2
deleted file mode 100644
index 51853011..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/pcre2
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:          16 ns/iter (+/- 0) = 24375 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          13 ns/iter (+/- 1) = 30000 MB/s
-test misc::anchored_literal_short_match      ... bench:          16 ns/iter (+/- 1) = 1625 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          13 ns/iter (+/- 1) = 2000 MB/s
-test misc::easy0_1K                          ... bench:         104 ns/iter (+/- 5) = 10105 MB/s
-test misc::easy0_1MB                         ... bench:      64,102 ns/iter (+/- 4,103) = 16358 MB/s
-test misc::easy0_32                          ... bench:          32 ns/iter (+/- 4) = 1843 MB/s
-test misc::easy0_32K                         ... bench:       2,042 ns/iter (+/- 152) = 16060 MB/s
-test misc::easy1_1K                          ... bench:         102 ns/iter (+/- 11) = 10235 MB/s
-test misc::easy1_1MB                         ... bench:      63,117 ns/iter (+/- 4,547) = 16613 MB/s
-test misc::easy1_32                          ... bench:          33 ns/iter (+/- 4) = 1575 MB/s
-test misc::easy1_32K                         ... bench:       2,019 ns/iter (+/- 181) = 16239 MB/s
-test misc::hard_1K                           ... bench:       1,236 ns/iter (+/- 82) = 850 MB/s
-test misc::hard_1MB                          ... bench:   1,041,354 ns/iter (+/- 39,123) = 1006 MB/s
-test misc::hard_32                           ... bench:          86 ns/iter (+/- 8) = 686 MB/s
-test misc::hard_32K                          ... bench:      33,054 ns/iter (+/- 1,813) = 992 MB/s
-test misc::literal                           ... bench:          20 ns/iter (+/- 2) = 2550 MB/s
-test misc::long_needle1                      ... bench:     501,732 ns/iter (+/- 52,173) = 199 MB/s
-test misc::long_needle2                      ... bench:     515,127 ns/iter (+/- 48,790) = 194 MB/s
-test misc::match_class                       ... bench:          55 ns/iter (+/- 7) = 1472 MB/s
-test misc::match_class_in_range              ... bench:          19 ns/iter (+/- 2) = 4263 MB/s
-test misc::match_class_unicode               ... bench:         342 ns/iter (+/- 60) = 470 MB/s
-test misc::medium_1K                         ... bench:         106 ns/iter (+/- 4) = 9924 MB/s
-test misc::medium_1MB                        ... bench:      63,011 ns/iter (+/- 4,942) = 16641 MB/s
-test misc::medium_32                         ... bench:          32 ns/iter (+/- 3) = 1875 MB/s
-test misc::medium_32K                        ... bench:       2,068 ns/iter (+/- 189) = 15858 MB/s
-test misc::not_literal                       ... bench:         147 ns/iter (+/- 13) = 346 MB/s
-test misc::one_pass_long_prefix              ... bench:          15 ns/iter (+/- 1) = 1733 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          15 ns/iter (+/- 1) = 1733 MB/s
-test misc::one_pass_short                    ... bench:          42 ns/iter (+/- 3) = 404 MB/s
-test misc::one_pass_short_not                ... bench:          43 ns/iter (+/- 5) = 395 MB/s
-test misc::reallyhard2_1K                    ... bench:       4,356 ns/iter (+/- 499) = 238 MB/s
-test misc::reallyhard_1K                     ... bench:       1,196 ns/iter (+/- 113) = 878 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,070,155 ns/iter (+/- 90,895) = 979 MB/s
-test misc::reallyhard_32                     ... bench:          93 ns/iter (+/- 12) = 634 MB/s
-test misc::reallyhard_32K                    ... bench:      33,521 ns/iter (+/- 2,663) = 978 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       3,065 ns/iter (+/- 393) = 2610 MB/s
-test regexdna::find_new_lines                ... bench:   1,891,736 ns/iter (+/- 232,990) = 2687 MB/s
-test regexdna::subst1                        ... bench:     920,853 ns/iter (+/- 75,276) = 5520 MB/s
-test regexdna::subst10                       ... bench:     892,533 ns/iter (+/- 77,177) = 5695 MB/s
-test regexdna::subst11                       ... bench:     869,335 ns/iter (+/- 75,754) = 5847 MB/s
-test regexdna::subst2                        ... bench:     901,876 ns/iter (+/- 75,287) = 5636 MB/s
-test regexdna::subst3                        ... bench:     870,185 ns/iter (+/- 53,535) = 5841 MB/s
-test regexdna::subst4                        ... bench:     859,924 ns/iter (+/- 63,888) = 5911 MB/s
-test regexdna::subst5                        ... bench:     886,748 ns/iter (+/- 87,929) = 5732 MB/s
-test regexdna::subst6                        ... bench:     870,428 ns/iter (+/- 47,015) = 5840 MB/s
-test regexdna::subst7                        ... bench:     865,513 ns/iter (+/- 41,507) = 5873 MB/s
-test regexdna::subst8                        ... bench:     870,030 ns/iter (+/- 110,449) = 5842 MB/s
-test regexdna::subst9                        ... bench:     875,649 ns/iter (+/- 32,905) = 5805 MB/s
-test regexdna::variant1                      ... bench:   9,234,989 ns/iter (+/- 127,076) = 550 MB/s
-test regexdna::variant2                      ... bench:  11,759,628 ns/iter (+/- 575,788) = 432 MB/s
-test regexdna::variant3                      ... bench:  11,229,965 ns/iter (+/- 522,759) = 452 MB/s
-test regexdna::variant4                      ... bench:  10,040,716 ns/iter (+/- 309,357) = 506 MB/s
-test regexdna::variant5                      ... bench:  10,052,052 ns/iter (+/- 522,338) = 505 MB/s
-test regexdna::variant6                      ... bench:  10,719,366 ns/iter (+/- 577,988) = 474 MB/s
-test regexdna::variant7                      ... bench:  11,076,094 ns/iter (+/- 1,291,237) = 458 MB/s
-test regexdna::variant8                      ... bench:  11,855,290 ns/iter (+/- 667,429) = 428 MB/s
-test regexdna::variant9                      ... bench:  12,531,240 ns/iter (+/- 606,198) = 405 MB/s
-test sherlock::before_after_holmes           ... bench:   4,169,656 ns/iter (+/- 222,900) = 142 MB/s
-test sherlock::before_holmes                 ... bench:   4,144,394 ns/iter (+/- 170,133) = 143 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      74,437 ns/iter (+/- 4,266) = 7992 MB/s
-test sherlock::ing_suffix                    ... bench:   1,731,507 ns/iter (+/- 162,892) = 343 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   4,418,194 ns/iter (+/- 240,220) = 134 MB/s
-test sherlock::letters                       ... bench:   8,847,041 ns/iter (+/- 392,402) = 67 MB/s
-test sherlock::letters_lower                 ... bench:   8,547,432 ns/iter (+/- 304,256) = 69 MB/s
-test sherlock::letters_upper                 ... bench:   1,584,248 ns/iter (+/- 51,331) = 375 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:      38,057 ns/iter (+/- 1,666) = 15632 MB/s
-test sherlock::name_alt1                     ... bench:      50,415 ns/iter (+/- 3,173) = 11800 MB/s
-test sherlock::name_alt2                     ... bench:      66,062 ns/iter (+/- 2,807) = 9005 MB/s
-test sherlock::name_alt3                     ... bench:     720,097 ns/iter (+/- 32,351) = 826 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,591,049 ns/iter (+/- 86,537) = 229 MB/s
-test sherlock::name_alt4                     ... bench:      65,860 ns/iter (+/- 2,780) = 9033 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,204,839 ns/iter (+/- 41,087) = 493 MB/s
-test sherlock::name_alt5                     ... bench:     615,483 ns/iter (+/- 24,177) = 966 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,467,461 ns/iter (+/- 71,032) = 405 MB/s
-test sherlock::name_holmes                   ... bench:      48,997 ns/iter (+/- 2,471) = 12142 MB/s
-test sherlock::name_holmes_nocase            ... bench:      88,549 ns/iter (+/- 4,814) = 6718 MB/s
-test sherlock::name_sherlock                 ... bench:      38,309 ns/iter (+/- 1,354) = 15529 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      39,062 ns/iter (+/- 4,253) = 15230 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     713,355 ns/iter (+/- 77,990) = 833 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     719,747 ns/iter (+/- 85,736) = 826 MB/s
-test sherlock::name_whitespace               ... bench:      39,161 ns/iter (+/- 3,678) = 15191 MB/s
-test sherlock::no_match_common               ... bench:      35,574 ns/iter (+/- 3,433) = 16723 MB/s
-test sherlock::no_match_really_common        ... bench:      56,847 ns/iter (+/- 7,068) = 10465 MB/s
-test sherlock::no_match_uncommon             ... bench:      36,185 ns/iter (+/- 4,938) = 16441 MB/s
-test sherlock::quotes                        ... bench:     454,135 ns/iter (+/- 18,816) = 1310 MB/s
-test sherlock::repeated_class_negation       ... bench:   5,724,068 ns/iter (+/- 342,211) = 103 MB/s
-test sherlock::the_lower                     ... bench:     256,190 ns/iter (+/- 25,452) = 2322 MB/s
-test sherlock::the_nocase                    ... bench:     284,080 ns/iter (+/- 17,165) = 2094 MB/s
-test sherlock::the_upper                     ... bench:      56,120 ns/iter (+/- 2,826) = 10601 MB/s
-test sherlock::the_whitespace                ... bench:     456,734 ns/iter (+/- 23,405) = 1302 MB/s
-test sherlock::word_ending_n                 ... bench:   5,079,288 ns/iter (+/- 214,895) = 117 MB/s
-test sherlock::words                         ... bench:   5,200,092 ns/iter (+/- 250,085) = 114 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/re2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/re2
deleted file mode 100644
index 3e1585a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/re2
+++ /dev/null
@@ -1,101 +0,0 @@
-
-running 96 tests
-test misc::anchored_literal_long_match       ... bench:          73 ns/iter (+/- 8) = 5342 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          16 ns/iter (+/- 1) = 24375 MB/s
-test misc::anchored_literal_short_match      ... bench:          73 ns/iter (+/- 9) = 356 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          16 ns/iter (+/- 1) = 1625 MB/s
-test misc::easy0_1K                          ... bench:         119 ns/iter (+/- 11) = 8831 MB/s
-test misc::easy0_1MB                         ... bench:      25,312 ns/iter (+/- 875) = 41427 MB/s
-test misc::easy0_32                          ... bench:         112 ns/iter (+/- 5) = 526 MB/s
-test misc::easy0_32K                         ... bench:         534 ns/iter (+/- 43) = 61413 MB/s
-test misc::easy1_1K                          ... bench:         109 ns/iter (+/- 9) = 9577 MB/s
-test misc::easy1_1MB                         ... bench:      23,892 ns/iter (+/- 715) = 43889 MB/s
-test misc::easy1_32                          ... bench:         102 ns/iter (+/- 8) = 509 MB/s
-test misc::easy1_32K                         ... bench:         519 ns/iter (+/- 54) = 63175 MB/s
-test misc::hard_1K                           ... bench:       1,859 ns/iter (+/- 202) = 565 MB/s
-test misc::hard_1MB                          ... bench:   1,871,446 ns/iter (+/- 99,961) = 560 MB/s
-test misc::hard_32                           ... bench:         162 ns/iter (+/- 20) = 364 MB/s
-test misc::hard_32K                          ... bench:      57,459 ns/iter (+/- 4,672) = 570 MB/s
-test misc::literal                           ... bench:          70 ns/iter (+/- 8) = 728 MB/s
-test misc::long_needle1                      ... bench:     130,995 ns/iter (+/- 4,935) = 763 MB/s
-test misc::long_needle2                      ... bench:     129,668 ns/iter (+/- 8,852) = 771 MB/s
-test misc::match_class                       ... bench:         195 ns/iter (+/- 16) = 415 MB/s
-test misc::match_class_in_range              ... bench:         194 ns/iter (+/- 22) = 417 MB/s
-test misc::match_class_unicode               ... bench:         630 ns/iter (+/- 61) = 255 MB/s
-test misc::medium_1K                         ... bench:       1,699 ns/iter (+/- 147) = 619 MB/s
-test misc::medium_1MB                        ... bench:   1,633,131 ns/iter (+/- 65,889) = 642 MB/s
-test misc::medium_32                         ... bench:         169 ns/iter (+/- 18) = 355 MB/s
-test misc::medium_32K                        ... bench:      51,313 ns/iter (+/- 1,855) = 639 MB/s
-test misc::no_exponential                    ... bench:         216 ns/iter (+/- 13) = 462 MB/s
-test misc::not_literal                       ... bench:         140 ns/iter (+/- 6) = 364 MB/s
-test misc::one_pass_long_prefix              ... bench:          71 ns/iter (+/- 2) = 366 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         109 ns/iter (+/- 9) = 238 MB/s
-test misc::one_pass_short                    ... bench:          99 ns/iter (+/- 7) = 171 MB/s
-test misc::one_pass_short_not                ... bench:          96 ns/iter (+/- 5) = 177 MB/s
-test misc::reallyhard2_1K                    ... bench:       1,405 ns/iter (+/- 134) = 740 MB/s
-test misc::reallyhard_1K                     ... bench:       1,875 ns/iter (+/- 168) = 560 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,853,207 ns/iter (+/- 103,218) = 565 MB/s
-test misc::reallyhard_32                     ... bench:         157 ns/iter (+/- 11) = 375 MB/s
-test misc::reallyhard_32K                    ... bench:      57,880 ns/iter (+/- 5,319) = 566 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:      12,686 ns/iter (+/- 536) = 630 MB/s
-test regexdna::find_new_lines                ... bench:  28,761,913 ns/iter (+/- 1,447,326) = 176 MB/s
-test regexdna::subst1                        ... bench:   4,629,782 ns/iter (+/- 142,214) = 1097 MB/s
-test regexdna::subst10                       ... bench:   4,692,819 ns/iter (+/- 156,805) = 1083 MB/s
-test regexdna::subst11                       ... bench:   4,652,438 ns/iter (+/- 206,457) = 1092 MB/s
-test regexdna::subst2                        ... bench:   4,682,943 ns/iter (+/- 176,335) = 1085 MB/s
-test regexdna::subst3                        ... bench:   4,646,162 ns/iter (+/- 241,873) = 1094 MB/s
-test regexdna::subst4                        ... bench:   4,653,380 ns/iter (+/- 188,899) = 1092 MB/s
-test regexdna::subst5                        ... bench:   4,770,480 ns/iter (+/- 238,930) = 1065 MB/s
-test regexdna::subst6                        ... bench:   4,671,427 ns/iter (+/- 286,241) = 1088 MB/s
-test regexdna::subst7                        ... bench:   4,658,214 ns/iter (+/- 210,723) = 1091 MB/s
-test regexdna::subst8                        ... bench:   4,909,600 ns/iter (+/- 417,894) = 1035 MB/s
-test regexdna::subst9                        ... bench:   4,910,285 ns/iter (+/- 587,024) = 1035 MB/s
-test regexdna::variant1                      ... bench:  20,895,772 ns/iter (+/- 2,313,771) = 243 MB/s
-test regexdna::variant2                      ... bench:  20,465,984 ns/iter (+/- 1,913,613) = 248 MB/s
-test regexdna::variant3                      ... bench:  19,469,527 ns/iter (+/- 1,367,226) = 261 MB/s
-test regexdna::variant4                      ... bench:  21,662,238 ns/iter (+/- 1,489,235) = 234 MB/s
-test regexdna::variant5                      ... bench:  21,808,098 ns/iter (+/- 2,294,522) = 233 MB/s
-test regexdna::variant6                      ... bench:  21,208,952 ns/iter (+/- 986,848) = 239 MB/s
-test regexdna::variant7                      ... bench:  20,289,473 ns/iter (+/- 595,084) = 250 MB/s
-test regexdna::variant8                      ... bench:  17,765,356 ns/iter (+/- 503,529) = 286 MB/s
-test regexdna::variant9                      ... bench:  13,222,010 ns/iter (+/- 509,278) = 384 MB/s
-test sherlock::before_after_holmes           ... bench:   1,313,676 ns/iter (+/- 52,992) = 452 MB/s
-test sherlock::before_holmes                 ... bench:   1,337,432 ns/iter (+/- 37,054) = 444 MB/s
-test sherlock::everything_greedy             ... bench:   6,080,272 ns/iter (+/- 110,011) = 97 MB/s
-test sherlock::everything_greedy_nl          ... bench:   2,395,932 ns/iter (+/- 123,521) = 248 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   1,052,245 ns/iter (+/- 33,929) = 565 MB/s
-test sherlock::holmes_coword_watson          ... bench:   1,063,007 ns/iter (+/- 34,462) = 559 MB/s
-test sherlock::ing_suffix                    ... bench:   2,703,395 ns/iter (+/- 63,263) = 220 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,608,756 ns/iter (+/- 42,100) = 369 MB/s
-test sherlock::letters                       ... bench:  68,220,129 ns/iter (+/- 3,602,216) = 8 MB/s
-test sherlock::letters_lower                 ... bench:  67,390,101 ns/iter (+/- 6,032,867) = 8 MB/s
-test sherlock::letters_upper                 ... bench:   3,708,482 ns/iter (+/- 235,128) = 160 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,816,517 ns/iter (+/- 99,081) = 211 MB/s
-test sherlock::name_alt1                     ... bench:      53,193 ns/iter (+/- 1,575) = 11184 MB/s
-test sherlock::name_alt2                     ... bench:   1,133,704 ns/iter (+/- 36,634) = 524 MB/s
-test sherlock::name_alt3                     ... bench:   1,227,785 ns/iter (+/- 31,742) = 484 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,451,285 ns/iter (+/- 103,766) = 242 MB/s
-test sherlock::name_alt4                     ... bench:   1,168,955 ns/iter (+/- 87,785) = 508 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,699,899 ns/iter (+/- 91,762) = 349 MB/s
-test sherlock::name_alt5                     ... bench:   1,167,232 ns/iter (+/- 51,695) = 509 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,805,463 ns/iter (+/- 74,631) = 329 MB/s
-test sherlock::name_holmes                   ... bench:     108,195 ns/iter (+/- 3,815) = 5498 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,360,092 ns/iter (+/- 60,416) = 437 MB/s
-test sherlock::name_sherlock                 ... bench:      40,376 ns/iter (+/- 5,104) = 14734 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      41,361 ns/iter (+/- 2,553) = 14383 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,317,594 ns/iter (+/- 168,248) = 451 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,284,026 ns/iter (+/- 92,188) = 463 MB/s
-test sherlock::name_whitespace               ... bench:      44,973 ns/iter (+/- 5,888) = 13228 MB/s
-test sherlock::no_match_common               ... bench:     341,867 ns/iter (+/- 32,045) = 1740 MB/s
-test sherlock::no_match_really_common        ... bench:     331,760 ns/iter (+/- 43,608) = 1793 MB/s
-test sherlock::no_match_uncommon             ... bench:      14,285 ns/iter (+/- 760) = 41647 MB/s
-test sherlock::quotes                        ... bench:   1,342,144 ns/iter (+/- 96,471) = 443 MB/s
-test sherlock::the_lower                     ... bench:   1,722,919 ns/iter (+/- 83,873) = 345 MB/s
-test sherlock::the_nocase                    ... bench:   2,866,258 ns/iter (+/- 117,349) = 207 MB/s
-test sherlock::the_upper                     ... bench:     151,020 ns/iter (+/- 13,454) = 3939 MB/s
-test sherlock::the_whitespace                ... bench:   1,597,329 ns/iter (+/- 149,689) = 372 MB/s
-test sherlock::word_ending_n                 ... bench:   2,193,027 ns/iter (+/- 136,408) = 271 MB/s
-test sherlock::words                         ... bench:  20,721,148 ns/iter (+/- 1,968,912) = 28 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 96 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/rust
deleted file mode 100644
index 53ab222..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/06/rust
+++ /dev/null
@@ -1,113 +0,0 @@
-
-running 108 tests
-test misc::anchored_literal_long_match       ... bench:          22 ns/iter (+/- 2) = 17727 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          27 ns/iter (+/- 2) = 14444 MB/s
-test misc::anchored_literal_short_match      ... bench:          22 ns/iter (+/- 1) = 1181 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          26 ns/iter (+/- 2) = 1000 MB/s
-test misc::easy0_1K                          ... bench:          16 ns/iter (+/- 1) = 65687 MB/s
-test misc::easy0_1MB                         ... bench:          19 ns/iter (+/- 2) = 55189631 MB/s
-test misc::easy0_32                          ... bench:          15 ns/iter (+/- 1) = 3933 MB/s
-test misc::easy0_32K                         ... bench:          16 ns/iter (+/- 0) = 2049687 MB/s
-test misc::easy1_1K                          ... bench:          43 ns/iter (+/- 2) = 24279 MB/s
-test misc::easy1_1MB                         ... bench:          45 ns/iter (+/- 4) = 23302133 MB/s
-test misc::easy1_32                          ... bench:          43 ns/iter (+/- 5) = 1209 MB/s
-test misc::easy1_32K                         ... bench:          43 ns/iter (+/- 2) = 762511 MB/s
-test misc::hard_1K                           ... bench:          53 ns/iter (+/- 6) = 19830 MB/s
-test misc::hard_1MB                          ... bench:          57 ns/iter (+/- 1) = 18396543 MB/s
-test misc::hard_32                           ... bench:          53 ns/iter (+/- 4) = 1113 MB/s
-test misc::hard_32K                          ... bench:          53 ns/iter (+/- 6) = 618773 MB/s
-test misc::literal                           ... bench:          13 ns/iter (+/- 1) = 3923 MB/s
-test misc::long_needle1                      ... bench:       1,203 ns/iter (+/- 55) = 83126 MB/s
-test misc::long_needle2                      ... bench:     149,418 ns/iter (+/- 13,825) = 669 MB/s
-test misc::match_class                       ... bench:          62 ns/iter (+/- 6) = 1306 MB/s
-test misc::match_class_in_range              ... bench:          23 ns/iter (+/- 2) = 3521 MB/s
-test misc::match_class_unicode               ... bench:         268 ns/iter (+/- 30) = 600 MB/s
-test misc::medium_1K                         ... bench:          16 ns/iter (+/- 0) = 65750 MB/s
-test misc::medium_1MB                        ... bench:          20 ns/iter (+/- 15) = 52430200 MB/s
-test misc::medium_32                         ... bench:          16 ns/iter (+/- 2) = 3750 MB/s
-test misc::medium_32K                        ... bench:          16 ns/iter (+/- 1) = 2049750 MB/s
-test misc::no_exponential                    ... bench:         353 ns/iter (+/- 26) = 283 MB/s
-test misc::not_literal                       ... bench:          97 ns/iter (+/- 9) = 525 MB/s
-test misc::one_pass_long_prefix              ... bench:          58 ns/iter (+/- 5) = 448 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          60 ns/iter (+/- 6) = 433 MB/s
-test misc::one_pass_short                    ... bench:          43 ns/iter (+/- 4) = 395 MB/s
-test misc::one_pass_short_not                ... bench:          46 ns/iter (+/- 2) = 369 MB/s
-test misc::reallyhard2_1K                    ... bench:          62 ns/iter (+/- 5) = 16774 MB/s
-test misc::reallyhard_1K                     ... bench:       1,650 ns/iter (+/- 176) = 636 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,635,447 ns/iter (+/- 97,611) = 641 MB/s
-test misc::reallyhard_32                     ... bench:         109 ns/iter (+/- 9) = 541 MB/s
-test misc::reallyhard_32K                    ... bench:      50,991 ns/iter (+/- 4,031) = 643 MB/s
-test misc::replace_all                       ... bench:         155 ns/iter (+/- 8)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,254 ns/iter (+/- 489) = 1880 MB/s
-test misc::short_haystack_1000000x           ... bench:      91,124 ns/iter (+/- 4,584) = 87792 MB/s
-test misc::short_haystack_100000x            ... bench:      10,681 ns/iter (+/- 420) = 74900 MB/s
-test misc::short_haystack_10000x             ... bench:       3,240 ns/iter (+/- 395) = 24694 MB/s
-test misc::short_haystack_1000x              ... bench:         403 ns/iter (+/- 48) = 19878 MB/s
-test misc::short_haystack_100x               ... bench:         303 ns/iter (+/- 27) = 2676 MB/s
-test misc::short_haystack_10x                ... bench:         272 ns/iter (+/- 27) = 334 MB/s
-test misc::short_haystack_1x                 ... bench:         264 ns/iter (+/- 32) = 71 MB/s
-test misc::short_haystack_2x                 ... bench:         269 ns/iter (+/- 25) = 100 MB/s
-test misc::short_haystack_3x                 ... bench:         264 ns/iter (+/- 26) = 132 MB/s
-test misc::short_haystack_4x                 ... bench:         271 ns/iter (+/- 28) = 158 MB/s
-test regexdna::find_new_lines                ... bench:  13,700,405 ns/iter (+/- 647,840) = 371 MB/s
-test regexdna::subst1                        ... bench:     806,342 ns/iter (+/- 48,014) = 6304 MB/s
-test regexdna::subst10                       ... bench:     794,403 ns/iter (+/- 40,393) = 6399 MB/s
-test regexdna::subst11                       ... bench:     801,963 ns/iter (+/- 46,164) = 6338 MB/s
-test regexdna::subst2                        ... bench:     779,768 ns/iter (+/- 81,505) = 6519 MB/s
-test regexdna::subst3                        ... bench:     777,024 ns/iter (+/- 52,795) = 6542 MB/s
-test regexdna::subst4                        ... bench:     769,862 ns/iter (+/- 48,980) = 6603 MB/s
-test regexdna::subst5                        ... bench:     779,754 ns/iter (+/- 39,784) = 6519 MB/s
-test regexdna::subst6                        ... bench:     769,400 ns/iter (+/- 69,980) = 6606 MB/s
-test regexdna::subst7                        ... bench:     771,457 ns/iter (+/- 40,490) = 6589 MB/s
-test regexdna::subst8                        ... bench:     808,468 ns/iter (+/- 53,093) = 6287 MB/s
-test regexdna::subst9                        ... bench:     771,869 ns/iter (+/- 50,966) = 6585 MB/s
-test regexdna::variant1                      ... bench:   3,093,422 ns/iter (+/- 222,818) = 1643 MB/s
-test regexdna::variant2                      ... bench:   6,520,178 ns/iter (+/- 400,704) = 779 MB/s
-test regexdna::variant3                      ... bench:   7,297,818 ns/iter (+/- 319,866) = 696 MB/s
-test regexdna::variant4                      ... bench:   7,356,045 ns/iter (+/- 530,375) = 691 MB/s
-test regexdna::variant5                      ... bench:   5,977,343 ns/iter (+/- 296,375) = 850 MB/s
-test regexdna::variant6                      ... bench:   6,045,776 ns/iter (+/- 270,954) = 840 MB/s
-test regexdna::variant7                      ... bench:   5,447,060 ns/iter (+/- 223,542) = 933 MB/s
-test regexdna::variant8                      ... bench:   5,615,676 ns/iter (+/- 419,756) = 905 MB/s
-test regexdna::variant9                      ... bench:   5,457,949 ns/iter (+/- 439,821) = 931 MB/s
-test sherlock::before_after_holmes           ... bench:     957,660 ns/iter (+/- 96,491) = 621 MB/s
-test sherlock::before_holmes                 ... bench:      65,680 ns/iter (+/- 3,085) = 9058 MB/s
-test sherlock::everything_greedy             ... bench:   2,151,577 ns/iter (+/- 70,114) = 276 MB/s
-test sherlock::everything_greedy_nl          ... bench:     836,942 ns/iter (+/- 81,010) = 710 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     137,441 ns/iter (+/- 14,157) = 4328 MB/s
-test sherlock::holmes_coword_watson          ... bench:     514,100 ns/iter (+/- 48,210) = 1157 MB/s
-test sherlock::ing_suffix                    ... bench:     409,126 ns/iter (+/- 23,370) = 1454 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,117,457 ns/iter (+/- 53,545) = 532 MB/s
-test sherlock::letters                       ... bench:  23,152,671 ns/iter (+/- 1,002,203) = 25 MB/s
-test sherlock::letters_lower                 ... bench:  22,521,833 ns/iter (+/- 1,178,375) = 26 MB/s
-test sherlock::letters_upper                 ... bench:   1,841,871 ns/iter (+/- 108,471) = 323 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     946,126 ns/iter (+/- 53,405) = 628 MB/s
-test sherlock::name_alt1                     ... bench:      25,830 ns/iter (+/- 1,054) = 23032 MB/s
-test sherlock::name_alt2                     ... bench:     116,879 ns/iter (+/- 6,000) = 5090 MB/s
-test sherlock::name_alt3                     ... bench:     125,746 ns/iter (+/- 7,121) = 4731 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,203,114 ns/iter (+/- 72,037) = 494 MB/s
-test sherlock::name_alt4                     ... bench:     156,208 ns/iter (+/- 5,188) = 3808 MB/s
-test sherlock::name_alt4_nocase              ... bench:     222,618 ns/iter (+/- 30,017) = 2672 MB/s
-test sherlock::name_alt5                     ... bench:     133,440 ns/iter (+/- 14,831) = 4458 MB/s
-test sherlock::name_alt5_nocase              ... bench:     558,482 ns/iter (+/- 22,435) = 1065 MB/s
-test sherlock::name_holmes                   ... bench:      30,800 ns/iter (+/- 2,933) = 19316 MB/s
-test sherlock::name_holmes_nocase            ... bench:     190,736 ns/iter (+/- 24,310) = 3119 MB/s
-test sherlock::name_sherlock                 ... bench:      56,238 ns/iter (+/- 3,310) = 10578 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      24,129 ns/iter (+/- 2,662) = 24656 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     174,649 ns/iter (+/- 13,487) = 3406 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     157,674 ns/iter (+/- 7,888) = 3773 MB/s
-test sherlock::name_whitespace               ... bench:      74,637 ns/iter (+/- 6,523) = 7971 MB/s
-test sherlock::no_match_common               ... bench:      15,140 ns/iter (+/- 969) = 39295 MB/s
-test sherlock::no_match_really_common        ... bench:     305,112 ns/iter (+/- 31,314) = 1949 MB/s
-test sherlock::no_match_uncommon             ... bench:      15,539 ns/iter (+/- 1,269) = 38286 MB/s
-test sherlock::quotes                        ... bench:     482,180 ns/iter (+/- 33,736) = 1233 MB/s
-test sherlock::repeated_class_negation       ... bench:  78,428,426 ns/iter (+/- 6,705,217) = 7 MB/s
-test sherlock::the_lower                     ... bench:     576,511 ns/iter (+/- 21,735) = 1031 MB/s
-test sherlock::the_nocase                    ... bench:     413,565 ns/iter (+/- 42,941) = 1438 MB/s
-test sherlock::the_upper                     ... bench:      34,491 ns/iter (+/- 1,901) = 17248 MB/s
-test sherlock::the_whitespace                ... bench:   1,061,365 ns/iter (+/- 66,639) = 560 MB/s
-test sherlock::word_ending_n                 ... bench:   1,763,795 ns/iter (+/- 83,031) = 337 MB/s
-test sherlock::words                         ... bench:   9,281,896 ns/iter (+/- 934,308) = 64 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 108 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/boost b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/boost
deleted file mode 100644
index 5a13a10..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/boost
+++ /dev/null
@@ -1,97 +0,0 @@
-
-running 92 tests
-test misc::anchored_literal_long_match       ... bench:         174 ns/iter (+/- 0) = 2241 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         329 ns/iter (+/- 3) = 1185 MB/s
-test misc::anchored_literal_short_match      ... bench:         168 ns/iter (+/- 0) = 154 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         121 ns/iter (+/- 0) = 214 MB/s
-test misc::easy0_1K                          ... bench:         660 ns/iter (+/- 3) = 1592 MB/s
-test misc::easy0_1MB                         ... bench:     514,707 ns/iter (+/- 2,689) = 2037 MB/s
-test misc::easy0_32                          ... bench:         170 ns/iter (+/- 2) = 347 MB/s
-test misc::easy0_32K                         ... bench:      16,208 ns/iter (+/- 99) = 2023 MB/s
-test misc::easy1_1K                          ... bench:         756 ns/iter (+/- 1) = 1380 MB/s
-test misc::easy1_1MB                         ... bench:     514,816 ns/iter (+/- 2,832) = 2036 MB/s
-test misc::easy1_32                          ... bench:         271 ns/iter (+/- 3) = 191 MB/s
-test misc::easy1_32K                         ... bench:      16,316 ns/iter (+/- 93) = 2009 MB/s
-test misc::hard_1K                           ... bench:      63,089 ns/iter (+/- 594) = 16 MB/s
-test misc::hard_1MB                          ... bench:  66,537,328 ns/iter (+/- 866,695) = 15 MB/s
-test misc::hard_32                           ... bench:       2,125 ns/iter (+/- 8) = 27 MB/s
-test misc::hard_32K                          ... bench:   2,075,568 ns/iter (+/- 6,634) = 15 MB/s
-test misc::literal                           ... bench:         143 ns/iter (+/- 1) = 356 MB/s
-test misc::long_needle1                      ... bench:   6,557,839 ns/iter (+/- 27,779) = 15 MB/s
-test misc::long_needle2                      ... bench:   6,557,332 ns/iter (+/- 101,494) = 15 MB/s
-test misc::match_class                       ... bench:         157 ns/iter (+/- 0) = 515 MB/s
-test misc::match_class_in_range              ... bench:         157 ns/iter (+/- 4) = 515 MB/s
-test misc::medium_1K                         ... bench:         665 ns/iter (+/- 2) = 1581 MB/s
-test misc::medium_1MB                        ... bench:     514,869 ns/iter (+/- 5,832) = 2036 MB/s
-test misc::medium_32                         ... bench:         167 ns/iter (+/- 1) = 359 MB/s
-test misc::medium_32K                        ... bench:      16,253 ns/iter (+/- 74) = 2017 MB/s
-test misc::no_exponential                    ... bench:       1,717 ns/iter (+/- 13) = 58 MB/s
-test misc::not_literal                       ... bench:       1,084 ns/iter (+/- 16) = 47 MB/s
-test misc::one_pass_long_prefix              ... bench:         169 ns/iter (+/- 2) = 153 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         169 ns/iter (+/- 6) = 153 MB/s
-test misc::one_pass_short                    ... bench:       1,105 ns/iter (+/- 2) = 15 MB/s
-test misc::one_pass_short_not                ... bench:       1,076 ns/iter (+/- 10) = 15 MB/s
-test misc::reallyhard2_1K                    ... bench:       4,935 ns/iter (+/- 39) = 210 MB/s
-test misc::reallyhard_1K                     ... bench:      63,076 ns/iter (+/- 226) = 16 MB/s
-test misc::reallyhard_1MB                    ... bench:  68,534,102 ns/iter (+/- 125,043) = 15 MB/s
-test misc::reallyhard_32                     ... bench:       2,134 ns/iter (+/- 8) = 27 MB/s
-test misc::reallyhard_32K                    ... bench:   2,074,582 ns/iter (+/- 5,943) = 15 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,001 ns/iter (+/- 5) = 1999 MB/s
-test regexdna::find_new_lines                ... bench:  12,942,765 ns/iter (+/- 21,828) = 392 MB/s
-test regexdna::subst1                        ... bench:   6,241,036 ns/iter (+/- 13,806) = 814 MB/s
-test regexdna::subst10                       ... bench:   6,247,896 ns/iter (+/- 28,406) = 813 MB/s
-test regexdna::subst11                       ... bench:   6,240,960 ns/iter (+/- 20,660) = 814 MB/s
-test regexdna::subst2                        ... bench:   6,245,156 ns/iter (+/- 17,639) = 813 MB/s
-test regexdna::subst3                        ... bench:   6,276,881 ns/iter (+/- 14,851) = 809 MB/s
-test regexdna::subst4                        ... bench:   6,249,549 ns/iter (+/- 30,600) = 813 MB/s
-test regexdna::subst5                        ... bench:   6,251,942 ns/iter (+/- 33,889) = 813 MB/s
-test regexdna::subst6                        ... bench:   6,244,011 ns/iter (+/- 11,642) = 814 MB/s
-test regexdna::subst7                        ... bench:   6,283,445 ns/iter (+/- 11,693) = 809 MB/s
-test regexdna::subst8                        ... bench:   6,247,310 ns/iter (+/- 11,590) = 813 MB/s
-test regexdna::subst9                        ... bench:   6,249,184 ns/iter (+/- 8,159) = 813 MB/s
-test regexdna::variant1                      ... bench:  73,947,890 ns/iter (+/- 930,039) = 68 MB/s
-test regexdna::variant2                      ... bench: 108,486,922 ns/iter (+/- 181,287) = 46 MB/s
-test regexdna::variant3                      ... bench:  93,241,161 ns/iter (+/- 143,224) = 54 MB/s
-test regexdna::variant4                      ... bench:  75,615,061 ns/iter (+/- 107,918) = 67 MB/s
-test regexdna::variant5                      ... bench:  74,484,623 ns/iter (+/- 121,807) = 68 MB/s
-test regexdna::variant6                      ... bench:  74,594,078 ns/iter (+/- 121,252) = 68 MB/s
-test regexdna::variant7                      ... bench:  77,064,066 ns/iter (+/- 123,262) = 65 MB/s
-test regexdna::variant8                      ... bench:  87,267,656 ns/iter (+/- 128,639) = 58 MB/s
-test regexdna::variant9                      ... bench:  98,197,000 ns/iter (+/- 149,379) = 51 MB/s
-test sherlock::before_after_holmes           ... bench:   7,100,744 ns/iter (+/- 29,939) = 83 MB/s
-test sherlock::before_holmes                 ... bench:   7,120,564 ns/iter (+/- 32,659) = 83 MB/s
-test sherlock::everything_greedy             ... bench:   3,777,458 ns/iter (+/- 8,802) = 157 MB/s
-test sherlock::everything_greedy_nl          ... bench:         282 ns/iter (+/- 2) = 2109691 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     389,335 ns/iter (+/- 1,472) = 1528 MB/s
-test sherlock::ing_suffix                    ... bench:   6,256,416 ns/iter (+/- 8,735) = 95 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   7,572,167 ns/iter (+/- 15,521) = 78 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     343,402 ns/iter (+/- 3,693) = 1732 MB/s
-test sherlock::name_alt1                     ... bench:     347,605 ns/iter (+/- 916) = 1711 MB/s
-test sherlock::name_alt2                     ... bench:     420,500 ns/iter (+/- 2,846) = 1414 MB/s
-test sherlock::name_alt3                     ... bench:     762,684 ns/iter (+/- 3,834) = 780 MB/s
-test sherlock::name_alt3_nocase              ... bench:   9,980,804 ns/iter (+/- 49,424) = 59 MB/s
-test sherlock::name_alt4                     ... bench:     431,744 ns/iter (+/- 682) = 1377 MB/s
-test sherlock::name_alt4_nocase              ... bench:   3,464,135 ns/iter (+/- 11,476) = 171 MB/s
-test sherlock::name_alt5                     ... bench:     472,923 ns/iter (+/- 846) = 1257 MB/s
-test sherlock::name_alt5_nocase              ... bench:   4,338,924 ns/iter (+/- 31,118) = 137 MB/s
-test sherlock::name_holmes                   ... bench:     378,950 ns/iter (+/- 865) = 1569 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,952,035 ns/iter (+/- 8,233) = 304 MB/s
-test sherlock::name_sherlock                 ... bench:     324,845 ns/iter (+/- 8,376) = 1831 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     324,647 ns/iter (+/- 2,901) = 1832 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,870,400 ns/iter (+/- 10,609) = 318 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,814,172 ns/iter (+/- 16,455) = 327 MB/s
-test sherlock::name_whitespace               ... bench:     326,252 ns/iter (+/- 1,557) = 1823 MB/s
-test sherlock::no_match_common               ... bench:   1,154,445 ns/iter (+/- 8,544) = 515 MB/s
-test sherlock::no_match_really_common        ... bench:   1,154,314 ns/iter (+/- 5,467) = 515 MB/s
-test sherlock::no_match_uncommon             ... bench:     295,301 ns/iter (+/- 906) = 2014 MB/s
-test sherlock::quotes                        ... bench:     863,138 ns/iter (+/- 3,072) = 689 MB/s
-test sherlock::repeated_class_negation       ... bench:  13,594,294 ns/iter (+/- 40,354) = 43 MB/s
-test sherlock::the_lower                     ... bench:   2,171,731 ns/iter (+/- 7,148) = 273 MB/s
-test sherlock::the_nocase                    ... bench:   3,556,278 ns/iter (+/- 7,269) = 167 MB/s
-test sherlock::the_upper                     ... bench:     404,851 ns/iter (+/- 865) = 1469 MB/s
-test sherlock::the_whitespace                ... bench:   2,139,597 ns/iter (+/- 7,427) = 278 MB/s
-test sherlock::word_ending_n                 ... bench:   7,824,965 ns/iter (+/- 30,691) = 76 MB/s
-test sherlock::words                         ... bench:  18,386,285 ns/iter (+/- 34,161) = 32 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 92 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/dphobos-dmd b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/dphobos-dmd
deleted file mode 100644
index 835a096..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/dphobos-dmd
+++ /dev/null
@@ -1,100 +0,0 @@
-
-running 95 tests
-test misc::anchored_literal_long_match       ... bench:         365 ns/iter (+/- 2) = 1068 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         300 ns/iter (+/- 0) = 1300 MB/s
-test misc::anchored_literal_short_match      ... bench:         364 ns/iter (+/- 2) = 71 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         306 ns/iter (+/- 9) = 84 MB/s
-test misc::easy0_1K                          ... bench:         768 ns/iter (+/- 5) = 1368 MB/s
-test misc::easy0_1MB                         ... bench:      17,062 ns/iter (+/- 252) = 61458 MB/s
-test misc::easy0_32                          ... bench:         759 ns/iter (+/- 7) = 77 MB/s
-test misc::easy0_32K                         ... bench:       1,095 ns/iter (+/- 20) = 29949 MB/s
-test misc::easy1_1K                          ... bench:         723 ns/iter (+/- 1) = 1443 MB/s
-test misc::easy1_1MB                         ... bench:      17,021 ns/iter (+/- 229) = 61606 MB/s
-test misc::easy1_32                          ... bench:         714 ns/iter (+/- 1) = 72 MB/s
-test misc::easy1_32K                         ... bench:       1,052 ns/iter (+/- 12) = 31167 MB/s
-test misc::hard_1K                           ... bench:      17,044 ns/iter (+/- 176) = 61 MB/s
-test misc::hard_1MB                          ... bench:  17,965,420 ns/iter (+/- 72,226) = 58 MB/s
-test misc::hard_32                           ... bench:       2,171 ns/iter (+/- 2) = 27 MB/s
-test misc::hard_32K                          ... bench:     561,207 ns/iter (+/- 5,654) = 58 MB/s
-test misc::literal                           ... bench:         240 ns/iter (+/- 0) = 212 MB/s
-test misc::long_needle1                      ... bench:      76,640 ns/iter (+/- 1,043) = 1304 MB/s
-test misc::long_needle2                      ... bench:      76,747 ns/iter (+/- 3,299) = 1302 MB/s
-test misc::match_class                       ... bench:         344 ns/iter (+/- 1) = 235 MB/s
-test misc::match_class_in_range              ... bench:         306 ns/iter (+/- 9) = 264 MB/s
-test misc::match_class_unicode               ... bench:       1,435 ns/iter (+/- 9) = 112 MB/s
-test misc::medium_1K                         ... bench:       1,480 ns/iter (+/- 16) = 710 MB/s
-test misc::medium_1MB                        ... bench:     564,378 ns/iter (+/- 18,695) = 1857 MB/s
-test misc::medium_32                         ... bench:         941 ns/iter (+/- 32) = 63 MB/s
-test misc::medium_32K                        ... bench:      18,465 ns/iter (+/- 116) = 1776 MB/s
-test misc::no_exponential                    ... bench:     367,476 ns/iter (+/- 15,176)
-test misc::not_literal                       ... bench:       1,165 ns/iter (+/- 9) = 43 MB/s
-test misc::one_pass_long_prefix              ... bench:         596 ns/iter (+/- 2) = 43 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         602 ns/iter (+/- 6) = 43 MB/s
-test misc::one_pass_short                    ... bench:       1,068 ns/iter (+/- 3) = 15 MB/s
-test misc::one_pass_short_not                ... bench:       1,434 ns/iter (+/- 11) = 11 MB/s
-test misc::reallyhard2_1K                    ... bench:      36,539 ns/iter (+/- 281) = 28 MB/s
-test misc::reallyhard_1K                     ... bench:      17,086 ns/iter (+/- 94) = 61 MB/s
-test misc::reallyhard_1MB                    ... bench:  17,973,007 ns/iter (+/- 64,010) = 58 MB/s
-test misc::reallyhard_32                     ... bench:       2,200 ns/iter (+/- 16) = 26 MB/s
-test misc::reallyhard_32K                    ... bench:     561,371 ns/iter (+/- 8,688) = 58 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,606 ns/iter (+/- 16) = 1736 MB/s
-test regexdna::find_new_lines                ... bench:  31,579,756 ns/iter (+/- 121,047) = 160 MB/s
-test regexdna::subst1                        ... bench:   7,930,333 ns/iter (+/- 27,118) = 641 MB/s
-test regexdna::subst10                       ... bench:   7,942,534 ns/iter (+/- 36,470) = 640 MB/s
-test regexdna::subst11                       ... bench:   7,934,646 ns/iter (+/- 42,013) = 640 MB/s
-test regexdna::subst2                        ... bench:   7,947,802 ns/iter (+/- 53,427) = 639 MB/s
-test regexdna::subst3                        ... bench:   7,941,691 ns/iter (+/- 122,303) = 640 MB/s
-test regexdna::subst4                        ... bench:   7,928,228 ns/iter (+/- 30,493) = 641 MB/s
-test regexdna::subst5                        ... bench:   7,936,901 ns/iter (+/- 37,894) = 640 MB/s
-test regexdna::subst6                        ... bench:   7,936,211 ns/iter (+/- 46,269) = 640 MB/s
-test regexdna::subst7                        ... bench:   7,946,477 ns/iter (+/- 62,660) = 639 MB/s
-test regexdna::subst8                        ... bench:   7,930,830 ns/iter (+/- 31,234) = 640 MB/s
-test regexdna::subst9                        ... bench:   7,937,951 ns/iter (+/- 36,425) = 640 MB/s
-test regexdna::variant1                      ... bench:   5,104,224 ns/iter (+/- 26,612) = 995 MB/s
-test regexdna::variant2                      ... bench:   6,847,162 ns/iter (+/- 31,233) = 742 MB/s
-test regexdna::variant3                      ... bench:  11,153,739 ns/iter (+/- 114,193) = 455 MB/s
-test regexdna::variant4                      ... bench:   9,665,797 ns/iter (+/- 47,148) = 525 MB/s
-test regexdna::variant5                      ... bench:   9,645,193 ns/iter (+/- 35,250) = 527 MB/s
-test regexdna::variant6                      ... bench:   7,280,069 ns/iter (+/- 21,171) = 698 MB/s
-test regexdna::variant7                      ... bench:   7,841,177 ns/iter (+/- 20,797) = 648 MB/s
-test regexdna::variant8                      ... bench:   9,783,978 ns/iter (+/- 35,231) = 519 MB/s
-test regexdna::variant9                      ... bench:  19,157,329 ns/iter (+/- 445,911) = 265 MB/s
-test sherlock::before_after_holmes           ... bench:  20,995,307 ns/iter (+/- 258,419) = 28 MB/s
-test sherlock::before_holmes                 ... bench:  20,899,416 ns/iter (+/- 122,256) = 28 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     904,439 ns/iter (+/- 6,934) = 657 MB/s
-test sherlock::holmes_coword_watson          ... bench: 103,706,930 ns/iter (+/- 176,711) = 5 MB/s
-test sherlock::ing_suffix                    ... bench:  14,927,612 ns/iter (+/- 90,346) = 39 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  19,743,662 ns/iter (+/- 78,506) = 30 MB/s
-test sherlock::letters                       ... bench: 112,708,213 ns/iter (+/- 251,690) = 5 MB/s
-test sherlock::letters_lower                 ... bench: 111,058,829 ns/iter (+/- 192,793) = 5 MB/s
-test sherlock::letters_upper                 ... bench:   4,072,062 ns/iter (+/- 20,273) = 146 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     399,796 ns/iter (+/- 4,222) = 1488 MB/s
-test sherlock::name_alt1                     ... bench:     411,415 ns/iter (+/- 2,257) = 1446 MB/s
-test sherlock::name_alt2                     ... bench:     626,671 ns/iter (+/- 5,745) = 949 MB/s
-test sherlock::name_alt3                     ... bench:   1,086,570 ns/iter (+/- 6,105) = 547 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,827,028 ns/iter (+/- 12,788) = 325 MB/s
-test sherlock::name_alt4                     ... bench:     687,454 ns/iter (+/- 11,421) = 865 MB/s
-test sherlock::name_alt4_nocase              ... bench:     943,925 ns/iter (+/- 4,108) = 630 MB/s
-test sherlock::name_alt5                     ... bench:     734,969 ns/iter (+/- 7,215) = 809 MB/s
-test sherlock::name_alt5_nocase              ... bench:     895,903 ns/iter (+/- 5,647) = 664 MB/s
-test sherlock::name_holmes                   ... bench:     199,880 ns/iter (+/- 1,654) = 2976 MB/s
-test sherlock::name_holmes_nocase            ... bench:     529,590 ns/iter (+/- 1,288) = 1123 MB/s
-test sherlock::name_sherlock                 ... bench:      57,720 ns/iter (+/- 555) = 10307 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      67,596 ns/iter (+/- 580) = 8801 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     393,903 ns/iter (+/- 2,700) = 1510 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     373,411 ns/iter (+/- 3,264) = 1593 MB/s
-test sherlock::name_whitespace               ... bench:      79,175 ns/iter (+/- 1,288) = 7514 MB/s
-test sherlock::no_match_common               ... bench:     276,503 ns/iter (+/- 2,155) = 2151 MB/s
-test sherlock::no_match_really_common        ... bench:     276,535 ns/iter (+/- 416) = 2151 MB/s
-test sherlock::no_match_uncommon             ... bench:      10,535 ns/iter (+/- 105) = 56472 MB/s
-test sherlock::quotes                        ... bench:   5,746,202 ns/iter (+/- 33,993) = 103 MB/s
-test sherlock::repeated_class_negation       ... bench:  46,124,528 ns/iter (+/- 125,861) = 12 MB/s
-test sherlock::the_lower                     ... bench:   2,527,960 ns/iter (+/- 12,351) = 235 MB/s
-test sherlock::the_nocase                    ... bench:   3,210,112 ns/iter (+/- 10,799) = 185 MB/s
-test sherlock::the_upper                     ... bench:     240,272 ns/iter (+/- 3,902) = 2476 MB/s
-test sherlock::the_whitespace                ... bench:   3,511,711 ns/iter (+/- 17,181) = 169 MB/s
-test sherlock::word_ending_n                 ... bench:  29,535,089 ns/iter (+/- 95,201) = 20 MB/s
-test sherlock::words                         ... bench:  43,341,782 ns/iter (+/- 110,038) = 13 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 95 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/dphobos-dmd-ct b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/dphobos-dmd-ct
deleted file mode 100644
index 9dd6d02..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/dphobos-dmd-ct
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:         354 ns/iter (+/- 25) = 1101 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         314 ns/iter (+/- 3) = 1242 MB/s
-test misc::anchored_literal_short_match      ... bench:         331 ns/iter (+/- 0) = 78 MB/s
-test misc::anchored_literal_short_non_match  ... bench:         314 ns/iter (+/- 4) = 82 MB/s
-test misc::easy0_1K                          ... bench:         430 ns/iter (+/- 2) = 2444 MB/s
-test misc::easy0_1MB                         ... bench:      16,692 ns/iter (+/- 222) = 62820 MB/s
-test misc::easy0_32                          ... bench:         420 ns/iter (+/- 4) = 140 MB/s
-test misc::easy0_32K                         ... bench:         755 ns/iter (+/- 5) = 43437 MB/s
-test misc::easy1_1K                          ... bench:         407 ns/iter (+/- 10) = 2565 MB/s
-test misc::easy1_1MB                         ... bench:      16,670 ns/iter (+/- 205) = 62903 MB/s
-test misc::easy1_32                          ... bench:         389 ns/iter (+/- 0) = 133 MB/s
-test misc::easy1_32K                         ... bench:         732 ns/iter (+/- 6) = 44792 MB/s
-test misc::hard_1K                           ... bench:      35,518 ns/iter (+/- 346) = 29 MB/s
-test misc::hard_1MB                          ... bench:  31,657,473 ns/iter (+/- 512,618) = 33 MB/s
-test misc::hard_32                           ... bench:       1,057 ns/iter (+/- 7) = 55 MB/s
-test misc::hard_32K                          ... bench:     950,905 ns/iter (+/- 13,239) = 34 MB/s
-test misc::literal                           ... bench:         320 ns/iter (+/- 3) = 159 MB/s
-test misc::long_needle1                      ... bench:      73,954 ns/iter (+/- 331) = 1352 MB/s
-test misc::long_needle2                      ... bench:      73,915 ns/iter (+/- 199) = 1352 MB/s
-test misc::match_class                       ... bench:         374 ns/iter (+/- 3) = 216 MB/s
-test misc::match_class_in_range              ... bench:         372 ns/iter (+/- 0) = 217 MB/s
-test misc::match_class_unicode               ... bench:       1,631 ns/iter (+/- 8) = 98 MB/s
-test misc::medium_1K                         ... bench:         965 ns/iter (+/- 10) = 1090 MB/s
-test misc::medium_1MB                        ... bench:     563,242 ns/iter (+/- 6,767) = 1861 MB/s
-test misc::medium_32                         ... bench:         434 ns/iter (+/- 3) = 138 MB/s
-test misc::medium_32K                        ... bench:      17,976 ns/iter (+/- 137) = 1824 MB/s
-test misc::not_literal                       ... bench:       1,063 ns/iter (+/- 2) = 47 MB/s
-test misc::one_pass_long_prefix              ... bench:         405 ns/iter (+/- 4) = 64 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         410 ns/iter (+/- 2) = 63 MB/s
-test misc::one_pass_short                    ... bench:         539 ns/iter (+/- 12) = 31 MB/s
-test misc::one_pass_short_not                ... bench:         534 ns/iter (+/- 1) = 31 MB/s
-test misc::reallyhard2_1K                    ... bench:      75,108 ns/iter (+/- 699) = 13 MB/s
-test misc::reallyhard_1K                     ... bench:      34,681 ns/iter (+/- 268) = 30 MB/s
-test misc::reallyhard_1MB                    ... bench:  30,579,065 ns/iter (+/- 389,443) = 34 MB/s
-test misc::reallyhard_32                     ... bench:       1,025 ns/iter (+/- 22) = 57 MB/s
-test misc::reallyhard_32K                    ... bench:     920,515 ns/iter (+/- 26,281) = 35 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,607 ns/iter (+/- 36) = 1736 MB/s
-test regexdna::find_new_lines                ... bench:  34,122,227 ns/iter (+/- 842,345) = 148 MB/s
-test regexdna::subst1                        ... bench:   9,932,271 ns/iter (+/- 86,915) = 511 MB/s
-test regexdna::subst10                       ... bench:   9,977,738 ns/iter (+/- 51,656) = 509 MB/s
-test regexdna::subst11                       ... bench:   9,945,085 ns/iter (+/- 53,175) = 511 MB/s
-test regexdna::subst2                        ... bench:   9,928,284 ns/iter (+/- 32,335) = 512 MB/s
-test regexdna::subst3                        ... bench:   9,968,901 ns/iter (+/- 41,254) = 509 MB/s
-test regexdna::subst4                        ... bench:   9,912,463 ns/iter (+/- 28,171) = 512 MB/s
-test regexdna::subst5                        ... bench:   9,948,128 ns/iter (+/- 22,949) = 510 MB/s
-test regexdna::subst6                        ... bench:   9,916,200 ns/iter (+/- 28,947) = 512 MB/s
-test regexdna::subst7                        ... bench:   9,996,277 ns/iter (+/- 37,585) = 508 MB/s
-test regexdna::subst8                        ... bench:   9,974,849 ns/iter (+/- 41,503) = 509 MB/s
-test regexdna::subst9                        ... bench:   9,961,948 ns/iter (+/- 28,254) = 510 MB/s
-test regexdna::variant1                      ... bench:   3,504,049 ns/iter (+/- 15,090) = 1450 MB/s
-test regexdna::variant2                      ... bench:   3,800,264 ns/iter (+/- 12,123) = 1337 MB/s
-test regexdna::variant3                      ... bench:   4,932,027 ns/iter (+/- 15,553) = 1030 MB/s
-test regexdna::variant4                      ... bench:   4,709,109 ns/iter (+/- 15,213) = 1079 MB/s
-test regexdna::variant5                      ... bench:   4,918,928 ns/iter (+/- 19,196) = 1033 MB/s
-test regexdna::variant6                      ... bench:   4,244,250 ns/iter (+/- 24,367) = 1197 MB/s
-test regexdna::variant7                      ... bench:   4,245,530 ns/iter (+/- 16,178) = 1197 MB/s
-test regexdna::variant8                      ... bench:   4,205,036 ns/iter (+/- 10,733) = 1208 MB/s
-test regexdna::variant9                      ... bench:   3,864,771 ns/iter (+/- 11,864) = 1315 MB/s
-test sherlock::before_after_holmes           ... bench:  22,490,817 ns/iter (+/- 571,510) = 26 MB/s
-test sherlock::before_holmes                 ... bench:  22,603,264 ns/iter (+/- 74,703) = 26 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     519,710 ns/iter (+/- 5,878) = 1144 MB/s
-test sherlock::ing_suffix                    ... bench:   9,237,783 ns/iter (+/- 30,188) = 64 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  12,074,301 ns/iter (+/- 30,860) = 49 MB/s
-test sherlock::letters                       ... bench: 137,678,575 ns/iter (+/- 131,761) = 4 MB/s
-test sherlock::letters_lower                 ... bench: 135,414,657 ns/iter (+/- 134,307) = 4 MB/s
-test sherlock::letters_upper                 ... bench:   5,004,996 ns/iter (+/- 23,224) = 118 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     341,556 ns/iter (+/- 1,500) = 1741 MB/s
-test sherlock::name_alt1                     ... bench:     378,291 ns/iter (+/- 1,545) = 1572 MB/s
-test sherlock::name_alt2                     ... bench:     528,403 ns/iter (+/- 2,273) = 1125 MB/s
-test sherlock::name_alt3                     ... bench:     685,634 ns/iter (+/- 17,205) = 867 MB/s
-test sherlock::name_alt3_nocase              ... bench:     825,069 ns/iter (+/- 10,490) = 721 MB/s
-test sherlock::name_alt4                     ... bench:     555,717 ns/iter (+/- 3,223) = 1070 MB/s
-test sherlock::name_alt4_nocase              ... bench:     649,913 ns/iter (+/- 4,543) = 915 MB/s
-test sherlock::name_alt5                     ... bench:     570,036 ns/iter (+/- 543) = 1043 MB/s
-test sherlock::name_alt5_nocase              ... bench:     580,445 ns/iter (+/- 2,100) = 1024 MB/s
-test sherlock::name_holmes                   ... bench:     185,140 ns/iter (+/- 2,100) = 3213 MB/s
-test sherlock::name_holmes_nocase            ... bench:     479,902 ns/iter (+/- 5,898) = 1239 MB/s
-test sherlock::name_sherlock                 ... bench:      51,053 ns/iter (+/- 491) = 11653 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      50,722 ns/iter (+/- 296) = 11729 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     355,142 ns/iter (+/- 1,424) = 1675 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     354,932 ns/iter (+/- 1,554) = 1676 MB/s
-test sherlock::name_whitespace               ... bench:      56,972 ns/iter (+/- 271) = 10442 MB/s
-test sherlock::no_match_common               ... bench:     274,260 ns/iter (+/- 3,092) = 2169 MB/s
-test sherlock::no_match_really_common        ... bench:     273,984 ns/iter (+/- 2,202) = 2171 MB/s
-test sherlock::no_match_uncommon             ... bench:      10,444 ns/iter (+/- 68) = 56964 MB/s
-test sherlock::quotes                        ... bench:   2,755,414 ns/iter (+/- 11,488) = 215 MB/s
-test sherlock::repeated_class_negation       ... bench:  21,585,138 ns/iter (+/- 50,347) = 27 MB/s
-test sherlock::the_lower                     ... bench:   2,835,360 ns/iter (+/- 10,083) = 209 MB/s
-test sherlock::the_nocase                    ... bench:   3,060,088 ns/iter (+/- 10,321) = 194 MB/s
-test sherlock::the_upper                     ... bench:     272,416 ns/iter (+/- 3,308) = 2183 MB/s
-test sherlock::the_whitespace                ... bench:   2,991,214 ns/iter (+/- 27,223) = 198 MB/s
-test sherlock::word_ending_n                 ... bench:  30,726,303 ns/iter (+/- 83,743) = 19 MB/s
-test sherlock::words                         ... bench:  42,256,710 ns/iter (+/- 88,302) = 14 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/oniguruma b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/oniguruma
deleted file mode 100644
index b9e8e29..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/oniguruma
+++ /dev/null
@@ -1,99 +0,0 @@
-
-running 94 tests
-test misc::anchored_literal_long_match       ... bench:         129 ns/iter (+/- 3) = 3023 MB/s
-test misc::anchored_literal_long_non_match   ... bench:         402 ns/iter (+/- 1) = 970 MB/s
-test misc::anchored_literal_short_match      ... bench:         130 ns/iter (+/- 1) = 200 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          49 ns/iter (+/- 0) = 530 MB/s
-test misc::easy0_1K                          ... bench:         281 ns/iter (+/- 3) = 3740 MB/s
-test misc::easy0_1MB                         ... bench:     119,847 ns/iter (+/- 713) = 8749 MB/s
-test misc::easy0_32                          ... bench:         154 ns/iter (+/- 4) = 383 MB/s
-test misc::easy0_32K                         ... bench:       3,985 ns/iter (+/- 24) = 8229 MB/s
-test misc::easy1_1K                          ... bench:       3,472 ns/iter (+/- 11) = 300 MB/s
-test misc::easy1_1MB                         ... bench:   3,385,764 ns/iter (+/- 6,630) = 309 MB/s
-test misc::easy1_32                          ... bench:         283 ns/iter (+/- 6) = 183 MB/s
-test misc::easy1_32K                         ... bench:     105,977 ns/iter (+/- 319) = 309 MB/s
-test misc::hard_1K                           ... bench:     106,973 ns/iter (+/- 1,091) = 9 MB/s
-test misc::hard_1MB                          ... bench: 114,602,847 ns/iter (+/- 336,051) = 9 MB/s
-test misc::hard_32                           ... bench:       3,569 ns/iter (+/- 3) = 16 MB/s
-test misc::hard_32K                          ... bench:   3,570,108 ns/iter (+/- 17,057) = 9 MB/s
-test misc::literal                           ... bench:         287 ns/iter (+/- 1) = 177 MB/s
-test misc::long_needle1                      ... bench:   5,430,190 ns/iter (+/- 271,737) = 18 MB/s
-test misc::long_needle2                      ... bench:   5,651,748 ns/iter (+/- 260,960) = 17 MB/s
-test misc::match_class                       ... bench:         369 ns/iter (+/- 0) = 219 MB/s
-test misc::match_class_in_range              ... bench:         370 ns/iter (+/- 8) = 218 MB/s
-test misc::match_class_unicode               ... bench:       1,600 ns/iter (+/- 24) = 100 MB/s
-test misc::medium_1K                         ... bench:         295 ns/iter (+/- 4) = 3566 MB/s
-test misc::medium_1MB                        ... bench:     119,845 ns/iter (+/- 707) = 8749 MB/s
-test misc::medium_32                         ... bench:         166 ns/iter (+/- 0) = 361 MB/s
-test misc::medium_32K                        ... bench:       3,995 ns/iter (+/- 30) = 8209 MB/s
-test misc::not_literal                       ... bench:         365 ns/iter (+/- 1) = 139 MB/s
-test misc::one_pass_long_prefix              ... bench:         155 ns/iter (+/- 0) = 167 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         162 ns/iter (+/- 0) = 160 MB/s
-test misc::one_pass_short                    ... bench:         279 ns/iter (+/- 0) = 60 MB/s
-test misc::one_pass_short_not                ... bench:         269 ns/iter (+/- 3) = 63 MB/s
-test misc::reallyhard2_1K                    ... bench:     227,630 ns/iter (+/- 963) = 4 MB/s
-test misc::reallyhard_1K                     ... bench:     106,964 ns/iter (+/- 1,199) = 9 MB/s
-test misc::reallyhard_1MB                    ... bench: 114,622,989 ns/iter (+/- 206,430) = 9 MB/s
-test misc::reallyhard_32                     ... bench:       3,477 ns/iter (+/- 13) = 16 MB/s
-test misc::reallyhard_32K                    ... bench:   3,580,927 ns/iter (+/- 15,784) = 9 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:      23,518 ns/iter (+/- 105) = 340 MB/s
-test regexdna::find_new_lines                ... bench:  33,300,039 ns/iter (+/- 827,837) = 152 MB/s
-test regexdna::subst1                        ... bench:  22,829,688 ns/iter (+/- 81,653) = 222 MB/s
-test regexdna::subst10                       ... bench:  22,843,694 ns/iter (+/- 94,299) = 222 MB/s
-test regexdna::subst11                       ... bench:  22,827,872 ns/iter (+/- 84,129) = 222 MB/s
-test regexdna::subst2                        ... bench:  22,841,925 ns/iter (+/- 84,394) = 222 MB/s
-test regexdna::subst3                        ... bench:  22,885,409 ns/iter (+/- 114,277) = 222 MB/s
-test regexdna::subst4                        ... bench:  22,837,475 ns/iter (+/- 58,938) = 222 MB/s
-test regexdna::subst5                        ... bench:  22,835,207 ns/iter (+/- 39,862) = 222 MB/s
-test regexdna::subst6                        ... bench:  22,833,199 ns/iter (+/- 77,142) = 222 MB/s
-test regexdna::subst7                        ... bench:  22,851,757 ns/iter (+/- 322,186) = 222 MB/s
-test regexdna::subst8                        ... bench:  22,842,892 ns/iter (+/- 86,166) = 222 MB/s
-test regexdna::subst9                        ... bench:  22,840,862 ns/iter (+/- 105,926) = 222 MB/s
-test regexdna::variant1                      ... bench:  91,691,325 ns/iter (+/- 194,247) = 55 MB/s
-test regexdna::variant2                      ... bench: 105,586,659 ns/iter (+/- 320,354) = 48 MB/s
-test regexdna::variant3                      ... bench:  94,437,485 ns/iter (+/- 277,744) = 53 MB/s
-test regexdna::variant4                      ... bench:  90,399,600 ns/iter (+/- 184,588) = 56 MB/s
-test regexdna::variant5                      ... bench:  90,332,232 ns/iter (+/- 174,254) = 56 MB/s
-test regexdna::variant6                      ... bench:  90,519,504 ns/iter (+/- 227,643) = 56 MB/s
-test regexdna::variant7                      ... bench:  90,881,562 ns/iter (+/- 221,861) = 55 MB/s
-test regexdna::variant8                      ... bench:  96,962,980 ns/iter (+/- 180,002) = 52 MB/s
-test regexdna::variant9                      ... bench: 109,558,711 ns/iter (+/- 166,337) = 46 MB/s
-test sherlock::before_after_holmes           ... bench:  31,530,493 ns/iter (+/- 112,639) = 18 MB/s
-test sherlock::before_holmes                 ... bench:  30,420,729 ns/iter (+/- 114,072) = 19 MB/s
-test sherlock::everything_greedy             ... bench:   6,656,677 ns/iter (+/- 167,110) = 89 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   1,992,839 ns/iter (+/- 8,037) = 298 MB/s
-test sherlock::ing_suffix                    ... bench:  15,878,331 ns/iter (+/- 150,901) = 37 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   2,085,471 ns/iter (+/- 8,438) = 285 MB/s
-test sherlock::letters                       ... bench:  89,091,241 ns/iter (+/- 182,225) = 6 MB/s
-test sherlock::letters_lower                 ... bench:  55,634,237 ns/iter (+/- 115,097) = 10 MB/s
-test sherlock::letters_upper                 ... bench:  10,126,641 ns/iter (+/- 36,015) = 58 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     191,963 ns/iter (+/- 687) = 3099 MB/s
-test sherlock::name_alt1                     ... bench:   1,983,070 ns/iter (+/- 5,863) = 300 MB/s
-test sherlock::name_alt2                     ... bench:   1,972,746 ns/iter (+/- 14,082) = 301 MB/s
-test sherlock::name_alt3                     ... bench:   2,424,033 ns/iter (+/- 13,209) = 245 MB/s
-test sherlock::name_alt3_nocase              ... bench:  16,876,942 ns/iter (+/- 77,218) = 35 MB/s
-test sherlock::name_alt4                     ... bench:   1,986,579 ns/iter (+/- 9,195) = 299 MB/s
-test sherlock::name_alt4_nocase              ... bench:   4,992,277 ns/iter (+/- 10,882) = 119 MB/s
-test sherlock::name_alt5                     ... bench:   2,033,937 ns/iter (+/- 13,627) = 292 MB/s
-test sherlock::name_alt5_nocase              ... bench:   6,292,627 ns/iter (+/- 14,666) = 94 MB/s
-test sherlock::name_holmes                   ... bench:     450,290 ns/iter (+/- 1,882) = 1321 MB/s
-test sherlock::name_holmes_nocase            ... bench:   3,032,489 ns/iter (+/- 8,728) = 196 MB/s
-test sherlock::name_sherlock                 ... bench:     265,379 ns/iter (+/- 865) = 2241 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     201,375 ns/iter (+/- 2,146) = 2954 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   3,010,059 ns/iter (+/- 7,093) = 197 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   3,016,713 ns/iter (+/- 11,280) = 197 MB/s
-test sherlock::name_whitespace               ... bench:     266,706 ns/iter (+/- 908) = 2230 MB/s
-test sherlock::no_match_common               ... bench:     544,428 ns/iter (+/- 7,562) = 1092 MB/s
-test sherlock::no_match_really_common        ... bench:     626,986 ns/iter (+/- 2,959) = 948 MB/s
-test sherlock::no_match_uncommon             ... bench:     534,517 ns/iter (+/- 4,342) = 1113 MB/s
-test sherlock::quotes                        ... bench:   3,210,614 ns/iter (+/- 15,699) = 185 MB/s
-test sherlock::repeated_class_negation       ... bench:  31,147,103 ns/iter (+/- 117,471) = 19 MB/s
-test sherlock::the_lower                     ... bench:   2,275,468 ns/iter (+/- 19,220) = 261 MB/s
-test sherlock::the_nocase                    ... bench:   4,999,086 ns/iter (+/- 20,184) = 119 MB/s
-test sherlock::the_upper                     ... bench:     893,288 ns/iter (+/- 11,368) = 666 MB/s
-test sherlock::the_whitespace                ... bench:   2,364,893 ns/iter (+/- 21,124) = 251 MB/s
-test sherlock::word_ending_n                 ... bench:  18,221,921 ns/iter (+/- 62,927) = 32 MB/s
-test sherlock::words                         ... bench:  27,552,543 ns/iter (+/- 89,437) = 21 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 94 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/pcre1 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/pcre1
deleted file mode 100644
index a28d3cb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/pcre1
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:          23 ns/iter (+/- 0) = 16956 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          19 ns/iter (+/- 0) = 20526 MB/s
-test misc::anchored_literal_short_match      ... bench:          23 ns/iter (+/- 0) = 1130 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          19 ns/iter (+/- 0) = 1368 MB/s
-test misc::easy0_1K                          ... bench:         223 ns/iter (+/- 2) = 4713 MB/s
-test misc::easy0_1MB                         ... bench:     178,098 ns/iter (+/- 3,124) = 5887 MB/s
-test misc::easy0_32                          ... bench:          39 ns/iter (+/- 0) = 1512 MB/s
-test misc::easy0_32K                         ... bench:       5,600 ns/iter (+/- 27) = 5856 MB/s
-test misc::easy1_1K                          ... bench:         210 ns/iter (+/- 7) = 4971 MB/s
-test misc::easy1_1MB                         ... bench:     178,177 ns/iter (+/- 1,024) = 5885 MB/s
-test misc::easy1_32                          ... bench:          40 ns/iter (+/- 0) = 1300 MB/s
-test misc::easy1_32K                         ... bench:       5,592 ns/iter (+/- 52) = 5863 MB/s
-test misc::hard_1K                           ... bench:       1,223 ns/iter (+/- 14) = 859 MB/s
-test misc::hard_1MB                          ... bench:     983,169 ns/iter (+/- 13,398) = 1066 MB/s
-test misc::hard_32                           ... bench:          99 ns/iter (+/- 0) = 595 MB/s
-test misc::hard_32K                          ... bench:      31,422 ns/iter (+/- 326) = 1043 MB/s
-test misc::literal                           ... bench:          23 ns/iter (+/- 0) = 2217 MB/s
-test misc::long_needle1                      ... bench:     464,932 ns/iter (+/- 1,869) = 215 MB/s
-test misc::long_needle2                      ... bench:     462,587 ns/iter (+/- 6,375) = 216 MB/s
-test misc::match_class                       ... bench:          73 ns/iter (+/- 0) = 1109 MB/s
-test misc::match_class_in_range              ... bench:          25 ns/iter (+/- 0) = 3240 MB/s
-test misc::match_class_unicode               ... bench:         263 ns/iter (+/- 2) = 612 MB/s
-test misc::medium_1K                         ... bench:         213 ns/iter (+/- 3) = 4938 MB/s
-test misc::medium_1MB                        ... bench:     178,077 ns/iter (+/- 1,844) = 5888 MB/s
-test misc::medium_32                         ... bench:          48 ns/iter (+/- 0) = 1250 MB/s
-test misc::medium_32K                        ... bench:       5,598 ns/iter (+/- 38) = 5858 MB/s
-test misc::not_literal                       ... bench:         131 ns/iter (+/- 0) = 389 MB/s
-test misc::one_pass_long_prefix              ... bench:          22 ns/iter (+/- 0) = 1181 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          22 ns/iter (+/- 0) = 1181 MB/s
-test misc::one_pass_short                    ... bench:          44 ns/iter (+/- 0) = 386 MB/s
-test misc::one_pass_short_not                ... bench:          44 ns/iter (+/- 0) = 386 MB/s
-test misc::reallyhard2_1K                    ... bench:       3,503 ns/iter (+/- 29) = 296 MB/s
-test misc::reallyhard_1K                     ... bench:       1,276 ns/iter (+/- 14) = 823 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,003,152 ns/iter (+/- 10,884) = 1045 MB/s
-test misc::reallyhard_32                     ... bench:         102 ns/iter (+/- 7) = 578 MB/s
-test misc::reallyhard_32K                    ... bench:      31,035 ns/iter (+/- 221) = 1056 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       3,924 ns/iter (+/- 23) = 2038 MB/s
-test regexdna::find_new_lines                ... bench:   2,398,578 ns/iter (+/- 28,663) = 2119 MB/s
-test regexdna::subst1                        ... bench:   1,073,632 ns/iter (+/- 7,567) = 4734 MB/s
-test regexdna::subst10                       ... bench:   1,068,696 ns/iter (+/- 14,896) = 4756 MB/s
-test regexdna::subst11                       ... bench:   1,071,991 ns/iter (+/- 21,623) = 4742 MB/s
-test regexdna::subst2                        ... bench:   1,064,244 ns/iter (+/- 22,701) = 4776 MB/s
-test regexdna::subst3                        ... bench:   1,081,402 ns/iter (+/- 25,919) = 4700 MB/s
-test regexdna::subst4                        ... bench:   1,078,319 ns/iter (+/- 8,278) = 4714 MB/s
-test regexdna::subst5                        ... bench:   1,067,600 ns/iter (+/- 6,079) = 4761 MB/s
-test regexdna::subst6                        ... bench:   1,073,509 ns/iter (+/- 8,068) = 4735 MB/s
-test regexdna::subst7                        ... bench:   1,075,462 ns/iter (+/- 9,145) = 4726 MB/s
-test regexdna::subst8                        ... bench:   1,073,592 ns/iter (+/- 10,284) = 4734 MB/s
-test regexdna::subst9                        ... bench:   1,074,960 ns/iter (+/- 11,802) = 4728 MB/s
-test regexdna::variant1                      ... bench:  14,120,901 ns/iter (+/- 85,462) = 359 MB/s
-test regexdna::variant2                      ... bench:  15,606,152 ns/iter (+/- 128,452) = 325 MB/s
-test regexdna::variant3                      ... bench:  18,892,502 ns/iter (+/- 82,790) = 269 MB/s
-test regexdna::variant4                      ... bench:  17,988,621 ns/iter (+/- 50,462) = 282 MB/s
-test regexdna::variant5                      ... bench:  15,854,890 ns/iter (+/- 54,745) = 320 MB/s
-test regexdna::variant6                      ... bench:  16,126,069 ns/iter (+/- 76,013) = 315 MB/s
-test regexdna::variant7                      ... bench:  17,997,470 ns/iter (+/- 94,472) = 282 MB/s
-test regexdna::variant8                      ... bench:  23,004,949 ns/iter (+/- 81,626) = 220 MB/s
-test regexdna::variant9                      ... bench:  20,272,633 ns/iter (+/- 99,674) = 250 MB/s
-test sherlock::before_after_holmes           ... bench:   3,660,138 ns/iter (+/- 41,095) = 162 MB/s
-test sherlock::before_holmes                 ... bench:   3,632,955 ns/iter (+/- 25,761) = 163 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     458,639 ns/iter (+/- 9,185) = 1297 MB/s
-test sherlock::ing_suffix                    ... bench:   1,746,052 ns/iter (+/- 31,762) = 340 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   4,067,141 ns/iter (+/- 12,943) = 146 MB/s
-test sherlock::letters                       ... bench:  11,360,188 ns/iter (+/- 22,264) = 52 MB/s
-test sherlock::letters_lower                 ... bench:  11,137,940 ns/iter (+/- 35,225) = 53 MB/s
-test sherlock::letters_upper                 ... bench:   1,505,435 ns/iter (+/- 10,318) = 395 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     181,084 ns/iter (+/- 3,121) = 3285 MB/s
-test sherlock::name_alt1                     ... bench:     427,474 ns/iter (+/- 1,601) = 1391 MB/s
-test sherlock::name_alt2                     ... bench:     434,858 ns/iter (+/- 6,444) = 1368 MB/s
-test sherlock::name_alt3                     ... bench:     747,274 ns/iter (+/- 7,303) = 796 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,574,102 ns/iter (+/- 44,203) = 231 MB/s
-test sherlock::name_alt4                     ... bench:      66,428 ns/iter (+/- 336) = 8956 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,333,932 ns/iter (+/- 6,683) = 445 MB/s
-test sherlock::name_alt5                     ... bench:     598,062 ns/iter (+/- 4,936) = 994 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,496,292 ns/iter (+/- 6,595) = 397 MB/s
-test sherlock::name_holmes                   ... bench:     359,203 ns/iter (+/- 6,202) = 1656 MB/s
-test sherlock::name_holmes_nocase            ... bench:     454,624 ns/iter (+/- 2,658) = 1308 MB/s
-test sherlock::name_sherlock                 ... bench:     243,450 ns/iter (+/- 2,435) = 2443 MB/s
-test sherlock::name_sherlock_holmes          ... bench:     182,407 ns/iter (+/- 878) = 3261 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     940,244 ns/iter (+/- 6,064) = 632 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     904,285 ns/iter (+/- 9,405) = 657 MB/s
-test sherlock::name_whitespace               ... bench:     244,114 ns/iter (+/- 1,875) = 2437 MB/s
-test sherlock::no_match_common               ... bench:     358,735 ns/iter (+/- 4,090) = 1658 MB/s
-test sherlock::no_match_really_common        ... bench:     348,964 ns/iter (+/- 6,060) = 1704 MB/s
-test sherlock::no_match_uncommon             ... bench:      21,256 ns/iter (+/- 144) = 27988 MB/s
-test sherlock::quotes                        ... bench:     422,149 ns/iter (+/- 1,540) = 1409 MB/s
-test sherlock::repeated_class_negation       ... bench:   5,232,683 ns/iter (+/- 21,609) = 113 MB/s
-test sherlock::the_lower                     ... bench:     651,539 ns/iter (+/- 1,763) = 913 MB/s
-test sherlock::the_nocase                    ... bench:     693,506 ns/iter (+/- 13,143) = 857 MB/s
-test sherlock::the_upper                     ... bench:      46,904 ns/iter (+/- 657) = 12684 MB/s
-test sherlock::the_whitespace                ... bench:     788,070 ns/iter (+/- 17,403) = 754 MB/s
-test sherlock::word_ending_n                 ... bench:   4,545,774 ns/iter (+/- 26,965) = 130 MB/s
-test sherlock::words                         ... bench:   5,493,039 ns/iter (+/- 16,767) = 108 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/pcre2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/pcre2
deleted file mode 100644
index c2bbd39e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/pcre2
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:          15 ns/iter (+/- 0) = 26000 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          12 ns/iter (+/- 0) = 32500 MB/s
-test misc::anchored_literal_short_match      ... bench:          14 ns/iter (+/- 0) = 1857 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          11 ns/iter (+/- 0) = 2363 MB/s
-test misc::easy0_1K                          ... bench:          81 ns/iter (+/- 4) = 12975 MB/s
-test misc::easy0_1MB                         ... bench:      60,199 ns/iter (+/- 658) = 17418 MB/s
-test misc::easy0_32                          ... bench:          28 ns/iter (+/- 0) = 2107 MB/s
-test misc::easy0_32K                         ... bench:       1,878 ns/iter (+/- 25) = 17462 MB/s
-test misc::easy1_1K                          ... bench:          81 ns/iter (+/- 0) = 12888 MB/s
-test misc::easy1_1MB                         ... bench:      59,222 ns/iter (+/- 598) = 17706 MB/s
-test misc::easy1_32                          ... bench:          28 ns/iter (+/- 0) = 1857 MB/s
-test misc::easy1_32K                         ... bench:       1,819 ns/iter (+/- 6) = 18025 MB/s
-test misc::hard_1K                           ... bench:       1,147 ns/iter (+/- 13) = 916 MB/s
-test misc::hard_1MB                          ... bench:     990,924 ns/iter (+/- 6,065) = 1058 MB/s
-test misc::hard_32                           ... bench:          82 ns/iter (+/- 3) = 719 MB/s
-test misc::hard_32K                          ... bench:      32,218 ns/iter (+/- 471) = 1017 MB/s
-test misc::literal                           ... bench:          15 ns/iter (+/- 0) = 3400 MB/s
-test misc::long_needle1                      ... bench:     464,061 ns/iter (+/- 2,241) = 215 MB/s
-test misc::long_needle2                      ... bench:     465,191 ns/iter (+/- 823) = 214 MB/s
-test misc::match_class                       ... bench:          46 ns/iter (+/- 1) = 1760 MB/s
-test misc::match_class_in_range              ... bench:          16 ns/iter (+/- 0) = 5062 MB/s
-test misc::match_class_unicode               ... bench:         246 ns/iter (+/- 0) = 654 MB/s
-test misc::medium_1K                         ... bench:         102 ns/iter (+/- 9) = 10313 MB/s
-test misc::medium_1MB                        ... bench:      60,042 ns/iter (+/- 585) = 17464 MB/s
-test misc::medium_32                         ... bench:          29 ns/iter (+/- 1) = 2068 MB/s
-test misc::medium_32K                        ... bench:       1,901 ns/iter (+/- 23) = 17251 MB/s
-test misc::not_literal                       ... bench:         122 ns/iter (+/- 2) = 418 MB/s
-test misc::one_pass_long_prefix              ... bench:          13 ns/iter (+/- 0) = 2000 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          13 ns/iter (+/- 0) = 2000 MB/s
-test misc::one_pass_short                    ... bench:          36 ns/iter (+/- 0) = 472 MB/s
-test misc::one_pass_short_not                ... bench:          36 ns/iter (+/- 0) = 472 MB/s
-test misc::reallyhard2_1K                    ... bench:       3,517 ns/iter (+/- 39) = 295 MB/s
-test misc::reallyhard_1K                     ... bench:       1,123 ns/iter (+/- 12) = 935 MB/s
-test misc::reallyhard_1MB                    ... bench:     992,521 ns/iter (+/- 13,407) = 1056 MB/s
-test misc::reallyhard_32                     ... bench:          71 ns/iter (+/- 0) = 830 MB/s
-test misc::reallyhard_32K                    ... bench:      30,626 ns/iter (+/- 206) = 1070 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       2,824 ns/iter (+/- 21) = 2832 MB/s
-test regexdna::find_new_lines                ... bench:   1,500,377 ns/iter (+/- 8,152) = 3388 MB/s
-test regexdna::subst1                        ... bench:     815,769 ns/iter (+/- 14,286) = 6231 MB/s
-test regexdna::subst10                       ... bench:     820,459 ns/iter (+/- 57,098) = 6195 MB/s
-test regexdna::subst11                       ... bench:     810,986 ns/iter (+/- 4,270) = 6268 MB/s
-test regexdna::subst2                        ... bench:     815,568 ns/iter (+/- 35,148) = 6232 MB/s
-test regexdna::subst3                        ... bench:     812,590 ns/iter (+/- 6,782) = 6255 MB/s
-test regexdna::subst4                        ... bench:     831,679 ns/iter (+/- 12,372) = 6112 MB/s
-test regexdna::subst5                        ... bench:     823,207 ns/iter (+/- 12,977) = 6175 MB/s
-test regexdna::subst6                        ... bench:     815,506 ns/iter (+/- 11,610) = 6233 MB/s
-test regexdna::subst7                        ... bench:     818,104 ns/iter (+/- 4,807) = 6213 MB/s
-test regexdna::subst8                        ... bench:     815,265 ns/iter (+/- 21,504) = 6235 MB/s
-test regexdna::subst9                        ... bench:     809,236 ns/iter (+/- 7,003) = 6281 MB/s
-test regexdna::variant1                      ... bench:   8,375,573 ns/iter (+/- 80,345) = 606 MB/s
-test regexdna::variant2                      ... bench:  11,207,698 ns/iter (+/- 45,582) = 453 MB/s
-test regexdna::variant3                      ... bench:  10,505,744 ns/iter (+/- 69,756) = 483 MB/s
-test regexdna::variant4                      ... bench:   9,276,177 ns/iter (+/- 50,904) = 548 MB/s
-test regexdna::variant5                      ... bench:   9,333,446 ns/iter (+/- 41,108) = 544 MB/s
-test regexdna::variant6                      ... bench:   9,865,395 ns/iter (+/- 26,010) = 515 MB/s
-test regexdna::variant7                      ... bench:  10,033,179 ns/iter (+/- 28,272) = 506 MB/s
-test regexdna::variant8                      ... bench:  10,752,604 ns/iter (+/- 37,714) = 472 MB/s
-test regexdna::variant9                      ... bench:  11,397,272 ns/iter (+/- 41,200) = 446 MB/s
-test sherlock::before_after_holmes           ... bench:   3,627,616 ns/iter (+/- 18,202) = 164 MB/s
-test sherlock::before_holmes                 ... bench:   3,614,713 ns/iter (+/- 18,191) = 164 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      68,419 ns/iter (+/- 918) = 8695 MB/s
-test sherlock::ing_suffix                    ... bench:   1,766,571 ns/iter (+/- 16,612) = 336 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   4,018,396 ns/iter (+/- 11,822) = 148 MB/s
-test sherlock::letters                       ... bench:   8,058,390 ns/iter (+/- 39,083) = 73 MB/s
-test sherlock::letters_lower                 ... bench:   8,014,051 ns/iter (+/- 33,500) = 74 MB/s
-test sherlock::letters_upper                 ... bench:   1,452,421 ns/iter (+/- 157,023) = 409 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:      36,248 ns/iter (+/- 252) = 16412 MB/s
-test sherlock::name_alt1                     ... bench:      45,538 ns/iter (+/- 235) = 13064 MB/s
-test sherlock::name_alt2                     ... bench:      62,202 ns/iter (+/- 892) = 9564 MB/s
-test sherlock::name_alt3                     ... bench:     623,900 ns/iter (+/- 3,139) = 953 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,518,464 ns/iter (+/- 31,943) = 236 MB/s
-test sherlock::name_alt4                     ... bench:      62,015 ns/iter (+/- 712) = 9593 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,162,489 ns/iter (+/- 14,622) = 511 MB/s
-test sherlock::name_alt5                     ... bench:     589,686 ns/iter (+/- 6,775) = 1008 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,359,066 ns/iter (+/- 7,487) = 437 MB/s
-test sherlock::name_holmes                   ... bench:      45,993 ns/iter (+/- 812) = 12935 MB/s
-test sherlock::name_holmes_nocase            ... bench:      82,326 ns/iter (+/- 758) = 7226 MB/s
-test sherlock::name_sherlock                 ... bench:      36,848 ns/iter (+/- 50) = 16145 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      36,778 ns/iter (+/- 621) = 16176 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     636,825 ns/iter (+/- 2,957) = 934 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     635,313 ns/iter (+/- 10,776) = 936 MB/s
-test sherlock::name_whitespace               ... bench:      37,360 ns/iter (+/- 132) = 15924 MB/s
-test sherlock::no_match_common               ... bench:      34,545 ns/iter (+/- 239) = 17221 MB/s
-test sherlock::no_match_really_common        ... bench:      49,019 ns/iter (+/- 590) = 12136 MB/s
-test sherlock::no_match_uncommon             ... bench:      34,410 ns/iter (+/- 182) = 17289 MB/s
-test sherlock::quotes                        ... bench:     414,599 ns/iter (+/- 3,528) = 1434 MB/s
-test sherlock::repeated_class_negation       ... bench:   5,106,885 ns/iter (+/- 23,660) = 116 MB/s
-test sherlock::the_lower                     ... bench:     234,135 ns/iter (+/- 3,821) = 2540 MB/s
-test sherlock::the_nocase                    ... bench:     261,765 ns/iter (+/- 2,272) = 2272 MB/s
-test sherlock::the_upper                     ... bench:      50,816 ns/iter (+/- 583) = 11707 MB/s
-test sherlock::the_whitespace                ... bench:     408,355 ns/iter (+/- 5,463) = 1456 MB/s
-test sherlock::word_ending_n                 ... bench:   4,367,721 ns/iter (+/- 55,474) = 136 MB/s
-test sherlock::words                         ... bench:   4,640,171 ns/iter (+/- 20,462) = 128 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/re2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/re2
deleted file mode 100644
index 6888bea..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/re2
+++ /dev/null
@@ -1,101 +0,0 @@
-
-running 96 tests
-test misc::anchored_literal_long_match       ... bench:          69 ns/iter (+/- 0) = 5652 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          15 ns/iter (+/- 0) = 26000 MB/s
-test misc::anchored_literal_short_match      ... bench:          69 ns/iter (+/- 0) = 376 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          15 ns/iter (+/- 0) = 1733 MB/s
-test misc::easy0_1K                          ... bench:         106 ns/iter (+/- 0) = 9915 MB/s
-test misc::easy0_1MB                         ... bench:      15,311 ns/iter (+/- 113) = 68486 MB/s
-test misc::easy0_32                          ... bench:         100 ns/iter (+/- 3) = 590 MB/s
-test misc::easy0_32K                         ... bench:         426 ns/iter (+/- 1) = 76983 MB/s
-test misc::easy1_1K                          ... bench:          98 ns/iter (+/- 0) = 10653 MB/s
-test misc::easy1_1MB                         ... bench:      15,299 ns/iter (+/- 136) = 68540 MB/s
-test misc::easy1_32                          ... bench:          91 ns/iter (+/- 0) = 571 MB/s
-test misc::easy1_32K                         ... bench:         413 ns/iter (+/- 5) = 79389 MB/s
-test misc::hard_1K                           ... bench:       1,815 ns/iter (+/- 43) = 579 MB/s
-test misc::hard_1MB                          ... bench:   1,842,293 ns/iter (+/- 10,227) = 569 MB/s
-test misc::hard_32                           ... bench:         146 ns/iter (+/- 4) = 404 MB/s
-test misc::hard_32K                          ... bench:      57,638 ns/iter (+/- 481) = 568 MB/s
-test misc::literal                           ... bench:          64 ns/iter (+/- 1) = 796 MB/s
-test misc::long_needle1                      ... bench:     122,154 ns/iter (+/- 840) = 818 MB/s
-test misc::long_needle2                      ... bench:     122,105 ns/iter (+/- 578) = 818 MB/s
-test misc::match_class                       ... bench:         178 ns/iter (+/- 0) = 455 MB/s
-test misc::match_class_in_range              ... bench:         178 ns/iter (+/- 2) = 455 MB/s
-test misc::match_class_unicode               ... bench:         293 ns/iter (+/- 2) = 549 MB/s
-test misc::medium_1K                         ... bench:       1,610 ns/iter (+/- 26) = 653 MB/s
-test misc::medium_1MB                        ... bench:   1,537,932 ns/iter (+/- 4,134) = 681 MB/s
-test misc::medium_32                         ... bench:         158 ns/iter (+/- 1) = 379 MB/s
-test misc::medium_32K                        ... bench:      48,172 ns/iter (+/- 390) = 680 MB/s
-test misc::no_exponential                    ... bench:         216 ns/iter (+/- 1) = 462 MB/s
-test misc::not_literal                       ... bench:         127 ns/iter (+/- 1) = 401 MB/s
-test misc::one_pass_long_prefix              ... bench:          64 ns/iter (+/- 0) = 406 MB/s
-test misc::one_pass_long_prefix_not          ... bench:         100 ns/iter (+/- 1) = 260 MB/s
-test misc::one_pass_short                    ... bench:          88 ns/iter (+/- 0) = 193 MB/s
-test misc::one_pass_short_not                ... bench:          86 ns/iter (+/- 0) = 197 MB/s
-test misc::reallyhard2_1K                    ... bench:       1,332 ns/iter (+/- 41) = 780 MB/s
-test misc::reallyhard_1K                     ... bench:       1,815 ns/iter (+/- 16) = 579 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,842,206 ns/iter (+/- 9,086) = 569 MB/s
-test misc::reallyhard_32                     ... bench:         149 ns/iter (+/- 1) = 395 MB/s
-test misc::reallyhard_32K                    ... bench:      57,591 ns/iter (+/- 101) = 569 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:      11,753 ns/iter (+/- 130) = 680 MB/s
-test regexdna::find_new_lines                ... bench:  24,330,235 ns/iter (+/- 374,274) = 208 MB/s
-test regexdna::subst1                        ... bench:   4,079,631 ns/iter (+/- 51,348) = 1246 MB/s
-test regexdna::subst10                       ... bench:   4,080,803 ns/iter (+/- 30,966) = 1245 MB/s
-test regexdna::subst11                       ... bench:   4,154,389 ns/iter (+/- 34,825) = 1223 MB/s
-test regexdna::subst2                        ... bench:   4,076,784 ns/iter (+/- 102,863) = 1246 MB/s
-test regexdna::subst3                        ... bench:   4,074,850 ns/iter (+/- 52,106) = 1247 MB/s
-test regexdna::subst4                        ... bench:   4,078,456 ns/iter (+/- 12,343) = 1246 MB/s
-test regexdna::subst5                        ... bench:   4,075,812 ns/iter (+/- 24,524) = 1247 MB/s
-test regexdna::subst6                        ... bench:   4,097,009 ns/iter (+/- 13,240) = 1240 MB/s
-test regexdna::subst7                        ... bench:   4,069,096 ns/iter (+/- 29,794) = 1249 MB/s
-test regexdna::subst8                        ... bench:   4,078,838 ns/iter (+/- 20,713) = 1246 MB/s
-test regexdna::subst9                        ... bench:   4,116,905 ns/iter (+/- 14,130) = 1234 MB/s
-test regexdna::variant1                      ... bench:  21,411,252 ns/iter (+/- 568,076) = 237 MB/s
-test regexdna::variant2                      ... bench:  21,082,571 ns/iter (+/- 92,912) = 241 MB/s
-test regexdna::variant3                      ... bench:  20,302,954 ns/iter (+/- 118,421) = 250 MB/s
-test regexdna::variant4                      ... bench:  21,290,669 ns/iter (+/- 102,527) = 238 MB/s
-test regexdna::variant5                      ... bench:  21,451,671 ns/iter (+/- 99,524) = 236 MB/s
-test regexdna::variant6                      ... bench:  21,057,017 ns/iter (+/- 530,904) = 241 MB/s
-test regexdna::variant7                      ... bench:  20,394,037 ns/iter (+/- 128,973) = 249 MB/s
-test regexdna::variant8                      ... bench:  17,839,069 ns/iter (+/- 122,671) = 284 MB/s
-test regexdna::variant9                      ... bench:  12,720,049 ns/iter (+/- 76,816) = 399 MB/s
-test sherlock::before_after_holmes           ... bench:   1,044,129 ns/iter (+/- 4,967) = 569 MB/s
-test sherlock::before_holmes                 ... bench:   1,067,879 ns/iter (+/- 11,345) = 557 MB/s
-test sherlock::everything_greedy             ... bench:   5,193,222 ns/iter (+/- 10,990) = 114 MB/s
-test sherlock::everything_greedy_nl          ... bench:   2,038,599 ns/iter (+/- 18,946) = 291 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     909,342 ns/iter (+/- 5,295) = 654 MB/s
-test sherlock::holmes_coword_watson          ... bench:     939,154 ns/iter (+/- 6,087) = 633 MB/s
-test sherlock::ing_suffix                    ... bench:   2,729,081 ns/iter (+/- 15,969) = 217 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,373,143 ns/iter (+/- 17,068) = 433 MB/s
-test sherlock::letters                       ... bench:  56,266,035 ns/iter (+/- 165,398) = 10 MB/s
-test sherlock::letters_lower                 ... bench:  54,590,671 ns/iter (+/- 138,842) = 10 MB/s
-test sherlock::letters_upper                 ... bench:   2,702,242 ns/iter (+/- 9,889) = 220 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   2,430,065 ns/iter (+/- 27,756) = 244 MB/s
-test sherlock::name_alt1                     ... bench:      45,514 ns/iter (+/- 403) = 13071 MB/s
-test sherlock::name_alt2                     ... bench:     975,861 ns/iter (+/- 11,553) = 609 MB/s
-test sherlock::name_alt3                     ... bench:   1,070,967 ns/iter (+/- 11,065) = 555 MB/s
-test sherlock::name_alt3_nocase              ... bench:   2,574,585 ns/iter (+/- 39,816) = 231 MB/s
-test sherlock::name_alt4                     ... bench:     978,776 ns/iter (+/- 25,503) = 607 MB/s
-test sherlock::name_alt4_nocase              ... bench:   1,643,230 ns/iter (+/- 27,685) = 362 MB/s
-test sherlock::name_alt5                     ... bench:     998,349 ns/iter (+/- 6,658) = 595 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,781,006 ns/iter (+/- 22,507) = 334 MB/s
-test sherlock::name_holmes                   ... bench:      92,561 ns/iter (+/- 1,358) = 6427 MB/s
-test sherlock::name_holmes_nocase            ... bench:   1,281,827 ns/iter (+/- 7,651) = 464 MB/s
-test sherlock::name_sherlock                 ... bench:      31,994 ns/iter (+/- 326) = 18595 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      34,272 ns/iter (+/- 445) = 17359 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,218,006 ns/iter (+/- 19,301) = 488 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,214,744 ns/iter (+/- 11,473) = 489 MB/s
-test sherlock::name_whitespace               ... bench:      35,455 ns/iter (+/- 412) = 16779 MB/s
-test sherlock::no_match_common               ... bench:     299,771 ns/iter (+/- 7,799) = 1984 MB/s
-test sherlock::no_match_really_common        ... bench:     299,595 ns/iter (+/- 926) = 1985 MB/s
-test sherlock::no_match_uncommon             ... bench:       9,803 ns/iter (+/- 139) = 60688 MB/s
-test sherlock::quotes                        ... bench:   1,033,423 ns/iter (+/- 9,177) = 575 MB/s
-test sherlock::the_lower                     ... bench:   1,454,358 ns/iter (+/- 75,304) = 409 MB/s
-test sherlock::the_nocase                    ... bench:   2,486,681 ns/iter (+/- 9,026) = 239 MB/s
-test sherlock::the_upper                     ... bench:     123,989 ns/iter (+/- 1,097) = 4798 MB/s
-test sherlock::the_whitespace                ... bench:   1,454,732 ns/iter (+/- 118,006) = 408 MB/s
-test sherlock::word_ending_n                 ... bench:   1,922,008 ns/iter (+/- 15,040) = 309 MB/s
-test sherlock::words                         ... bench:  16,054,888 ns/iter (+/- 90,684) = 37 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 96 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/rust
deleted file mode 100644
index f5e73a2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/rust
+++ /dev/null
@@ -1,113 +0,0 @@
-
-running 108 tests
-test misc::anchored_literal_long_match       ... bench:          15 ns/iter (+/- 0) = 26000 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          16 ns/iter (+/- 0) = 24375 MB/s
-test misc::anchored_literal_short_match      ... bench:          14 ns/iter (+/- 0) = 1857 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          16 ns/iter (+/- 0) = 1625 MB/s
-test misc::easy0_1K                          ... bench:          11 ns/iter (+/- 0) = 95545 MB/s
-test misc::easy0_1MB                         ... bench:          15 ns/iter (+/- 0) = 69906866 MB/s
-test misc::easy0_32                          ... bench:          11 ns/iter (+/- 0) = 5363 MB/s
-test misc::easy0_32K                         ... bench:          11 ns/iter (+/- 0) = 2981363 MB/s
-test misc::easy1_1K                          ... bench:          36 ns/iter (+/- 0) = 29000 MB/s
-test misc::easy1_1MB                         ... bench:          38 ns/iter (+/- 0) = 27594631 MB/s
-test misc::easy1_32                          ... bench:          39 ns/iter (+/- 0) = 1333 MB/s
-test misc::easy1_32K                         ... bench:          36 ns/iter (+/- 0) = 910777 MB/s
-test misc::hard_1K                           ... bench:          48 ns/iter (+/- 0) = 21895 MB/s
-test misc::hard_1MB                          ... bench:          51 ns/iter (+/- 0) = 20560843 MB/s
-test misc::hard_32                           ... bench:          48 ns/iter (+/- 1) = 1229 MB/s
-test misc::hard_32K                          ... bench:          48 ns/iter (+/- 0) = 683229 MB/s
-test misc::literal                           ... bench:          10 ns/iter (+/- 0) = 5100 MB/s
-test misc::long_needle1                      ... bench:         956 ns/iter (+/- 14) = 104603 MB/s
-test misc::long_needle2                      ... bench:     538,237 ns/iter (+/- 8,739) = 185 MB/s
-test misc::match_class                       ... bench:          57 ns/iter (+/- 0) = 1421 MB/s
-test misc::match_class_in_range              ... bench:          22 ns/iter (+/- 0) = 3681 MB/s
-test misc::match_class_unicode               ... bench:         245 ns/iter (+/- 4) = 657 MB/s
-test misc::medium_1K                         ... bench:          13 ns/iter (+/- 0) = 80923 MB/s
-test misc::medium_1MB                        ... bench:          15 ns/iter (+/- 0) = 69906933 MB/s
-test misc::medium_32                         ... bench:          12 ns/iter (+/- 0) = 5000 MB/s
-test misc::medium_32K                        ... bench:          12 ns/iter (+/- 0) = 2733000 MB/s
-test misc::no_exponential                    ... bench:         318 ns/iter (+/- 0) = 314 MB/s
-test misc::not_literal                       ... bench:          85 ns/iter (+/- 0) = 600 MB/s
-test misc::one_pass_long_prefix              ... bench:          48 ns/iter (+/- 0) = 541 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          48 ns/iter (+/- 0) = 541 MB/s
-test misc::one_pass_short                    ... bench:          34 ns/iter (+/- 0) = 500 MB/s
-test misc::one_pass_short_not                ... bench:          37 ns/iter (+/- 0) = 459 MB/s
-test misc::reallyhard2_1K                    ... bench:          51 ns/iter (+/- 0) = 20392 MB/s
-test misc::reallyhard_1K                     ... bench:       1,547 ns/iter (+/- 19) = 679 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,533,883 ns/iter (+/- 9,553) = 683 MB/s
-test misc::reallyhard_32                     ... bench:          96 ns/iter (+/- 0) = 614 MB/s
-test misc::reallyhard_32K                    ... bench:      47,989 ns/iter (+/- 198) = 683 MB/s
-test misc::replace_all                       ... bench:         136 ns/iter (+/- 0)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,016 ns/iter (+/- 21) = 1992 MB/s
-test misc::short_haystack_1000000x           ... bench:      64,033 ns/iter (+/- 470) = 124935 MB/s
-test misc::short_haystack_100000x            ... bench:       6,472 ns/iter (+/- 44) = 123611 MB/s
-test misc::short_haystack_10000x             ... bench:       1,906 ns/iter (+/- 49) = 41978 MB/s
-test misc::short_haystack_1000x              ... bench:         362 ns/iter (+/- 1) = 22129 MB/s
-test misc::short_haystack_100x               ... bench:         259 ns/iter (+/- 2) = 3131 MB/s
-test misc::short_haystack_10x                ... bench:         228 ns/iter (+/- 0) = 399 MB/s
-test misc::short_haystack_1x                 ... bench:         223 ns/iter (+/- 2) = 85 MB/s
-test misc::short_haystack_2x                 ... bench:         224 ns/iter (+/- 2) = 120 MB/s
-test misc::short_haystack_3x                 ... bench:         221 ns/iter (+/- 2) = 158 MB/s
-test misc::short_haystack_4x                 ... bench:         223 ns/iter (+/- 2) = 192 MB/s
-test regexdna::find_new_lines                ... bench:  11,885,905 ns/iter (+/- 23,501) = 427 MB/s
-test regexdna::subst1                        ... bench:     712,544 ns/iter (+/- 16,100) = 7134 MB/s
-test regexdna::subst10                       ... bench:     709,739 ns/iter (+/- 8,467) = 7162 MB/s
-test regexdna::subst11                       ... bench:     714,261 ns/iter (+/- 8,495) = 7117 MB/s
-test regexdna::subst2                        ... bench:     711,197 ns/iter (+/- 14,736) = 7147 MB/s
-test regexdna::subst3                        ... bench:     718,083 ns/iter (+/- 5,050) = 7079 MB/s
-test regexdna::subst4                        ... bench:     725,196 ns/iter (+/- 20,044) = 7009 MB/s
-test regexdna::subst5                        ... bench:     709,301 ns/iter (+/- 10,961) = 7166 MB/s
-test regexdna::subst6                        ... bench:     715,658 ns/iter (+/- 16,431) = 7103 MB/s
-test regexdna::subst7                        ... bench:     707,472 ns/iter (+/- 5,764) = 7185 MB/s
-test regexdna::subst8                        ... bench:     707,300 ns/iter (+/- 19,545) = 7187 MB/s
-test regexdna::subst9                        ... bench:     709,950 ns/iter (+/- 11,319) = 7160 MB/s
-test regexdna::variant1                      ... bench:   2,498,980 ns/iter (+/- 67,933) = 2034 MB/s
-test regexdna::variant2                      ... bench:   5,544,923 ns/iter (+/- 31,911) = 916 MB/s
-test regexdna::variant3                      ... bench:   6,441,568 ns/iter (+/- 20,197) = 789 MB/s
-test regexdna::variant4                      ... bench:   6,421,276 ns/iter (+/- 161,499) = 791 MB/s
-test regexdna::variant5                      ... bench:   5,093,567 ns/iter (+/- 18,696) = 998 MB/s
-test regexdna::variant6                      ... bench:   5,094,859 ns/iter (+/- 22,894) = 997 MB/s
-test regexdna::variant7                      ... bench:   4,540,111 ns/iter (+/- 11,863) = 1119 MB/s
-test regexdna::variant8                      ... bench:   4,636,741 ns/iter (+/- 23,448) = 1096 MB/s
-test regexdna::variant9                      ... bench:   4,557,500 ns/iter (+/- 16,168) = 1115 MB/s
-test sherlock::before_after_holmes           ... bench:     880,959 ns/iter (+/- 3,004) = 675 MB/s
-test sherlock::before_holmes                 ... bench:      54,416 ns/iter (+/- 1,099) = 10933 MB/s
-test sherlock::everything_greedy             ... bench:   1,736,180 ns/iter (+/- 9,410) = 342 MB/s
-test sherlock::everything_greedy_nl          ... bench:     783,848 ns/iter (+/- 19,640) = 758 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      90,085 ns/iter (+/- 499) = 6604 MB/s
-test sherlock::holmes_coword_watson          ... bench:     459,431 ns/iter (+/- 830) = 1294 MB/s
-test sherlock::ing_suffix                    ... bench:     348,103 ns/iter (+/- 9,052) = 1709 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,017,387 ns/iter (+/- 3,906) = 584 MB/s
-test sherlock::letters                       ... bench:  18,265,074 ns/iter (+/- 463,241) = 32 MB/s
-test sherlock::letters_lower                 ... bench:  17,846,209 ns/iter (+/- 431,089) = 33 MB/s
-test sherlock::letters_upper                 ... bench:   1,594,743 ns/iter (+/- 3,151) = 373 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     873,308 ns/iter (+/- 1,379) = 681 MB/s
-test sherlock::name_alt1                     ... bench:      21,144 ns/iter (+/- 315) = 28137 MB/s
-test sherlock::name_alt2                     ... bench:      71,354 ns/iter (+/- 1,432) = 8337 MB/s
-test sherlock::name_alt3                     ... bench:      79,167 ns/iter (+/- 294) = 7514 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,111,300 ns/iter (+/- 4,434) = 535 MB/s
-test sherlock::name_alt4                     ... bench:     100,864 ns/iter (+/- 570) = 5898 MB/s
-test sherlock::name_alt4_nocase              ... bench:     157,266 ns/iter (+/- 4,048) = 3782 MB/s
-test sherlock::name_alt5                     ... bench:      74,375 ns/iter (+/- 576) = 7999 MB/s
-test sherlock::name_alt5_nocase              ... bench:     467,879 ns/iter (+/- 2,115) = 1271 MB/s
-test sherlock::name_holmes                   ... bench:      26,856 ns/iter (+/- 345) = 22152 MB/s
-test sherlock::name_holmes_nocase            ... bench:     124,140 ns/iter (+/- 1,111) = 4792 MB/s
-test sherlock::name_sherlock                 ... bench:      52,330 ns/iter (+/- 316) = 11368 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      19,646 ns/iter (+/- 355) = 30282 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     107,973 ns/iter (+/- 508) = 5510 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     105,141 ns/iter (+/- 426) = 5658 MB/s
-test sherlock::name_whitespace               ... bench:      61,149 ns/iter (+/- 350) = 9729 MB/s
-test sherlock::no_match_common               ... bench:      11,735 ns/iter (+/- 185) = 50697 MB/s
-test sherlock::no_match_really_common        ... bench:     274,089 ns/iter (+/- 617) = 2170 MB/s
-test sherlock::no_match_uncommon             ... bench:      11,581 ns/iter (+/- 298) = 51371 MB/s
-test sherlock::quotes                        ... bench:     447,749 ns/iter (+/- 1,173) = 1328 MB/s
-test sherlock::repeated_class_negation       ... bench:  69,119,491 ns/iter (+/- 117,739) = 8 MB/s
-test sherlock::the_lower                     ... bench:     492,559 ns/iter (+/- 1,674) = 1207 MB/s
-test sherlock::the_nocase                    ... bench:     341,445 ns/iter (+/- 6,455) = 1742 MB/s
-test sherlock::the_upper                     ... bench:      30,555 ns/iter (+/- 168) = 19470 MB/s
-test sherlock::the_whitespace                ... bench:     950,630 ns/iter (+/- 25,179) = 625 MB/s
-test sherlock::word_ending_n                 ... bench:   1,551,930 ns/iter (+/- 17,792) = 383 MB/s
-test sherlock::words                         ... bench:   7,229,870 ns/iter (+/- 25,046) = 82 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 108 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/rust-bytes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/rust-bytes
deleted file mode 100644
index 310d775..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/rust-bytes
+++ /dev/null
@@ -1,101 +0,0 @@
-
-running 96 tests
-test misc::anchored_literal_long_match       ... bench:          16 ns/iter (+/- 0) = 24375 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          16 ns/iter (+/- 0) = 24375 MB/s
-test misc::anchored_literal_short_match      ... bench:          14 ns/iter (+/- 0) = 1857 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          16 ns/iter (+/- 0) = 1625 MB/s
-test misc::easy0_1K                          ... bench:          11 ns/iter (+/- 0) = 95545 MB/s
-test misc::easy0_1MB                         ... bench:          14 ns/iter (+/- 0) = 74900214 MB/s
-test misc::easy0_32                          ... bench:          11 ns/iter (+/- 0) = 5363 MB/s
-test misc::easy0_32K                         ... bench:          11 ns/iter (+/- 0) = 2981363 MB/s
-test misc::easy1_1K                          ... bench:          36 ns/iter (+/- 0) = 29000 MB/s
-test misc::easy1_1MB                         ... bench:          38 ns/iter (+/- 0) = 27594631 MB/s
-test misc::easy1_32                          ... bench:          36 ns/iter (+/- 0) = 1444 MB/s
-test misc::easy1_32K                         ... bench:          36 ns/iter (+/- 0) = 910777 MB/s
-test misc::hard_1K                           ... bench:          46 ns/iter (+/- 0) = 22847 MB/s
-test misc::hard_1MB                          ... bench:          49 ns/iter (+/- 0) = 21400061 MB/s
-test misc::hard_32                           ... bench:          46 ns/iter (+/- 0) = 1282 MB/s
-test misc::hard_32K                          ... bench:          46 ns/iter (+/- 0) = 712934 MB/s
-test misc::literal                           ... bench:          10 ns/iter (+/- 0) = 5100 MB/s
-test misc::long_needle1                      ... bench:       1,119 ns/iter (+/- 22) = 89366 MB/s
-test misc::long_needle2                      ... bench:     535,168 ns/iter (+/- 2,976) = 186 MB/s
-test misc::match_class                       ... bench:          67 ns/iter (+/- 0) = 1208 MB/s
-test misc::match_class_in_range              ... bench:          21 ns/iter (+/- 0) = 3857 MB/s
-test misc::medium_1K                         ... bench:          12 ns/iter (+/- 0) = 87666 MB/s
-test misc::medium_1MB                        ... bench:          16 ns/iter (+/- 0) = 65537750 MB/s
-test misc::medium_32                         ... bench:          12 ns/iter (+/- 0) = 5000 MB/s
-test misc::medium_32K                        ... bench:          12 ns/iter (+/- 0) = 2733000 MB/s
-test misc::no_exponential                    ... bench:         320 ns/iter (+/- 3) = 312 MB/s
-test misc::not_literal                       ... bench:          86 ns/iter (+/- 0) = 593 MB/s
-test misc::one_pass_long_prefix              ... bench:          48 ns/iter (+/- 0) = 541 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          48 ns/iter (+/- 0) = 541 MB/s
-test misc::one_pass_short                    ... bench:          34 ns/iter (+/- 0) = 500 MB/s
-test misc::one_pass_short_not                ... bench:          37 ns/iter (+/- 0) = 459 MB/s
-test misc::reallyhard2_1K                    ... bench:          50 ns/iter (+/- 0) = 20800 MB/s
-test misc::reallyhard_1K                     ... bench:       1,548 ns/iter (+/- 0) = 678 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,534,068 ns/iter (+/- 14,813) = 683 MB/s
-test misc::reallyhard_32                     ... bench:          98 ns/iter (+/- 1) = 602 MB/s
-test misc::reallyhard_32K                    ... bench:      48,003 ns/iter (+/- 128) = 683 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,015 ns/iter (+/- 11) = 1992 MB/s
-test regexdna::find_new_lines                ... bench:  11,859,603 ns/iter (+/- 22,707) = 428 MB/s
-test regexdna::subst1                        ... bench:     717,255 ns/iter (+/- 3,261) = 7087 MB/s
-test regexdna::subst10                       ... bench:     719,600 ns/iter (+/- 4,712) = 7064 MB/s
-test regexdna::subst11                       ... bench:     708,612 ns/iter (+/- 6,314) = 7173 MB/s
-test regexdna::subst2                        ... bench:     715,174 ns/iter (+/- 5,097) = 7107 MB/s
-test regexdna::subst3                        ... bench:     711,261 ns/iter (+/- 12,051) = 7147 MB/s
-test regexdna::subst4                        ... bench:     761,920 ns/iter (+/- 4,924) = 6671 MB/s
-test regexdna::subst5                        ... bench:     740,755 ns/iter (+/- 12,762) = 6862 MB/s
-test regexdna::subst6                        ... bench:     713,936 ns/iter (+/- 7,103) = 7120 MB/s
-test regexdna::subst7                        ... bench:     710,142 ns/iter (+/- 5,377) = 7158 MB/s
-test regexdna::subst8                        ... bench:     712,154 ns/iter (+/- 4,485) = 7138 MB/s
-test regexdna::subst9                        ... bench:     713,214 ns/iter (+/- 6,830) = 7127 MB/s
-test regexdna::variant1                      ... bench:   2,448,709 ns/iter (+/- 10,799) = 2075 MB/s
-test regexdna::variant2                      ... bench:   5,541,606 ns/iter (+/- 26,197) = 917 MB/s
-test regexdna::variant3                      ... bench:   6,563,736 ns/iter (+/- 163,805) = 774 MB/s
-test regexdna::variant4                      ... bench:   6,428,096 ns/iter (+/- 38,372) = 790 MB/s
-test regexdna::variant5                      ... bench:   5,110,667 ns/iter (+/- 141,363) = 994 MB/s
-test regexdna::variant6                      ... bench:   5,086,936 ns/iter (+/- 25,675) = 999 MB/s
-test regexdna::variant7                      ... bench:   4,607,360 ns/iter (+/- 31,834) = 1103 MB/s
-test regexdna::variant8                      ... bench:   4,636,550 ns/iter (+/- 11,143) = 1096 MB/s
-test regexdna::variant9                      ... bench:   4,534,765 ns/iter (+/- 18,435) = 1120 MB/s
-test sherlock::before_after_holmes           ... bench:     880,980 ns/iter (+/- 1,386) = 675 MB/s
-test sherlock::before_holmes                 ... bench:      56,626 ns/iter (+/- 612) = 10506 MB/s
-test sherlock::everything_greedy             ... bench:   1,715,022 ns/iter (+/- 7,374) = 346 MB/s
-test sherlock::everything_greedy_nl          ... bench:     778,398 ns/iter (+/- 6,195) = 764 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      91,093 ns/iter (+/- 266) = 6531 MB/s
-test sherlock::holmes_coword_watson          ... bench:     457,793 ns/iter (+/- 3,094) = 1299 MB/s
-test sherlock::ing_suffix                    ... bench:     348,696 ns/iter (+/- 2,174) = 1706 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,017,664 ns/iter (+/- 8,581) = 584 MB/s
-test sherlock::letters                       ... bench:  19,098,779 ns/iter (+/- 36,233) = 31 MB/s
-test sherlock::letters_lower                 ... bench:  17,748,386 ns/iter (+/- 37,835) = 33 MB/s
-test sherlock::letters_upper                 ... bench:   1,592,729 ns/iter (+/- 2,977) = 373 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     873,365 ns/iter (+/- 1,399) = 681 MB/s
-test sherlock::name_alt1                     ... bench:      21,965 ns/iter (+/- 336) = 27085 MB/s
-test sherlock::name_alt2                     ... bench:      73,887 ns/iter (+/- 107) = 8051 MB/s
-test sherlock::name_alt3                     ... bench:      79,186 ns/iter (+/- 274) = 7513 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,111,949 ns/iter (+/- 3,589) = 535 MB/s
-test sherlock::name_alt4                     ... bench:     102,493 ns/iter (+/- 959) = 5804 MB/s
-test sherlock::name_alt4_nocase              ... bench:     158,438 ns/iter (+/- 946) = 3754 MB/s
-test sherlock::name_alt5                     ... bench:      74,362 ns/iter (+/- 139) = 8000 MB/s
-test sherlock::name_alt5_nocase              ... bench:     469,720 ns/iter (+/- 5,941) = 1266 MB/s
-test sherlock::name_holmes                   ... bench:      28,919 ns/iter (+/- 372) = 20572 MB/s
-test sherlock::name_holmes_nocase            ... bench:     123,251 ns/iter (+/- 786) = 4827 MB/s
-test sherlock::name_sherlock                 ... bench:      53,032 ns/iter (+/- 487) = 11218 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      20,566 ns/iter (+/- 280) = 28927 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     108,166 ns/iter (+/- 303) = 5500 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     105,034 ns/iter (+/- 797) = 5664 MB/s
-test sherlock::name_whitespace               ... bench:      60,968 ns/iter (+/- 490) = 9758 MB/s
-test sherlock::no_match_common               ... bench:      12,191 ns/iter (+/- 128) = 48801 MB/s
-test sherlock::no_match_really_common        ... bench:     274,528 ns/iter (+/- 1,101) = 2167 MB/s
-test sherlock::no_match_uncommon             ... bench:      12,197 ns/iter (+/- 191) = 48776 MB/s
-test sherlock::quotes                        ... bench:     446,264 ns/iter (+/- 5,936) = 1333 MB/s
-test sherlock::repeated_class_negation       ... bench:  69,728,764 ns/iter (+/- 155,104) = 8 MB/s
-test sherlock::the_lower                     ... bench:     493,734 ns/iter (+/- 5,997) = 1204 MB/s
-test sherlock::the_nocase                    ... bench:     339,088 ns/iter (+/- 3,760) = 1754 MB/s
-test sherlock::the_upper                     ... bench:      30,957 ns/iter (+/- 313) = 19218 MB/s
-test sherlock::the_whitespace                ... bench:     921,059 ns/iter (+/- 8,102) = 645 MB/s
-test sherlock::word_ending_n                 ... bench:   1,530,899 ns/iter (+/- 18,006) = 388 MB/s
-test sherlock::words                         ... bench:   6,959,355 ns/iter (+/- 31,671) = 85 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 96 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/stdcpp b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/stdcpp
deleted file mode 100644
index 57c25ae..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/stdcpp
+++ /dev/null
@@ -1,87 +0,0 @@
-
-running 82 tests
-test misc::anchored_literal_long_match      ... bench:         142 ns/iter (+/- 0) = 2746 MB/s
-test misc::anchored_literal_long_non_match  ... bench:       5,504 ns/iter (+/- 20) = 70 MB/s
-test misc::anchored_literal_short_match     ... bench:         143 ns/iter (+/- 0) = 181 MB/s
-test misc::anchored_literal_short_non_match ... bench:         471 ns/iter (+/- 1) = 55 MB/s
-test misc::easy0_1K                         ... bench:      14,534 ns/iter (+/- 87) = 72 MB/s
-test misc::easy0_1MB                        ... bench:  14,554,912 ns/iter (+/- 33,264) = 72 MB/s
-test misc::easy0_32                         ... bench:         730 ns/iter (+/- 1) = 80 MB/s
-test misc::easy0_32K                        ... bench:     454,911 ns/iter (+/- 526) = 72 MB/s
-test misc::easy1_1K                         ... bench:      14,486 ns/iter (+/- 45) = 72 MB/s
-test misc::easy1_1MB                        ... bench:  14,555,850 ns/iter (+/- 108,290) = 72 MB/s
-test misc::easy1_32                         ... bench:         692 ns/iter (+/- 1) = 75 MB/s
-test misc::easy1_32K                        ... bench:     456,269 ns/iter (+/- 2,856) = 71 MB/s
-test misc::hard_1K                          ... bench:     299,581 ns/iter (+/- 7,493) = 3 MB/s
-test misc::hard_1MB                         ... bench: 314,289,240 ns/iter (+/- 128,869) = 3 MB/s
-test misc::hard_32                          ... bench:       9,202 ns/iter (+/- 17) = 6 MB/s
-test misc::hard_32K                         ... bench:   9,777,807 ns/iter (+/- 19,451) = 3 MB/s
-test misc::literal                          ... bench:         804 ns/iter (+/- 2) = 63 MB/s
-test misc::long_needle1                     ... bench:  15,712,941 ns/iter (+/- 23,893) = 6 MB/s
-test misc::long_needle2                     ... bench:  15,955,109 ns/iter (+/- 26,652) = 6 MB/s
-test misc::match_class                      ... bench:       1,250 ns/iter (+/- 4) = 64 MB/s
-test misc::match_class_in_range             ... bench:       1,250 ns/iter (+/- 4) = 64 MB/s
-test misc::medium_1K                        ... bench:      14,913 ns/iter (+/- 108) = 70 MB/s
-test misc::medium_1MB                       ... bench:  14,929,542 ns/iter (+/- 38,890) = 70 MB/s
-test misc::medium_32                        ... bench:         736 ns/iter (+/- 0) = 81 MB/s
-test misc::medium_32K                       ... bench:     466,504 ns/iter (+/- 1,488) = 70 MB/s
-test misc::not_literal                      ... bench:       1,015 ns/iter (+/- 8) = 50 MB/s
-test misc::one_pass_long_prefix             ... bench:         262 ns/iter (+/- 0) = 99 MB/s
-test misc::one_pass_long_prefix_not         ... bench:         263 ns/iter (+/- 3) = 98 MB/s
-test misc::one_pass_short                   ... bench:         502 ns/iter (+/- 2) = 33 MB/s
-test misc::one_pass_short_not               ... bench:         498 ns/iter (+/- 0) = 34 MB/s
-test misc::reallyhard2_1K                   ... bench:     304,485 ns/iter (+/- 762) = 3 MB/s
-test misc::reallyhard_1K                    ... bench:     292,315 ns/iter (+/- 1,985) = 3 MB/s
-test misc::reallyhard_1MB                   ... bench: 313,208,610 ns/iter (+/- 163,013) = 3 MB/s
-test misc::reallyhard_32                    ... bench:       9,232 ns/iter (+/- 21) = 6 MB/s
-test misc::reallyhard_32K                   ... bench:   9,952,463 ns/iter (+/- 22,317) = 3 MB/s
-test misc::reverse_suffix_no_quadratic      ... bench:     114,029 ns/iter (+/- 734) = 70 MB/s
-test regexdna::find_new_lines               ... bench: 121,481,845 ns/iter (+/- 289,966) = 41 MB/s
-test regexdna::subst1                       ... bench:  73,580,323 ns/iter (+/- 82,998) = 69 MB/s
-test regexdna::subst10                      ... bench:  73,588,543 ns/iter (+/- 95,250) = 69 MB/s
-test regexdna::subst11                      ... bench:  73,592,436 ns/iter (+/- 86,358) = 69 MB/s
-test regexdna::subst2                       ... bench:  73,581,323 ns/iter (+/- 88,210) = 69 MB/s
-test regexdna::subst3                       ... bench:  73,577,422 ns/iter (+/- 48,215) = 69 MB/s
-test regexdna::subst4                       ... bench:  73,586,896 ns/iter (+/- 82,117) = 69 MB/s
-test regexdna::subst5                       ... bench:  73,652,696 ns/iter (+/- 95,155) = 69 MB/s
-test regexdna::subst6                       ... bench:  74,633,620 ns/iter (+/- 74,754) = 68 MB/s
-test regexdna::subst7                       ... bench:  73,586,338 ns/iter (+/- 82,645) = 69 MB/s
-test regexdna::subst8                       ... bench:  75,009,572 ns/iter (+/- 116,800) = 67 MB/s
-test regexdna::subst9                       ... bench:  73,581,469 ns/iter (+/- 146,286) = 69 MB/s
-test regexdna::variant1                     ... bench: 140,768,740 ns/iter (+/- 113,580) = 36 MB/s
-test regexdna::variant2                     ... bench: 153,330,005 ns/iter (+/- 11,581,095) = 33 MB/s
-test regexdna::variant3                     ... bench: 145,484,512 ns/iter (+/- 150,566) = 34 MB/s
-test regexdna::variant4                     ... bench: 141,659,767 ns/iter (+/- 123,940) = 35 MB/s
-test regexdna::variant5                     ... bench: 145,309,207 ns/iter (+/- 129,675) = 34 MB/s
-test regexdna::variant6                     ... bench: 141,145,017 ns/iter (+/- 164,414) = 36 MB/s
-test regexdna::variant7                     ... bench: 141,897,206 ns/iter (+/- 212,981) = 35 MB/s
-test regexdna::variant8                     ... bench: 150,467,139 ns/iter (+/- 120,619) = 33 MB/s
-test regexdna::variant9                     ... bench: 151,635,430 ns/iter (+/- 128,912) = 33 MB/s
-test sherlock::before_after_holmes          ... bench:  36,941,681 ns/iter (+/- 36,199) = 16 MB/s
-test sherlock::before_holmes                ... bench:  36,920,860 ns/iter (+/- 38,258) = 16 MB/s
-test sherlock::everything_greedy            ... bench:   9,047,684 ns/iter (+/- 18,290) = 65 MB/s
-test sherlock::holmes_cochar_watson         ... bench:  12,634,723 ns/iter (+/- 36,086) = 47 MB/s
-test sherlock::ing_suffix                   ... bench:  30,232,323 ns/iter (+/- 49,084) = 19 MB/s
-test sherlock::ing_suffix_limited_space     ... bench:  18,837,733 ns/iter (+/- 39,569) = 31 MB/s
-test sherlock::name_alt1                    ... bench:  12,462,918 ns/iter (+/- 17,158) = 47 MB/s
-test sherlock::name_alt2                    ... bench:  12,490,419 ns/iter (+/- 26,214) = 47 MB/s
-test sherlock::name_alt3                    ... bench:  33,156,941 ns/iter (+/- 47,236) = 17 MB/s
-test sherlock::name_alt4                    ... bench:  12,583,828 ns/iter (+/- 26,121) = 47 MB/s
-test sherlock::name_alt5                    ... bench:  16,615,345 ns/iter (+/- 22,930) = 35 MB/s
-test sherlock::name_holmes                  ... bench:   8,307,917 ns/iter (+/- 17,452) = 71 MB/s
-test sherlock::name_sherlock                ... bench:   8,273,395 ns/iter (+/- 25,717) = 71 MB/s
-test sherlock::name_sherlock_holmes         ... bench:   8,270,000 ns/iter (+/- 19,702) = 71 MB/s
-test sherlock::name_whitespace              ... bench:   8,453,784 ns/iter (+/- 19,604) = 70 MB/s
-test sherlock::no_match_common              ... bench:   8,679,069 ns/iter (+/- 27,721) = 68 MB/s
-test sherlock::no_match_really_common       ... bench:   8,679,099 ns/iter (+/- 17,665) = 68 MB/s
-test sherlock::no_match_uncommon            ... bench:   8,260,259 ns/iter (+/- 147,913) = 72 MB/s
-test sherlock::quotes                       ... bench:  10,257,367 ns/iter (+/- 25,054) = 58 MB/s
-test sherlock::repeated_class_negation      ... bench:  25,374,678 ns/iter (+/- 23,494) = 23 MB/s
-test sherlock::the_lower                    ... bench:   9,424,206 ns/iter (+/- 23,231) = 63 MB/s
-test sherlock::the_upper                    ... bench:   8,350,015 ns/iter (+/- 23,176) = 71 MB/s
-test sherlock::the_whitespace               ... bench:   9,285,991 ns/iter (+/- 16,835) = 64 MB/s
-test sherlock::word_ending_n                ... bench:  69,609,427 ns/iter (+/- 52,974) = 8 MB/s
-test sherlock::words                        ... bench:  20,107,601 ns/iter (+/- 36,086) = 29 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 82 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/stdcpp-libcxx b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/stdcpp-libcxx
deleted file mode 100644
index ff21e67..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/stdcpp-libcxx
+++ /dev/null
@@ -1,87 +0,0 @@
-
-running 82 tests
-test misc::anchored_literal_long_match      ... bench:         162 ns/iter (+/- 0) = 2407 MB/s
-test misc::anchored_literal_long_non_match  ... bench:      21,901 ns/iter (+/- 140) = 17 MB/s
-test misc::anchored_literal_short_match     ... bench:         162 ns/iter (+/- 0) = 160 MB/s
-test misc::anchored_literal_short_non_match ... bench:       1,501 ns/iter (+/- 1) = 17 MB/s
-test misc::easy0_1K                         ... bench:      39,405 ns/iter (+/- 250) = 26 MB/s
-test misc::easy0_1MB                        ... bench:  39,706,678 ns/iter (+/- 103,211) = 26 MB/s
-test misc::easy0_32                         ... bench:       1,415 ns/iter (+/- 3) = 41 MB/s
-test misc::easy0_32K                        ... bench:   1,241,085 ns/iter (+/- 5,625) = 26 MB/s
-test misc::easy1_1K                         ... bench:      39,421 ns/iter (+/- 275) = 26 MB/s
-test misc::easy1_1MB                        ... bench:  39,725,158 ns/iter (+/- 64,488) = 26 MB/s
-test misc::easy1_32                         ... bench:       1,421 ns/iter (+/- 8) = 36 MB/s
-test misc::easy1_32K                        ... bench:   1,240,953 ns/iter (+/- 5,794) = 26 MB/s
-test misc::hard_1K                          ... bench:   1,263,948 ns/iter (+/- 31,771)
-test misc::hard_1MB                         ... bench: 1,331,000,673 ns/iter (+/- 7,401,131)
-test misc::hard_32                          ... bench:      37,752 ns/iter (+/- 109) = 1 MB/s
-test misc::hard_32K                         ... bench:  41,044,286 ns/iter (+/- 57,765)
-test misc::literal                          ... bench:       1,980 ns/iter (+/- 7) = 25 MB/s
-test misc::long_needle1                     ... bench:  12,425,121 ns/iter (+/- 36,611) = 8 MB/s
-test misc::long_needle2                     ... bench:  12,568,992 ns/iter (+/- 28,513) = 7 MB/s
-test misc::match_class                      ... bench:       3,918 ns/iter (+/- 67) = 20 MB/s
-test misc::match_class_in_range             ... bench:       3,534 ns/iter (+/- 11) = 22 MB/s
-test misc::medium_1K                        ... bench:      44,910 ns/iter (+/- 167) = 23 MB/s
-test misc::medium_1MB                       ... bench:  45,558,328 ns/iter (+/- 77,166) = 23 MB/s
-test misc::medium_32                        ... bench:       1,599 ns/iter (+/- 12) = 37 MB/s
-test misc::medium_32K                       ... bench:   1,423,945 ns/iter (+/- 9,468) = 23 MB/s
-test misc::not_literal                      ... bench:       2,051 ns/iter (+/- 16) = 24 MB/s
-test misc::one_pass_long_prefix             ... bench:         222 ns/iter (+/- 0) = 117 MB/s
-test misc::one_pass_long_prefix_not         ... bench:         223 ns/iter (+/- 0) = 116 MB/s
-test misc::one_pass_short                   ... bench:       2,002 ns/iter (+/- 37) = 8 MB/s
-test misc::one_pass_short_not               ... bench:       1,990 ns/iter (+/- 6) = 8 MB/s
-test misc::reallyhard2_1K                   ... bench:   1,335,845 ns/iter (+/- 6,233)
-test misc::reallyhard_1K                    ... bench:   1,208,846 ns/iter (+/- 6,070)
-test misc::reallyhard_1MB                   ... bench: 1,291,183,401 ns/iter (+/- 4,281,775)
-test misc::reallyhard_32                    ... bench:      36,521 ns/iter (+/- 157) = 1 MB/s
-test misc::reallyhard_32K                   ... bench:  40,131,467 ns/iter (+/- 66,846)
-test misc::reverse_suffix_no_quadratic      ... bench:     506,352 ns/iter (+/- 632) = 15 MB/s
-test regexdna::find_new_lines               ... bench: 510,954,670 ns/iter (+/- 1,946,366) = 9 MB/s
-test regexdna::subst1                       ... bench: 198,786,137 ns/iter (+/- 240,963) = 25 MB/s
-test regexdna::subst10                      ... bench: 198,733,597 ns/iter (+/- 770,484) = 25 MB/s
-test regexdna::subst11                      ... bench: 198,734,922 ns/iter (+/- 198,116) = 25 MB/s
-test regexdna::subst2                       ... bench: 198,735,715 ns/iter (+/- 235,337) = 25 MB/s
-test regexdna::subst3                       ... bench: 198,736,727 ns/iter (+/- 157,633) = 25 MB/s
-test regexdna::subst4                       ... bench: 198,811,880 ns/iter (+/- 1,502,214) = 25 MB/s
-test regexdna::subst5                       ... bench: 198,697,281 ns/iter (+/- 211,978) = 25 MB/s
-test regexdna::subst6                       ... bench: 198,714,239 ns/iter (+/- 1,187,050) = 25 MB/s
-test regexdna::subst7                       ... bench: 199,021,730 ns/iter (+/- 1,555,969) = 25 MB/s
-test regexdna::subst8                       ... bench: 199,033,133 ns/iter (+/- 213,859) = 25 MB/s
-test regexdna::subst9                       ... bench: 199,466,527 ns/iter (+/- 1,394,750) = 25 MB/s
-test regexdna::variant1                     ... bench: 403,588,578 ns/iter (+/- 493,905) = 12 MB/s
-test regexdna::variant2                     ... bench: 440,582,945 ns/iter (+/- 305,836) = 11 MB/s
-test regexdna::variant3                     ... bench: 417,460,804 ns/iter (+/- 1,858,105) = 12 MB/s
-test regexdna::variant4                     ... bench: 407,209,088 ns/iter (+/- 1,374,513) = 12 MB/s
-test regexdna::variant5                     ... bench: 408,665,895 ns/iter (+/- 338,946) = 12 MB/s
-test regexdna::variant6                     ... bench: 408,640,565 ns/iter (+/- 1,895,287) = 12 MB/s
-test regexdna::variant7                     ... bench: 406,340,097 ns/iter (+/- 2,309,358) = 12 MB/s
-test regexdna::variant8                     ... bench: 413,195,331 ns/iter (+/- 2,178,194) = 12 MB/s
-test regexdna::variant9                     ... bench: 438,844,927 ns/iter (+/- 2,589,599) = 11 MB/s
-test sherlock::before_after_holmes          ... bench: 165,435,560 ns/iter (+/- 165,901) = 3 MB/s
-test sherlock::before_holmes                ... bench: 164,466,984 ns/iter (+/- 178,082) = 3 MB/s
-test sherlock::everything_greedy            ... bench:  34,680,745 ns/iter (+/- 862,671) = 17 MB/s
-test sherlock::holmes_cochar_watson         ... bench:  59,712,596 ns/iter (+/- 85,049) = 9 MB/s
-test sherlock::ing_suffix                   ... bench: 135,611,524 ns/iter (+/- 383,869) = 4 MB/s
-test sherlock::ing_suffix_limited_space     ... bench:  73,398,446 ns/iter (+/- 112,893) = 8 MB/s
-test sherlock::name_alt1                    ... bench:  42,274,906 ns/iter (+/- 60,836) = 14 MB/s
-test sherlock::name_alt2                    ... bench:  42,159,449 ns/iter (+/- 56,642) = 14 MB/s
-test sherlock::name_alt3                    ... bench: 121,926,811 ns/iter (+/- 624,877) = 4 MB/s
-test sherlock::name_alt4                    ... bench:  58,912,788 ns/iter (+/- 101,576) = 10 MB/s
-test sherlock::name_alt5                    ... bench:  63,891,303 ns/iter (+/- 79,754) = 9 MB/s
-test sherlock::name_holmes                  ... bench:  22,995,759 ns/iter (+/- 45,074) = 25 MB/s
-test sherlock::name_sherlock                ... bench:  23,024,135 ns/iter (+/- 86,982) = 25 MB/s
-test sherlock::name_sherlock_holmes         ... bench:  23,026,357 ns/iter (+/- 42,271) = 25 MB/s
-test sherlock::name_whitespace              ... bench:  32,485,572 ns/iter (+/- 77,736) = 18 MB/s
-test sherlock::no_match_common              ... bench:  23,544,207 ns/iter (+/- 590,037) = 25 MB/s
-test sherlock::no_match_really_common       ... bench:  23,543,480 ns/iter (+/- 51,838) = 25 MB/s
-test sherlock::no_match_uncommon            ... bench:  23,024,692 ns/iter (+/- 78,358) = 25 MB/s
-test sherlock::quotes                       ... bench:  42,376,602 ns/iter (+/- 49,060) = 14 MB/s
-test sherlock::repeated_class_negation      ... bench:  92,701,274 ns/iter (+/- 208,063) = 6 MB/s
-test sherlock::the_lower                    ... bench:  23,553,163 ns/iter (+/- 61,446) = 25 MB/s
-test sherlock::the_upper                    ... bench:  23,281,951 ns/iter (+/- 35,811) = 25 MB/s
-test sherlock::the_whitespace               ... bench:  33,011,779 ns/iter (+/- 65,085) = 18 MB/s
-test sherlock::word_ending_n                ... bench:  64,965,762 ns/iter (+/- 106,103) = 9 MB/s
-test sherlock::words                        ... bench:  47,466,153 ns/iter (+/- 773,222) = 12 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 82 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/tcl b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/tcl
deleted file mode 100644
index 0586935..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/07/tcl
+++ /dev/null
@@ -1,94 +0,0 @@
-
-running 89 tests
-test misc::anchored_literal_long_match       ... bench:         452 ns/iter (+/- 6) = 862 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          92 ns/iter (+/- 2) = 4239 MB/s
-test misc::anchored_literal_short_match      ... bench:         454 ns/iter (+/- 6) = 57 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          92 ns/iter (+/- 1) = 282 MB/s
-test misc::easy0_1K                          ... bench:       9,231 ns/iter (+/- 59) = 113 MB/s
-test misc::easy0_1MB                         ... bench:   2,828,050 ns/iter (+/- 9,104) = 370 MB/s
-test misc::easy0_32                          ... bench:       6,527 ns/iter (+/- 78) = 9 MB/s
-test misc::easy0_32K                         ... bench:      94,825 ns/iter (+/- 410) = 345 MB/s
-test misc::easy1_1K                          ... bench:       5,420 ns/iter (+/- 54) = 192 MB/s
-test misc::easy1_1MB                         ... bench:   2,823,597 ns/iter (+/- 8,534) = 371 MB/s
-test misc::easy1_32                          ... bench:       2,727 ns/iter (+/- 80) = 19 MB/s
-test misc::easy1_32K                         ... bench:      93,382 ns/iter (+/- 108) = 351 MB/s
-test misc::hard_1K                           ... bench:      12,046 ns/iter (+/- 88) = 87 MB/s
-test misc::hard_1MB                          ... bench:   2,831,445 ns/iter (+/- 9,713) = 370 MB/s
-test misc::hard_32                           ... bench:       9,257 ns/iter (+/- 63) = 6 MB/s
-test misc::hard_32K                          ... bench:      97,613 ns/iter (+/- 533) = 335 MB/s
-test misc::literal                           ... bench:         398 ns/iter (+/- 14) = 128 MB/s
-test misc::long_needle1                      ... bench:  18,459,088 ns/iter (+/- 162,391) = 5 MB/s
-test misc::long_needle2                      ... bench:  18,390,595 ns/iter (+/- 96,143) = 5 MB/s
-test misc::match_class                       ... bench:         480 ns/iter (+/- 1) = 168 MB/s
-test misc::match_class_in_range              ... bench:         477 ns/iter (+/- 10) = 169 MB/s
-test misc::medium_1K                         ... bench:       9,573 ns/iter (+/- 94) = 109 MB/s
-test misc::medium_1MB                        ... bench:   2,828,512 ns/iter (+/- 28,270) = 370 MB/s
-test misc::medium_32                         ... bench:       6,874 ns/iter (+/- 68) = 8 MB/s
-test misc::medium_32K                        ... bench:      95,040 ns/iter (+/- 517) = 345 MB/s
-test misc::no_exponential                    ... bench:   1,976,788 ns/iter (+/- 20,661)
-test misc::not_literal                       ... bench:       1,548 ns/iter (+/- 15) = 32 MB/s
-test misc::one_pass_long_prefix              ... bench:       5,063 ns/iter (+/- 76) = 5 MB/s
-test misc::one_pass_long_prefix_not          ... bench:       4,933 ns/iter (+/- 62) = 5 MB/s
-test misc::one_pass_short                    ... bench:         486 ns/iter (+/- 4) = 34 MB/s
-test misc::one_pass_short_not                ... bench:         579 ns/iter (+/- 3) = 29 MB/s
-test misc::reallyhard2_1K                    ... bench:      88,153 ns/iter (+/- 2,317) = 11 MB/s
-test misc::reallyhard_1K                     ... bench:      12,157 ns/iter (+/- 51) = 86 MB/s
-test misc::reallyhard_1MB                    ... bench:   2,866,126 ns/iter (+/- 71,338) = 365 MB/s
-test misc::reallyhard_32                     ... bench:       9,321 ns/iter (+/- 138) = 6 MB/s
-test misc::reallyhard_32K                    ... bench:      97,799 ns/iter (+/- 1,087) = 335 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:      22,679 ns/iter (+/- 293) = 352 MB/s
-test regexdna::find_new_lines                ... bench:  38,700,951 ns/iter (+/- 105,197) = 131 MB/s
-test regexdna::subst1                        ... bench:  22,123,470 ns/iter (+/- 96,738) = 229 MB/s
-test regexdna::subst10                       ... bench:  22,125,412 ns/iter (+/- 65,856) = 229 MB/s
-test regexdna::subst11                       ... bench:  22,178,791 ns/iter (+/- 75,853) = 229 MB/s
-test regexdna::subst2                        ... bench:  22,348,278 ns/iter (+/- 228,790) = 227 MB/s
-test regexdna::subst3                        ... bench:  22,187,493 ns/iter (+/- 69,149) = 229 MB/s
-test regexdna::subst4                        ... bench:  22,134,373 ns/iter (+/- 71,979) = 229 MB/s
-test regexdna::subst5                        ... bench:  22,183,169 ns/iter (+/- 66,220) = 229 MB/s
-test regexdna::subst6                        ... bench:  22,263,432 ns/iter (+/- 91,605) = 228 MB/s
-test regexdna::subst7                        ... bench:  22,256,481 ns/iter (+/- 62,794) = 228 MB/s
-test regexdna::subst8                        ... bench:  22,134,314 ns/iter (+/- 75,199) = 229 MB/s
-test regexdna::subst9                        ... bench:  22,144,129 ns/iter (+/- 76,744) = 229 MB/s
-test regexdna::variant1                      ... bench:  13,846,793 ns/iter (+/- 33,520) = 367 MB/s
-test regexdna::variant2                      ... bench:  14,248,239 ns/iter (+/- 62,252) = 356 MB/s
-test regexdna::variant3                      ... bench:  15,702,520 ns/iter (+/- 339,738) = 323 MB/s
-test regexdna::variant4                      ... bench:  15,143,136 ns/iter (+/- 52,300) = 335 MB/s
-test regexdna::variant5                      ... bench:  16,324,698 ns/iter (+/- 50,942) = 311 MB/s
-test regexdna::variant6                      ... bench:  14,508,593 ns/iter (+/- 46,251) = 350 MB/s
-test regexdna::variant7                      ... bench:  14,443,485 ns/iter (+/- 80,444) = 351 MB/s
-test regexdna::variant8                      ... bench:  14,430,571 ns/iter (+/- 63,143) = 352 MB/s
-test regexdna::variant9                      ... bench:  14,883,129 ns/iter (+/- 76,837) = 341 MB/s
-test sherlock::before_after_holmes           ... bench:   2,227,807 ns/iter (+/- 9,119) = 267 MB/s
-test sherlock::before_holmes                 ... bench:   2,700,579 ns/iter (+/- 24,875) = 220 MB/s
-test sherlock::holmes_cochar_watson          ... bench:   2,211,847 ns/iter (+/- 15,027) = 268 MB/s
-test sherlock::ing_suffix                    ... bench:   4,398,150 ns/iter (+/- 27,219) = 135 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:  17,992,130 ns/iter (+/- 457,978) = 33 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:   1,845,704 ns/iter (+/- 9,382) = 322 MB/s
-test sherlock::name_alt1                     ... bench:   1,890,373 ns/iter (+/- 9,971) = 314 MB/s
-test sherlock::name_alt2                     ... bench:   2,626,524 ns/iter (+/- 18,261) = 226 MB/s
-test sherlock::name_alt3                     ... bench:   4,468,643 ns/iter (+/- 11,946) = 133 MB/s
-test sherlock::name_alt3_nocase              ... bench:   7,226,342 ns/iter (+/- 57,220) = 82 MB/s
-test sherlock::name_alt4                     ... bench:   2,395,105 ns/iter (+/- 31,101) = 248 MB/s
-test sherlock::name_alt4_nocase              ... bench:   2,895,153 ns/iter (+/- 12,446) = 205 MB/s
-test sherlock::name_alt5                     ... bench:   3,253,560 ns/iter (+/- 33,725) = 182 MB/s
-test sherlock::name_alt5_nocase              ... bench:   4,008,656 ns/iter (+/- 39,415) = 148 MB/s
-test sherlock::name_holmes                   ... bench:   2,076,117 ns/iter (+/- 6,376) = 286 MB/s
-test sherlock::name_holmes_nocase            ... bench:   2,157,634 ns/iter (+/- 6,494) = 275 MB/s
-test sherlock::name_sherlock                 ... bench:   1,757,317 ns/iter (+/- 5,935) = 338 MB/s
-test sherlock::name_sherlock_holmes          ... bench:   1,897,004 ns/iter (+/- 12,012) = 313 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:   1,939,722 ns/iter (+/- 6,273) = 306 MB/s
-test sherlock::name_sherlock_nocase          ... bench:   1,801,334 ns/iter (+/- 3,179) = 330 MB/s
-test sherlock::name_whitespace               ... bench:   1,910,996 ns/iter (+/- 6,429) = 311 MB/s
-test sherlock::no_match_common               ... bench:   1,601,431 ns/iter (+/- 7,131) = 371 MB/s
-test sherlock::no_match_really_common        ... bench:   1,601,153 ns/iter (+/- 4,375) = 371 MB/s
-test sherlock::no_match_uncommon             ... bench:   1,600,840 ns/iter (+/- 8,348) = 371 MB/s
-test sherlock::quotes                        ... bench:   7,620,650 ns/iter (+/- 48,467) = 78 MB/s
-test sherlock::repeated_class_negation       ... bench:  55,564,521 ns/iter (+/- 210,324) = 10 MB/s
-test sherlock::the_lower                     ... bench:   5,628,558 ns/iter (+/- 19,934) = 105 MB/s
-test sherlock::the_nocase                    ... bench:   6,063,195 ns/iter (+/- 28,534) = 98 MB/s
-test sherlock::the_upper                     ... bench:   1,992,703 ns/iter (+/- 6,736) = 298 MB/s
-test sherlock::the_whitespace                ... bench:   7,159,423 ns/iter (+/- 38,306) = 83 MB/s
-test sherlock::words                         ... bench:  38,358,421 ns/iter (+/- 99,230) = 15 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 89 measured; 0 filtered out
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-after-01 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-after-01
deleted file mode 100644
index 521e935..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-after-01
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:          18 ns/iter (+/- 1) = 21666 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          19 ns/iter (+/- 2) = 20526 MB/s
-test misc::anchored_literal_short_match      ... bench:          18 ns/iter (+/- 1) = 1444 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          19 ns/iter (+/- 0) = 1368 MB/s
-test misc::easy0_1K                          ... bench:          15 ns/iter (+/- 2) = 70066 MB/s
-test misc::easy0_1MB                         ... bench:          22 ns/iter (+/- 0) = 47663772 MB/s
-test misc::easy0_32                          ... bench:          14 ns/iter (+/- 2) = 4214 MB/s
-test misc::easy0_32K                         ... bench:          15 ns/iter (+/- 1) = 2186333 MB/s
-test misc::easy1_1K                          ... bench:          40 ns/iter (+/- 2) = 26100 MB/s
-test misc::easy1_1MB                         ... bench:          44 ns/iter (+/- 5) = 23831727 MB/s
-test misc::easy1_32                          ... bench:          39 ns/iter (+/- 5) = 1333 MB/s
-test misc::easy1_32K                         ... bench:          41 ns/iter (+/- 3) = 799707 MB/s
-test misc::hard_1K                           ... bench:          50 ns/iter (+/- 7) = 21020 MB/s
-test misc::hard_1MB                          ... bench:          55 ns/iter (+/- 6) = 19065509 MB/s
-test misc::hard_32                           ... bench:          50 ns/iter (+/- 7) = 1180 MB/s
-test misc::hard_32K                          ... bench:          50 ns/iter (+/- 2) = 655900 MB/s
-test misc::is_match_set                      ... bench:          60 ns/iter (+/- 2) = 416 MB/s
-test misc::literal                           ... bench:          12 ns/iter (+/- 1) = 4250 MB/s
-test misc::long_needle1                      ... bench:       3,252 ns/iter (+/- 168) = 30750 MB/s
-test misc::long_needle2                      ... bench:     355,576 ns/iter (+/- 34,074) = 281 MB/s
-test misc::match_class                       ... bench:          67 ns/iter (+/- 2) = 1208 MB/s
-test misc::match_class_in_range              ... bench:          14 ns/iter (+/- 0) = 5785 MB/s
-test misc::match_class_unicode               ... bench:         256 ns/iter (+/- 36) = 628 MB/s
-test misc::matches_set                       ... bench:         458 ns/iter (+/- 65) = 54 MB/s
-test misc::medium_1K                         ... bench:          15 ns/iter (+/- 1) = 70133 MB/s
-test misc::medium_1MB                        ... bench:          23 ns/iter (+/- 2) = 45591478 MB/s
-test misc::medium_32                         ... bench:          15 ns/iter (+/- 0) = 4000 MB/s
-test misc::medium_32K                        ... bench:          15 ns/iter (+/- 0) = 2186400 MB/s
-test misc::no_exponential                    ... bench:         406 ns/iter (+/- 32) = 246 MB/s
-test misc::not_literal                       ... bench:          90 ns/iter (+/- 12) = 566 MB/s
-test misc::one_pass_long_prefix              ... bench:          53 ns/iter (+/- 1) = 490 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          51 ns/iter (+/- 7) = 509 MB/s
-test misc::one_pass_short                    ... bench:          37 ns/iter (+/- 1) = 459 MB/s
-test misc::one_pass_short_not                ... bench:          39 ns/iter (+/- 5) = 435 MB/s
-test misc::reallyhard2_1K                    ... bench:          75 ns/iter (+/- 2) = 13866 MB/s
-test misc::reallyhard_1K                     ... bench:       1,591 ns/iter (+/- 227) = 660 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,576,602 ns/iter (+/- 204,573) = 665 MB/s
-test misc::reallyhard_32                     ... bench:         102 ns/iter (+/- 7) = 578 MB/s
-test misc::reallyhard_32K                    ... bench:      49,327 ns/iter (+/- 4,812) = 664 MB/s
-test misc::replace_all                       ... bench:         132 ns/iter (+/- 13)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,190 ns/iter (+/- 581) = 1909 MB/s
-test misc::short_haystack_1000000x           ... bench:     132,982 ns/iter (+/- 18,045) = 60158 MB/s
-test misc::short_haystack_100000x            ... bench:      14,720 ns/iter (+/- 946) = 54348 MB/s
-test misc::short_haystack_10000x             ... bench:       5,993 ns/iter (+/- 381) = 13350 MB/s
-test misc::short_haystack_1000x              ... bench:         476 ns/iter (+/- 58) = 16829 MB/s
-test misc::short_haystack_100x               ... bench:         227 ns/iter (+/- 22) = 3572 MB/s
-test misc::short_haystack_10x                ... bench:         211 ns/iter (+/- 13) = 431 MB/s
-test misc::short_haystack_1x                 ... bench:         204 ns/iter (+/- 29) = 93 MB/s
-test misc::short_haystack_2x                 ... bench:         206 ns/iter (+/- 7) = 131 MB/s
-test misc::short_haystack_3x                 ... bench:         212 ns/iter (+/- 16) = 165 MB/s
-test misc::short_haystack_4x                 ... bench:         207 ns/iter (+/- 29) = 207 MB/s
-test regexdna::find_new_lines                ... bench:  12,053,740 ns/iter (+/- 393,644) = 421 MB/s
-test regexdna::subst1                        ... bench:     786,112 ns/iter (+/- 91,136) = 6466 MB/s
-test regexdna::subst10                       ... bench:     831,353 ns/iter (+/- 67,293) = 6114 MB/s
-test regexdna::subst11                       ... bench:     784,021 ns/iter (+/- 28,112) = 6483 MB/s
-test regexdna::subst2                        ... bench:     785,838 ns/iter (+/- 108,510) = 6468 MB/s
-test regexdna::subst3                        ... bench:     791,789 ns/iter (+/- 37,364) = 6420 MB/s
-test regexdna::subst4                        ... bench:     784,224 ns/iter (+/- 23,802) = 6482 MB/s
-test regexdna::subst5                        ... bench:     788,368 ns/iter (+/- 75,171) = 6448 MB/s
-test regexdna::subst6                        ... bench:     784,730 ns/iter (+/- 48,594) = 6477 MB/s
-test regexdna::subst7                        ... bench:     788,067 ns/iter (+/- 88,333) = 6450 MB/s
-test regexdna::subst8                        ... bench:     810,784 ns/iter (+/- 111,836) = 6269 MB/s
-test regexdna::subst9                        ... bench:     788,854 ns/iter (+/- 66,496) = 6444 MB/s
-test regexdna::variant1                      ... bench:   2,238,677 ns/iter (+/- 144,752) = 2270 MB/s
-test regexdna::variant2                      ... bench:   3,258,761 ns/iter (+/- 205,012) = 1559 MB/s
-test regexdna::variant3                      ... bench:   3,818,146 ns/iter (+/- 254,877) = 1331 MB/s
-test regexdna::variant4                      ... bench:   3,837,323 ns/iter (+/- 349,373) = 1324 MB/s
-test regexdna::variant5                      ... bench:   2,698,901 ns/iter (+/- 111,145) = 1883 MB/s
-test regexdna::variant6                      ... bench:   2,687,854 ns/iter (+/- 184,039) = 1891 MB/s
-test regexdna::variant7                      ... bench:   3,291,211 ns/iter (+/- 220,992) = 1544 MB/s
-test regexdna::variant8                      ... bench:   3,359,262 ns/iter (+/- 185,610) = 1513 MB/s
-test regexdna::variant9                      ... bench:   3,293,953 ns/iter (+/- 245,454) = 1543 MB/s
-test rust_compile::compile_huge              ... bench:      95,142 ns/iter (+/- 10,195)
-test rust_compile::compile_huge_bytes        ... bench:   5,650,680 ns/iter (+/- 252,936)
-test rust_compile::compile_huge_full         ... bench:  10,867,986 ns/iter (+/- 275,259)
-test rust_compile::compile_simple            ... bench:       3,751 ns/iter (+/- 310)
-test rust_compile::compile_simple_bytes      ... bench:       3,664 ns/iter (+/- 172)
-test rust_compile::compile_simple_full       ... bench:      22,078 ns/iter (+/- 3,259)
-test rust_compile::compile_small             ... bench:       8,499 ns/iter (+/- 942)
-test rust_compile::compile_small_bytes       ... bench:     151,196 ns/iter (+/- 16,322)
-test rust_compile::compile_small_full        ... bench:     309,597 ns/iter (+/- 32,622)
-test sherlock::before_after_holmes           ... bench:     917,591 ns/iter (+/- 55,643) = 648 MB/s
-test sherlock::before_holmes                 ... bench:      62,726 ns/iter (+/- 8,861) = 9484 MB/s
-test sherlock::everything_greedy             ... bench:   2,036,050 ns/iter (+/- 152,461) = 292 MB/s
-test sherlock::everything_greedy_nl          ... bench:     796,690 ns/iter (+/- 71,089) = 746 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     106,258 ns/iter (+/- 8,294) = 5598 MB/s
-test sherlock::holmes_coword_watson          ... bench:     481,086 ns/iter (+/- 60,212) = 1236 MB/s
-test sherlock::ing_suffix                    ... bench:     322,033 ns/iter (+/- 8,912) = 1847 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,067,523 ns/iter (+/- 89,630) = 557 MB/s
-test sherlock::letters                       ... bench:  22,745,932 ns/iter (+/- 428,787) = 26 MB/s
-test sherlock::letters_lower                 ... bench:  22,228,365 ns/iter (+/- 495,287) = 26 MB/s
-test sherlock::letters_upper                 ... bench:   1,775,941 ns/iter (+/- 158,985) = 334 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,327 ns/iter (+/- 49,085) = 663 MB/s
-test sherlock::name_alt1                     ... bench:      32,008 ns/iter (+/- 4,011) = 18587 MB/s
-test sherlock::name_alt2                     ... bench:      86,850 ns/iter (+/- 5,463) = 6850 MB/s
-test sherlock::name_alt3                     ... bench:      98,359 ns/iter (+/- 14,052) = 6048 MB/s
-test sherlock::name_alt3_nocase              ... bench:     381,147 ns/iter (+/- 16,996) = 1560 MB/s
-test sherlock::name_alt4                     ... bench:     121,025 ns/iter (+/- 16,654) = 4915 MB/s
-test sherlock::name_alt4_nocase              ... bench:     188,972 ns/iter (+/- 26,145) = 3148 MB/s
-test sherlock::name_alt5                     ... bench:      91,832 ns/iter (+/- 6,188) = 6478 MB/s
-test sherlock::name_alt5_nocase              ... bench:     351,422 ns/iter (+/- 49,084) = 1692 MB/s
-test sherlock::name_holmes                   ... bench:      33,405 ns/iter (+/- 3,113) = 17809 MB/s
-test sherlock::name_holmes_nocase            ... bench:     134,899 ns/iter (+/- 10,883) = 4410 MB/s
-test sherlock::name_sherlock                 ... bench:      22,455 ns/iter (+/- 2,027) = 26494 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      22,283 ns/iter (+/- 2,281) = 26698 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      97,492 ns/iter (+/- 6,496) = 6102 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      95,627 ns/iter (+/- 8,442) = 6221 MB/s
-test sherlock::name_whitespace               ... bench:      30,702 ns/iter (+/- 4,194) = 19377 MB/s
-test sherlock::no_match_common               ... bench:      19,616 ns/iter (+/- 2,677) = 30328 MB/s
-test sherlock::no_match_really_common        ... bench:      25,601 ns/iter (+/- 2,506) = 23238 MB/s
-test sherlock::no_match_uncommon             ... bench:      19,641 ns/iter (+/- 2,175) = 30290 MB/s
-test sherlock::quotes                        ... bench:     369,048 ns/iter (+/- 25,898) = 1612 MB/s
-test sherlock::repeated_class_negation       ... bench:  75,780,396 ns/iter (+/- 1,032,817) = 7 MB/s
-test sherlock::the_lower                     ... bench:     327,762 ns/iter (+/- 48,769) = 1815 MB/s
-test sherlock::the_nocase                    ... bench:     532,075 ns/iter (+/- 40,117) = 1118 MB/s
-test sherlock::the_upper                     ... bench:      45,197 ns/iter (+/- 1,621) = 13163 MB/s
-test sherlock::the_whitespace                ... bench:     819,239 ns/iter (+/- 81,388) = 726 MB/s
-test sherlock::word_ending_n                 ... bench:   1,716,625 ns/iter (+/- 120,247) = 346 MB/s
-test sherlock::words                         ... bench:   8,690,764 ns/iter (+/- 322,915) = 68 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 114.31s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-after-02 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-after-02
deleted file mode 100644
index 60d0578..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-after-02
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:          18 ns/iter (+/- 2) = 21666 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          19 ns/iter (+/- 1) = 20526 MB/s
-test misc::anchored_literal_short_match      ... bench:          18 ns/iter (+/- 1) = 1444 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          19 ns/iter (+/- 2) = 1368 MB/s
-test misc::easy0_1K                          ... bench:          15 ns/iter (+/- 1) = 70066 MB/s
-test misc::easy0_1MB                         ... bench:          22 ns/iter (+/- 1) = 47663772 MB/s
-test misc::easy0_32                          ... bench:          14 ns/iter (+/- 1) = 4214 MB/s
-test misc::easy0_32K                         ... bench:          15 ns/iter (+/- 1) = 2186333 MB/s
-test misc::easy1_1K                          ... bench:          39 ns/iter (+/- 4) = 26769 MB/s
-test misc::easy1_1MB                         ... bench:          43 ns/iter (+/- 3) = 24385953 MB/s
-test misc::easy1_32                          ... bench:          39 ns/iter (+/- 4) = 1333 MB/s
-test misc::easy1_32K                         ... bench:          39 ns/iter (+/- 3) = 840717 MB/s
-test misc::hard_1K                           ... bench:          50 ns/iter (+/- 5) = 21020 MB/s
-test misc::hard_1MB                          ... bench:          55 ns/iter (+/- 7) = 19065509 MB/s
-test misc::hard_32                           ... bench:          50 ns/iter (+/- 5) = 1180 MB/s
-test misc::hard_32K                          ... bench:          50 ns/iter (+/- 6) = 655900 MB/s
-test misc::is_match_set                      ... bench:          60 ns/iter (+/- 4) = 416 MB/s
-test misc::literal                           ... bench:          12 ns/iter (+/- 0) = 4250 MB/s
-test misc::long_needle1                      ... bench:       3,251 ns/iter (+/- 333) = 30760 MB/s
-test misc::long_needle2                      ... bench:     355,576 ns/iter (+/- 24,612) = 281 MB/s
-test misc::match_class                       ... bench:          66 ns/iter (+/- 1) = 1227 MB/s
-test misc::match_class_in_range              ... bench:          14 ns/iter (+/- 1) = 5785 MB/s
-test misc::match_class_unicode               ... bench:         254 ns/iter (+/- 25) = 633 MB/s
-test misc::matches_set                       ... bench:         456 ns/iter (+/- 17) = 54 MB/s
-test misc::medium_1K                         ... bench:          15 ns/iter (+/- 0) = 70133 MB/s
-test misc::medium_1MB                        ... bench:          23 ns/iter (+/- 2) = 45591478 MB/s
-test misc::medium_32                         ... bench:          15 ns/iter (+/- 2) = 4000 MB/s
-test misc::medium_32K                        ... bench:          15 ns/iter (+/- 2) = 2186400 MB/s
-test misc::no_exponential                    ... bench:         403 ns/iter (+/- 55) = 248 MB/s
-test misc::not_literal                       ... bench:          90 ns/iter (+/- 12) = 566 MB/s
-test misc::one_pass_long_prefix              ... bench:          51 ns/iter (+/- 7) = 509 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          51 ns/iter (+/- 5) = 509 MB/s
-test misc::one_pass_short                    ... bench:          38 ns/iter (+/- 5) = 447 MB/s
-test misc::one_pass_short_not                ... bench:          39 ns/iter (+/- 1) = 435 MB/s
-test misc::reallyhard2_1K                    ... bench:          75 ns/iter (+/- 2) = 13866 MB/s
-test misc::reallyhard_1K                     ... bench:       1,592 ns/iter (+/- 148) = 660 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,576,299 ns/iter (+/- 142,145) = 665 MB/s
-test misc::reallyhard_32                     ... bench:         103 ns/iter (+/- 8) = 572 MB/s
-test misc::reallyhard_32K                    ... bench:      49,326 ns/iter (+/- 3,202) = 664 MB/s
-test misc::replace_all                       ... bench:         132 ns/iter (+/- 16)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,168 ns/iter (+/- 227) = 1919 MB/s
-test misc::short_haystack_1000000x           ... bench:     132,733 ns/iter (+/- 18,141) = 60271 MB/s
-test misc::short_haystack_100000x            ... bench:      14,468 ns/iter (+/- 1,777) = 55295 MB/s
-test misc::short_haystack_10000x             ... bench:       6,316 ns/iter (+/- 360) = 12667 MB/s
-test misc::short_haystack_1000x              ... bench:         474 ns/iter (+/- 69) = 16900 MB/s
-test misc::short_haystack_100x               ... bench:         229 ns/iter (+/- 32) = 3541 MB/s
-test misc::short_haystack_10x                ... bench:         212 ns/iter (+/- 18) = 429 MB/s
-test misc::short_haystack_1x                 ... bench:         205 ns/iter (+/- 28) = 92 MB/s
-test misc::short_haystack_2x                 ... bench:         207 ns/iter (+/- 20) = 130 MB/s
-test misc::short_haystack_3x                 ... bench:         213 ns/iter (+/- 7) = 164 MB/s
-test misc::short_haystack_4x                 ... bench:         208 ns/iter (+/- 9) = 206 MB/s
-test regexdna::find_new_lines                ... bench:  12,050,847 ns/iter (+/- 346,484) = 421 MB/s
-test regexdna::subst1                        ... bench:     817,689 ns/iter (+/- 104,629) = 6216 MB/s
-test regexdna::subst10                       ... bench:     788,728 ns/iter (+/- 66,497) = 6445 MB/s
-test regexdna::subst11                       ... bench:     787,188 ns/iter (+/- 49,158) = 6457 MB/s
-test regexdna::subst2                        ... bench:     787,143 ns/iter (+/- 108,541) = 6458 MB/s
-test regexdna::subst3                        ... bench:     792,452 ns/iter (+/- 32,963) = 6414 MB/s
-test regexdna::subst4                        ... bench:     820,043 ns/iter (+/- 71,037) = 6198 MB/s
-test regexdna::subst5                        ... bench:     790,043 ns/iter (+/- 39,234) = 6434 MB/s
-test regexdna::subst6                        ... bench:     785,007 ns/iter (+/- 18,701) = 6475 MB/s
-test regexdna::subst7                        ... bench:     789,393 ns/iter (+/- 51,525) = 6439 MB/s
-test regexdna::subst8                        ... bench:     784,190 ns/iter (+/- 90,675) = 6482 MB/s
-test regexdna::subst9                        ... bench:     789,021 ns/iter (+/- 88,256) = 6442 MB/s
-test regexdna::variant1                      ... bench:   2,237,592 ns/iter (+/- 146,174) = 2271 MB/s
-test regexdna::variant2                      ... bench:   3,255,382 ns/iter (+/- 179,473) = 1561 MB/s
-test regexdna::variant3                      ... bench:   3,812,799 ns/iter (+/- 210,786) = 1333 MB/s
-test regexdna::variant4                      ... bench:   3,853,476 ns/iter (+/- 263,442) = 1319 MB/s
-test regexdna::variant5                      ... bench:   2,696,756 ns/iter (+/- 161,353) = 1885 MB/s
-test regexdna::variant6                      ... bench:   2,683,221 ns/iter (+/- 149,650) = 1894 MB/s
-test regexdna::variant7                      ... bench:   3,289,426 ns/iter (+/- 209,217) = 1545 MB/s
-test regexdna::variant8                      ... bench:   3,362,858 ns/iter (+/- 274,273) = 1511 MB/s
-test regexdna::variant9                      ... bench:   3,287,253 ns/iter (+/- 188,894) = 1546 MB/s
-test rust_compile::compile_huge              ... bench:      94,912 ns/iter (+/- 12,311)
-test rust_compile::compile_huge_bytes        ... bench:   5,534,281 ns/iter (+/- 192,069)
-test rust_compile::compile_huge_full         ... bench:  10,969,970 ns/iter (+/- 312,230)
-test rust_compile::compile_simple            ... bench:       3,523 ns/iter (+/- 525)
-test rust_compile::compile_simple_bytes      ... bench:       3,564 ns/iter (+/- 355)
-test rust_compile::compile_simple_full       ... bench:      19,887 ns/iter (+/- 1,885)
-test rust_compile::compile_small             ... bench:       8,294 ns/iter (+/- 1,123)
-test rust_compile::compile_small_bytes       ... bench:     153,070 ns/iter (+/- 20,825)
-test rust_compile::compile_small_full        ... bench:     313,318 ns/iter (+/- 28,271)
-test sherlock::before_after_holmes           ... bench:     907,585 ns/iter (+/- 86,027) = 655 MB/s
-test sherlock::before_holmes                 ... bench:      62,765 ns/iter (+/- 6,413) = 9478 MB/s
-test sherlock::everything_greedy             ... bench:   2,033,519 ns/iter (+/- 97,963) = 292 MB/s
-test sherlock::everything_greedy_nl          ... bench:     796,514 ns/iter (+/- 48,247) = 746 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     107,788 ns/iter (+/- 15,545) = 5519 MB/s
-test sherlock::holmes_coword_watson          ... bench:     482,686 ns/iter (+/- 49,033) = 1232 MB/s
-test sherlock::ing_suffix                    ... bench:     322,901 ns/iter (+/- 46,329) = 1842 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,067,799 ns/iter (+/- 57,022) = 557 MB/s
-test sherlock::letters                       ... bench:  22,823,246 ns/iter (+/- 472,094) = 26 MB/s
-test sherlock::letters_lower                 ... bench:  22,137,278 ns/iter (+/- 443,188) = 26 MB/s
-test sherlock::letters_upper                 ... bench:   1,773,598 ns/iter (+/- 96,994) = 335 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,623 ns/iter (+/- 48,509) = 662 MB/s
-test sherlock::name_alt1                     ... bench:      31,882 ns/iter (+/- 3,354) = 18660 MB/s
-test sherlock::name_alt2                     ... bench:      86,500 ns/iter (+/- 7,997) = 6877 MB/s
-test sherlock::name_alt3                     ... bench:      98,159 ns/iter (+/- 6,106) = 6060 MB/s
-test sherlock::name_alt3_nocase              ... bench:     383,858 ns/iter (+/- 19,224) = 1549 MB/s
-test sherlock::name_alt4                     ... bench:     122,489 ns/iter (+/- 17,271) = 4857 MB/s
-test sherlock::name_alt4_nocase              ... bench:     192,081 ns/iter (+/- 10,999) = 3097 MB/s
-test sherlock::name_alt5                     ... bench:      91,396 ns/iter (+/- 6,399) = 6509 MB/s
-test sherlock::name_alt5_nocase              ... bench:     354,804 ns/iter (+/- 26,158) = 1676 MB/s
-test sherlock::name_holmes                   ... bench:      33,569 ns/iter (+/- 4,647) = 17722 MB/s
-test sherlock::name_holmes_nocase            ... bench:     136,387 ns/iter (+/- 14,005) = 4362 MB/s
-test sherlock::name_sherlock                 ... bench:      22,468 ns/iter (+/- 1,144) = 26479 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      22,279 ns/iter (+/- 1,563) = 26703 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      98,003 ns/iter (+/- 10,978) = 6070 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      96,130 ns/iter (+/- 4,373) = 6188 MB/s
-test sherlock::name_whitespace               ... bench:      30,532 ns/iter (+/- 3,125) = 19485 MB/s
-test sherlock::no_match_common               ... bench:      19,644 ns/iter (+/- 2,118) = 30285 MB/s
-test sherlock::no_match_really_common        ... bench:      25,374 ns/iter (+/- 1,538) = 23446 MB/s
-test sherlock::no_match_uncommon             ... bench:      19,602 ns/iter (+/- 427) = 30350 MB/s
-test sherlock::quotes                        ... bench:     369,657 ns/iter (+/- 52,406) = 1609 MB/s
-test sherlock::repeated_class_negation       ... bench:  76,922,839 ns/iter (+/- 1,261,770) = 7 MB/s
-test sherlock::the_lower                     ... bench:     326,221 ns/iter (+/- 35,683) = 1823 MB/s
-test sherlock::the_nocase                    ... bench:     525,254 ns/iter (+/- 26,000) = 1132 MB/s
-test sherlock::the_upper                     ... bench:      44,702 ns/iter (+/- 5,012) = 13308 MB/s
-test sherlock::the_whitespace                ... bench:     814,494 ns/iter (+/- 66,715) = 730 MB/s
-test sherlock::word_ending_n                 ... bench:   1,705,139 ns/iter (+/- 97,420) = 348 MB/s
-test sherlock::words                         ... bench:   8,632,437 ns/iter (+/- 278,177) = 68 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 106.01s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-before-01 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-before-01
deleted file mode 100644
index 1316e6d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-before-01
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:          18 ns/iter (+/- 1) = 21666 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          17 ns/iter (+/- 1) = 22941 MB/s
-test misc::anchored_literal_short_match      ... bench:          16 ns/iter (+/- 2) = 1625 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          17 ns/iter (+/- 2) = 1529 MB/s
-test misc::easy0_1K                          ... bench:          12 ns/iter (+/- 1) = 87583 MB/s
-test misc::easy0_1MB                         ... bench:          15 ns/iter (+/- 0) = 69906866 MB/s
-test misc::easy0_32                          ... bench:          11 ns/iter (+/- 0) = 5363 MB/s
-test misc::easy0_32K                         ... bench:          12 ns/iter (+/- 2) = 2732916 MB/s
-test misc::easy1_1K                          ... bench:          39 ns/iter (+/- 5) = 26769 MB/s
-test misc::easy1_1MB                         ... bench:          40 ns/iter (+/- 6) = 26214900 MB/s
-test misc::easy1_32                          ... bench:          39 ns/iter (+/- 3) = 1333 MB/s
-test misc::easy1_32K                         ... bench:          39 ns/iter (+/- 5) = 840717 MB/s
-test misc::hard_1K                           ... bench:          49 ns/iter (+/- 1) = 21448 MB/s
-test misc::hard_1MB                          ... bench:          52 ns/iter (+/- 2) = 20165442 MB/s
-test misc::hard_32                           ... bench:          49 ns/iter (+/- 2) = 1204 MB/s
-test misc::hard_32K                          ... bench:          49 ns/iter (+/- 3) = 669285 MB/s
-test misc::is_match_set                      ... bench:          59 ns/iter (+/- 2) = 423 MB/s
-test misc::literal                           ... bench:          11 ns/iter (+/- 1) = 4636 MB/s
-test misc::long_needle1                      ... bench:       1,161 ns/iter (+/- 54) = 86133 MB/s
-test misc::long_needle2                      ... bench:     680,687 ns/iter (+/- 63,713) = 146 MB/s
-test misc::match_class                       ... bench:          69 ns/iter (+/- 4) = 1173 MB/s
-test misc::match_class_in_range              ... bench:          13 ns/iter (+/- 0) = 6230 MB/s
-test misc::match_class_unicode               ... bench:         253 ns/iter (+/- 9) = 636 MB/s
-test misc::matches_set                       ... bench:         453 ns/iter (+/- 65) = 55 MB/s
-test misc::medium_1K                         ... bench:          13 ns/iter (+/- 0) = 80923 MB/s
-test misc::medium_1MB                        ... bench:          17 ns/iter (+/- 2) = 61682588 MB/s
-test misc::medium_32                         ... bench:          13 ns/iter (+/- 0) = 4615 MB/s
-test misc::medium_32K                        ... bench:          13 ns/iter (+/- 0) = 2522769 MB/s
-test misc::no_exponential                    ... bench:         330 ns/iter (+/- 47) = 303 MB/s
-test misc::not_literal                       ... bench:          88 ns/iter (+/- 9) = 579 MB/s
-test misc::one_pass_long_prefix              ... bench:          50 ns/iter (+/- 7) = 520 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          50 ns/iter (+/- 2) = 520 MB/s
-test misc::one_pass_short                    ... bench:          36 ns/iter (+/- 4) = 472 MB/s
-test misc::one_pass_short_not                ... bench:          39 ns/iter (+/- 2) = 435 MB/s
-test misc::reallyhard2_1K                    ... bench:          55 ns/iter (+/- 7) = 18909 MB/s
-test misc::reallyhard_1K                     ... bench:       1,590 ns/iter (+/- 225) = 661 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,580,163 ns/iter (+/- 224,935) = 663 MB/s
-test misc::reallyhard_32                     ... bench:         100 ns/iter (+/- 6) = 590 MB/s
-test misc::reallyhard_32K                    ... bench:      49,318 ns/iter (+/- 6,046) = 664 MB/s
-test misc::replace_all                       ... bench:         127 ns/iter (+/- 14)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,240 ns/iter (+/- 117) = 1886 MB/s
-test misc::short_haystack_1000000x           ... bench:      89,004 ns/iter (+/- 2,927) = 89883 MB/s
-test misc::short_haystack_100000x            ... bench:      10,349 ns/iter (+/- 334) = 77303 MB/s
-test misc::short_haystack_10000x             ... bench:       5,835 ns/iter (+/- 700) = 13712 MB/s
-test misc::short_haystack_1000x              ... bench:         563 ns/iter (+/- 33) = 14229 MB/s
-test misc::short_haystack_100x               ... bench:         260 ns/iter (+/- 21) = 3119 MB/s
-test misc::short_haystack_10x                ... bench:         221 ns/iter (+/- 31) = 411 MB/s
-test misc::short_haystack_1x                 ... bench:         211 ns/iter (+/- 30) = 90 MB/s
-test misc::short_haystack_2x                 ... bench:         213 ns/iter (+/- 19) = 126 MB/s
-test misc::short_haystack_3x                 ... bench:         212 ns/iter (+/- 7) = 165 MB/s
-test misc::short_haystack_4x                 ... bench:         221 ns/iter (+/- 26) = 194 MB/s
-test regexdna::find_new_lines                ... bench:  12,035,248 ns/iter (+/- 362,122) = 422 MB/s
-test regexdna::subst1                        ... bench:     787,853 ns/iter (+/- 29,667) = 6452 MB/s
-test regexdna::subst10                       ... bench:     750,718 ns/iter (+/- 103,118) = 6771 MB/s
-test regexdna::subst11                       ... bench:     749,377 ns/iter (+/- 103,312) = 6783 MB/s
-test regexdna::subst2                        ... bench:     748,785 ns/iter (+/- 83,175) = 6788 MB/s
-test regexdna::subst3                        ... bench:     755,004 ns/iter (+/- 75,589) = 6732 MB/s
-test regexdna::subst4                        ... bench:     747,617 ns/iter (+/- 70,600) = 6799 MB/s
-test regexdna::subst5                        ... bench:     752,458 ns/iter (+/- 86,154) = 6755 MB/s
-test regexdna::subst6                        ... bench:     749,801 ns/iter (+/- 102,642) = 6779 MB/s
-test regexdna::subst7                        ... bench:     760,975 ns/iter (+/- 105,159) = 6680 MB/s
-test regexdna::subst8                        ... bench:     749,002 ns/iter (+/- 82,082) = 6786 MB/s
-test regexdna::subst9                        ... bench:     751,248 ns/iter (+/- 100,152) = 6766 MB/s
-test regexdna::variant1                      ... bench:   2,211,035 ns/iter (+/- 150,147) = 2299 MB/s
-test regexdna::variant2                      ... bench:   3,210,193 ns/iter (+/- 161,942) = 1583 MB/s
-test regexdna::variant3                      ... bench:   3,793,641 ns/iter (+/- 203,795) = 1339 MB/s
-test regexdna::variant4                      ... bench:   3,799,721 ns/iter (+/- 140,933) = 1337 MB/s
-test regexdna::variant5                      ... bench:   2,652,750 ns/iter (+/- 185,489) = 1916 MB/s
-test regexdna::variant6                      ... bench:   2,633,257 ns/iter (+/- 211,323) = 1930 MB/s
-test regexdna::variant7                      ... bench:   3,268,111 ns/iter (+/- 176,273) = 1555 MB/s
-test regexdna::variant8                      ... bench:   3,331,333 ns/iter (+/- 264,431) = 1525 MB/s
-test regexdna::variant9                      ... bench:   3,268,398 ns/iter (+/- 298,223) = 1555 MB/s
-test rust_compile::compile_huge              ... bench:      94,562 ns/iter (+/- 2,194)
-test rust_compile::compile_huge_bytes        ... bench:   5,611,428 ns/iter (+/- 202,365)
-test rust_compile::compile_huge_full         ... bench:  10,933,505 ns/iter (+/- 325,078)
-test rust_compile::compile_simple            ... bench:       3,496 ns/iter (+/- 156)
-test rust_compile::compile_simple_bytes      ... bench:       3,572 ns/iter (+/- 389)
-test rust_compile::compile_simple_full       ... bench:      20,283 ns/iter (+/- 1,894)
-test rust_compile::compile_small             ... bench:       8,475 ns/iter (+/- 1,008)
-test rust_compile::compile_small_bytes       ... bench:     157,446 ns/iter (+/- 11,319)
-test rust_compile::compile_small_full        ... bench:     316,041 ns/iter (+/- 23,620)
-test sherlock::before_after_holmes           ... bench:     906,578 ns/iter (+/- 129,507) = 656 MB/s
-test sherlock::before_holmes                 ... bench:      64,715 ns/iter (+/- 9,107) = 9193 MB/s
-test sherlock::everything_greedy             ... bench:   2,065,017 ns/iter (+/- 156,855) = 288 MB/s
-test sherlock::everything_greedy_nl          ... bench:     810,672 ns/iter (+/- 100,547) = 733 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     106,124 ns/iter (+/- 10,948) = 5606 MB/s
-test sherlock::holmes_coword_watson          ... bench:     488,503 ns/iter (+/- 63,243) = 1217 MB/s
-test sherlock::ing_suffix                    ... bench:     384,936 ns/iter (+/- 25,316) = 1545 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,060,294 ns/iter (+/- 152,263) = 561 MB/s
-test sherlock::letters                       ... bench:  22,127,059 ns/iter (+/- 413,502) = 26 MB/s
-test sherlock::letters_lower                 ... bench:  21,535,012 ns/iter (+/- 463,835) = 27 MB/s
-test sherlock::letters_upper                 ... bench:   1,758,480 ns/iter (+/- 130,352) = 338 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,329 ns/iter (+/- 96,625) = 663 MB/s
-test sherlock::name_alt1                     ... bench:      31,585 ns/iter (+/- 2,796) = 18835 MB/s
-test sherlock::name_alt2                     ... bench:      86,223 ns/iter (+/- 9,553) = 6899 MB/s
-test sherlock::name_alt3                     ... bench:      97,177 ns/iter (+/- 11,479) = 6122 MB/s
-test sherlock::name_alt3_nocase              ... bench:     381,511 ns/iter (+/- 55,025) = 1559 MB/s
-test sherlock::name_alt4                     ... bench:     121,672 ns/iter (+/- 9,253) = 4889 MB/s
-test sherlock::name_alt4_nocase              ... bench:     187,887 ns/iter (+/- 26,932) = 3166 MB/s
-test sherlock::name_alt5                     ... bench:      90,732 ns/iter (+/- 7,251) = 6557 MB/s
-test sherlock::name_alt5_nocase              ... bench:     352,388 ns/iter (+/- 50,408) = 1688 MB/s
-test sherlock::name_holmes                   ... bench:      33,836 ns/iter (+/- 3,388) = 17582 MB/s
-test sherlock::name_holmes_nocase            ... bench:     133,068 ns/iter (+/- 7,602) = 4470 MB/s
-test sherlock::name_sherlock                 ... bench:      62,719 ns/iter (+/- 8,927) = 9485 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      24,688 ns/iter (+/- 2,482) = 24098 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      97,793 ns/iter (+/- 12,078) = 6083 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      95,772 ns/iter (+/- 13,713) = 6211 MB/s
-test sherlock::name_whitespace               ... bench:      70,942 ns/iter (+/- 5,565) = 8386 MB/s
-test sherlock::no_match_common               ... bench:      14,645 ns/iter (+/- 1,430) = 40623 MB/s
-test sherlock::no_match_really_common        ... bench:     239,346 ns/iter (+/- 17,292) = 2485 MB/s
-test sherlock::no_match_uncommon             ... bench:      14,637 ns/iter (+/- 1,360) = 40645 MB/s
-test sherlock::quotes                        ... bench:     367,945 ns/iter (+/- 35,370) = 1616 MB/s
-test sherlock::repeated_class_negation       ... bench:  74,367,046 ns/iter (+/- 1,114,875) = 7 MB/s
-test sherlock::the_lower                     ... bench:     463,888 ns/iter (+/- 67,551) = 1282 MB/s
-test sherlock::the_nocase                    ... bench:     520,822 ns/iter (+/- 76,131) = 1142 MB/s
-test sherlock::the_upper                     ... bench:      37,354 ns/iter (+/- 4,110) = 15926 MB/s
-test sherlock::the_whitespace                ... bench:     922,312 ns/iter (+/- 95,082) = 645 MB/s
-test sherlock::word_ending_n                 ... bench:   1,679,343 ns/iter (+/- 165,580) = 354 MB/s
-test sherlock::words                         ... bench:   8,280,082 ns/iter (+/- 290,280) = 71 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 113.49s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-before-02 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-before-02
deleted file mode 100644
index 5d751021..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/08-new-memmem/rust-before-02
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:          17 ns/iter (+/- 0) = 22941 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          18 ns/iter (+/- 0) = 21666 MB/s
-test misc::anchored_literal_short_match      ... bench:          16 ns/iter (+/- 2) = 1625 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          17 ns/iter (+/- 2) = 1529 MB/s
-test misc::easy0_1K                          ... bench:          12 ns/iter (+/- 0) = 87583 MB/s
-test misc::easy0_1MB                         ... bench:          14 ns/iter (+/- 1) = 74900214 MB/s
-test misc::easy0_32                          ... bench:          11 ns/iter (+/- 1) = 5363 MB/s
-test misc::easy0_32K                         ... bench:          12 ns/iter (+/- 1) = 2732916 MB/s
-test misc::easy1_1K                          ... bench:          38 ns/iter (+/- 5) = 27473 MB/s
-test misc::easy1_1MB                         ... bench:          40 ns/iter (+/- 5) = 26214900 MB/s
-test misc::easy1_32                          ... bench:          38 ns/iter (+/- 1) = 1368 MB/s
-test misc::easy1_32K                         ... bench:          38 ns/iter (+/- 1) = 862842 MB/s
-test misc::hard_1K                           ... bench:          49 ns/iter (+/- 4) = 21448 MB/s
-test misc::hard_1MB                          ... bench:          52 ns/iter (+/- 7) = 20165442 MB/s
-test misc::hard_32                           ... bench:          49 ns/iter (+/- 1) = 1204 MB/s
-test misc::hard_32K                          ... bench:          49 ns/iter (+/- 6) = 669285 MB/s
-test misc::is_match_set                      ... bench:          59 ns/iter (+/- 2) = 423 MB/s
-test misc::literal                           ... bench:          11 ns/iter (+/- 0) = 4636 MB/s
-test misc::long_needle1                      ... bench:       1,179 ns/iter (+/- 92) = 84818 MB/s
-test misc::long_needle2                      ... bench:     680,418 ns/iter (+/- 27,142) = 146 MB/s
-test misc::match_class                       ... bench:          68 ns/iter (+/- 6) = 1191 MB/s
-test misc::match_class_in_range              ... bench:          13 ns/iter (+/- 1) = 6230 MB/s
-test misc::match_class_unicode               ... bench:         253 ns/iter (+/- 33) = 636 MB/s
-test misc::matches_set                       ... bench:         453 ns/iter (+/- 65) = 55 MB/s
-test misc::medium_1K                         ... bench:          13 ns/iter (+/- 1) = 80923 MB/s
-test misc::medium_1MB                        ... bench:          17 ns/iter (+/- 2) = 61682588 MB/s
-test misc::medium_32                         ... bench:          13 ns/iter (+/- 0) = 4615 MB/s
-test misc::medium_32K                        ... bench:          13 ns/iter (+/- 0) = 2522769 MB/s
-test misc::no_exponential                    ... bench:         330 ns/iter (+/- 47) = 303 MB/s
-test misc::not_literal                       ... bench:          88 ns/iter (+/- 12) = 579 MB/s
-test misc::one_pass_long_prefix              ... bench:          50 ns/iter (+/- 6) = 520 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          50 ns/iter (+/- 7) = 520 MB/s
-test misc::one_pass_short                    ... bench:          36 ns/iter (+/- 2) = 472 MB/s
-test misc::one_pass_short_not                ... bench:          38 ns/iter (+/- 5) = 447 MB/s
-test misc::reallyhard2_1K                    ... bench:          55 ns/iter (+/- 7) = 18909 MB/s
-test misc::reallyhard_1K                     ... bench:       1,590 ns/iter (+/- 64) = 661 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,581,975 ns/iter (+/- 126,709) = 662 MB/s
-test misc::reallyhard_32                     ... bench:         100 ns/iter (+/- 4) = 590 MB/s
-test misc::reallyhard_32K                    ... bench:      49,323 ns/iter (+/- 7,063) = 664 MB/s
-test misc::replace_all                       ... bench:         127 ns/iter (+/- 5)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,171 ns/iter (+/- 624) = 1918 MB/s
-test misc::short_haystack_1000000x           ... bench:      88,960 ns/iter (+/- 7,710) = 89928 MB/s
-test misc::short_haystack_100000x            ... bench:      10,193 ns/iter (+/- 952) = 78486 MB/s
-test misc::short_haystack_10000x             ... bench:       5,798 ns/iter (+/- 636) = 13799 MB/s
-test misc::short_haystack_1000x              ... bench:         418 ns/iter (+/- 60) = 19165 MB/s
-test misc::short_haystack_100x               ... bench:         258 ns/iter (+/- 21) = 3143 MB/s
-test misc::short_haystack_10x                ... bench:         216 ns/iter (+/- 21) = 421 MB/s
-test misc::short_haystack_1x                 ... bench:         205 ns/iter (+/- 29) = 92 MB/s
-test misc::short_haystack_2x                 ... bench:         206 ns/iter (+/- 22) = 131 MB/s
-test misc::short_haystack_3x                 ... bench:         205 ns/iter (+/- 29) = 170 MB/s
-test misc::short_haystack_4x                 ... bench:         214 ns/iter (+/- 6) = 200 MB/s
-test regexdna::find_new_lines                ... bench:  12,039,715 ns/iter (+/- 410,515) = 422 MB/s
-test regexdna::subst1                        ... bench:     750,454 ns/iter (+/- 65,358) = 6773 MB/s
-test regexdna::subst10                       ... bench:     748,321 ns/iter (+/- 93,416) = 6793 MB/s
-test regexdna::subst11                       ... bench:     747,906 ns/iter (+/- 92,141) = 6796 MB/s
-test regexdna::subst2                        ... bench:     755,082 ns/iter (+/- 88,044) = 6732 MB/s
-test regexdna::subst3                        ... bench:     753,496 ns/iter (+/- 70,987) = 6746 MB/s
-test regexdna::subst4                        ... bench:     747,103 ns/iter (+/- 102,992) = 6804 MB/s
-test regexdna::subst5                        ... bench:     750,805 ns/iter (+/- 72,572) = 6770 MB/s
-test regexdna::subst6                        ... bench:     748,419 ns/iter (+/- 47,272) = 6792 MB/s
-test regexdna::subst7                        ... bench:     752,556 ns/iter (+/- 95,329) = 6754 MB/s
-test regexdna::subst8                        ... bench:     756,009 ns/iter (+/- 78,049) = 6724 MB/s
-test regexdna::subst9                        ... bench:     749,278 ns/iter (+/- 70,259) = 6784 MB/s
-test regexdna::variant1                      ... bench:   2,215,182 ns/iter (+/- 114,543) = 2294 MB/s
-test regexdna::variant2                      ... bench:   3,207,983 ns/iter (+/- 184,419) = 1584 MB/s
-test regexdna::variant3                      ... bench:   3,791,716 ns/iter (+/- 192,185) = 1340 MB/s
-test regexdna::variant4                      ... bench:   3,809,934 ns/iter (+/- 222,872) = 1334 MB/s
-test regexdna::variant5                      ... bench:   2,651,345 ns/iter (+/- 183,673) = 1917 MB/s
-test regexdna::variant6                      ... bench:   2,635,566 ns/iter (+/- 170,288) = 1928 MB/s
-test regexdna::variant7                      ... bench:   3,265,519 ns/iter (+/- 234,923) = 1556 MB/s
-test regexdna::variant8                      ... bench:   3,340,830 ns/iter (+/- 183,129) = 1521 MB/s
-test regexdna::variant9                      ... bench:   3,267,141 ns/iter (+/- 185,543) = 1555 MB/s
-test rust_compile::compile_huge              ... bench:      94,368 ns/iter (+/- 13,293)
-test rust_compile::compile_huge_bytes        ... bench:   5,616,594 ns/iter (+/- 243,462)
-test rust_compile::compile_huge_full         ... bench:  10,862,100 ns/iter (+/- 260,207)
-test rust_compile::compile_simple            ... bench:       3,463 ns/iter (+/- 350)
-test rust_compile::compile_simple_bytes      ... bench:       3,542 ns/iter (+/- 504)
-test rust_compile::compile_simple_full       ... bench:      20,562 ns/iter (+/- 3,117)
-test rust_compile::compile_small             ... bench:       8,325 ns/iter (+/- 641)
-test rust_compile::compile_small_bytes       ... bench:     153,450 ns/iter (+/- 11,174)
-test rust_compile::compile_small_full        ... bench:     315,871 ns/iter (+/- 33,828)
-test sherlock::before_after_holmes           ... bench:     906,423 ns/iter (+/- 34,801) = 656 MB/s
-test sherlock::before_holmes                 ... bench:      64,457 ns/iter (+/- 8,343) = 9229 MB/s
-test sherlock::everything_greedy             ... bench:   2,058,675 ns/iter (+/- 208,885) = 288 MB/s
-test sherlock::everything_greedy_nl          ... bench:     810,638 ns/iter (+/- 39,955) = 733 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     106,048 ns/iter (+/- 8,158) = 5610 MB/s
-test sherlock::holmes_coword_watson          ... bench:     482,243 ns/iter (+/- 30,955) = 1233 MB/s
-test sherlock::ing_suffix                    ... bench:     385,767 ns/iter (+/- 24,902) = 1542 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,060,762 ns/iter (+/- 94,273) = 560 MB/s
-test sherlock::letters                       ... bench:  22,127,007 ns/iter (+/- 467,539) = 26 MB/s
-test sherlock::letters_lower                 ... bench:  21,719,871 ns/iter (+/- 459,587) = 27 MB/s
-test sherlock::letters_upper                 ... bench:   1,753,028 ns/iter (+/- 172,914) = 339 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,091 ns/iter (+/- 109,954) = 663 MB/s
-test sherlock::name_alt1                     ... bench:      31,636 ns/iter (+/- 2,323) = 18805 MB/s
-test sherlock::name_alt2                     ... bench:      85,898 ns/iter (+/- 10,486) = 6926 MB/s
-test sherlock::name_alt3                     ... bench:      97,104 ns/iter (+/- 8,851) = 6126 MB/s
-test sherlock::name_alt3_nocase              ... bench:     381,487 ns/iter (+/- 14,829) = 1559 MB/s
-test sherlock::name_alt4                     ... bench:     121,301 ns/iter (+/- 17,178) = 4904 MB/s
-test sherlock::name_alt4_nocase              ... bench:     187,262 ns/iter (+/- 17,478) = 3177 MB/s
-test sherlock::name_alt5                     ... bench:      90,773 ns/iter (+/- 2,791) = 6554 MB/s
-test sherlock::name_alt5_nocase              ... bench:     351,900 ns/iter (+/- 40,408) = 1690 MB/s
-test sherlock::name_holmes                   ... bench:      34,767 ns/iter (+/- 3,334) = 17112 MB/s
-test sherlock::name_holmes_nocase            ... bench:     132,953 ns/iter (+/- 15,747) = 4474 MB/s
-test sherlock::name_sherlock                 ... bench:      66,566 ns/iter (+/- 6,822) = 8937 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      24,481 ns/iter (+/- 2,330) = 24301 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      97,531 ns/iter (+/- 12,331) = 6099 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      95,808 ns/iter (+/- 13,250) = 6209 MB/s
-test sherlock::name_whitespace               ... bench:      71,342 ns/iter (+/- 9,877) = 8339 MB/s
-test sherlock::no_match_common               ... bench:      14,704 ns/iter (+/- 1,241) = 40460 MB/s
-test sherlock::no_match_really_common        ... bench:     238,731 ns/iter (+/- 31,179) = 2492 MB/s
-test sherlock::no_match_uncommon             ... bench:      14,620 ns/iter (+/- 1,250) = 40693 MB/s
-test sherlock::quotes                        ... bench:     367,740 ns/iter (+/- 10,107) = 1617 MB/s
-test sherlock::repeated_class_negation       ... bench:  76,315,217 ns/iter (+/- 940,903) = 7 MB/s
-test sherlock::the_lower                     ... bench:     464,322 ns/iter (+/- 14,654) = 1281 MB/s
-test sherlock::the_nocase                    ... bench:     519,069 ns/iter (+/- 59,161) = 1146 MB/s
-test sherlock::the_upper                     ... bench:      37,575 ns/iter (+/- 2,455) = 15833 MB/s
-test sherlock::the_whitespace                ... bench:     939,412 ns/iter (+/- 60,941) = 633 MB/s
-test sherlock::word_ending_n                 ... bench:   1,681,192 ns/iter (+/- 156,265) = 353 MB/s
-test sherlock::words                         ... bench:   8,213,141 ns/iter (+/- 322,533) = 72 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 94.52s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/pcre2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/pcre2
deleted file mode 100644
index 595365d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/pcre2
+++ /dev/null
@@ -1,98 +0,0 @@
-
-running 93 tests
-test misc::anchored_literal_long_match       ... bench:           8 ns/iter (+/- 0) = 48750 MB/s
-test misc::anchored_literal_long_non_match   ... bench:           7 ns/iter (+/- 0) = 55714 MB/s
-test misc::anchored_literal_short_match      ... bench:           8 ns/iter (+/- 0) = 3250 MB/s
-test misc::anchored_literal_short_non_match  ... bench:           7 ns/iter (+/- 0) = 3714 MB/s
-test misc::easy0_1K                          ... bench:          32 ns/iter (+/- 1) = 32843 MB/s
-test misc::easy0_1MB                         ... bench:      22,160 ns/iter (+/- 3,887) = 47319 MB/s
-test misc::easy0_32                          ... bench:          10 ns/iter (+/- 0) = 5900 MB/s
-test misc::easy0_32K                         ... bench:         651 ns/iter (+/- 2) = 50376 MB/s
-test misc::easy1_1K                          ... bench:          36 ns/iter (+/- 1) = 29000 MB/s
-test misc::easy1_1MB                         ... bench:      22,982 ns/iter (+/- 2,839) = 45626 MB/s
-test misc::easy1_32                          ... bench:          12 ns/iter (+/- 0) = 4333 MB/s
-test misc::easy1_32K                         ... bench:         654 ns/iter (+/- 2) = 50134 MB/s
-test misc::hard_1K                           ... bench:         469 ns/iter (+/- 9) = 2240 MB/s
-test misc::hard_1MB                          ... bench:     733,962 ns/iter (+/- 28,297) = 1428 MB/s
-test misc::hard_32                           ... bench:          34 ns/iter (+/- 4) = 1735 MB/s
-test misc::hard_32K                          ... bench:      19,567 ns/iter (+/- 363) = 1676 MB/s
-test misc::literal                           ... bench:           8 ns/iter (+/- 0) = 6375 MB/s
-test misc::long_needle1                      ... bench:     257,858 ns/iter (+/- 646) = 387 MB/s
-test misc::long_needle2                      ... bench:     259,045 ns/iter (+/- 2,220) = 386 MB/s
-test misc::match_class                       ... bench:          34 ns/iter (+/- 1) = 2382 MB/s
-test misc::match_class_in_range              ... bench:           9 ns/iter (+/- 0) = 9000 MB/s
-test misc::match_class_unicode               ... bench:         125 ns/iter (+/- 3) = 1288 MB/s
-test misc::medium_1K                         ... bench:          35 ns/iter (+/- 3) = 30057 MB/s
-test misc::medium_1MB                        ... bench:      21,126 ns/iter (+/- 4,036) = 49635 MB/s
-test misc::medium_32                         ... bench:          10 ns/iter (+/- 0) = 6000 MB/s
-test misc::medium_32K                        ... bench:         714 ns/iter (+/- 122) = 45932 MB/s
-test misc::not_literal                       ... bench:          62 ns/iter (+/- 2) = 822 MB/s
-test misc::one_pass_long_prefix              ... bench:           8 ns/iter (+/- 0) = 3250 MB/s
-test misc::one_pass_long_prefix_not          ... bench:           8 ns/iter (+/- 0) = 3250 MB/s
-test misc::one_pass_short                    ... bench:          19 ns/iter (+/- 1) = 894 MB/s
-test misc::one_pass_short_not                ... bench:          19 ns/iter (+/- 1) = 894 MB/s
-test misc::reallyhard2_1K                    ... bench:       1,704 ns/iter (+/- 17) = 610 MB/s
-test misc::reallyhard_1K                     ... bench:         495 ns/iter (+/- 9) = 2123 MB/s
-test misc::reallyhard_1MB                    ... bench:     682,371 ns/iter (+/- 31,284) = 1536 MB/s
-test misc::reallyhard_32                     ... bench:          34 ns/iter (+/- 2) = 1735 MB/s
-test misc::reallyhard_32K                    ... bench:      17,994 ns/iter (+/- 540) = 1822 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       1,507 ns/iter (+/- 10) = 5308 MB/s
-test regexdna::find_new_lines                ... bench:     849,983 ns/iter (+/- 25,898) = 5980 MB/s
-test regexdna::subst1                        ... bench:     520,602 ns/iter (+/- 12,170) = 9764 MB/s
-test regexdna::subst10                       ... bench:     517,151 ns/iter (+/- 19,060) = 9829 MB/s
-test regexdna::subst11                       ... bench:     519,209 ns/iter (+/- 12,477) = 9790 MB/s
-test regexdna::subst2                        ... bench:     513,418 ns/iter (+/- 19,803) = 9901 MB/s
-test regexdna::subst3                        ... bench:     514,166 ns/iter (+/- 13,019) = 9886 MB/s
-test regexdna::subst4                        ... bench:     517,808 ns/iter (+/- 30,655) = 9817 MB/s
-test regexdna::subst5                        ... bench:     516,922 ns/iter (+/- 17,204) = 9834 MB/s
-test regexdna::subst6                        ... bench:     509,430 ns/iter (+/- 20,608) = 9978 MB/s
-test regexdna::subst7                        ... bench:     519,437 ns/iter (+/- 10,537) = 9786 MB/s
-test regexdna::subst8                        ... bench:     520,282 ns/iter (+/- 25,742) = 9770 MB/s
-test regexdna::subst9                        ... bench:     512,819 ns/iter (+/- 11,443) = 9912 MB/s
-test regexdna::variant1                      ... bench:   5,302,526 ns/iter (+/- 158,370) = 958 MB/s
-test regexdna::variant2                      ... bench:   7,421,107 ns/iter (+/- 105,716) = 684 MB/s
-test regexdna::variant3                      ... bench:   7,310,968 ns/iter (+/- 103,989) = 695 MB/s
-test regexdna::variant4                      ... bench:   6,152,891 ns/iter (+/- 144,194) = 826 MB/s
-test regexdna::variant5                      ... bench:   5,717,515 ns/iter (+/- 42,902) = 889 MB/s
-test regexdna::variant6                      ... bench:   5,840,938 ns/iter (+/- 47,730) = 870 MB/s
-test regexdna::variant7                      ... bench:   6,624,859 ns/iter (+/- 37,376) = 767 MB/s
-test regexdna::variant8                      ... bench:   7,308,342 ns/iter (+/- 58,395) = 695 MB/s
-test regexdna::variant9                      ... bench:   7,372,260 ns/iter (+/- 76,966) = 689 MB/s
-test sherlock::before_after_holmes           ... bench:   2,817,108 ns/iter (+/- 18,002) = 211 MB/s
-test sherlock::before_holmes                 ... bench:   2,841,515 ns/iter (+/- 14,677) = 209 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      33,066 ns/iter (+/- 1,766) = 17992 MB/s
-test sherlock::ing_suffix                    ... bench:   1,299,382 ns/iter (+/- 19,674) = 457 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   2,799,189 ns/iter (+/- 33,841) = 212 MB/s
-test sherlock::letters                       ... bench:   4,923,399 ns/iter (+/- 111,904) = 120 MB/s
-test sherlock::letters_lower                 ... bench:   5,057,224 ns/iter (+/- 102,860) = 117 MB/s
-test sherlock::letters_upper                 ... bench:     874,306 ns/iter (+/- 10,587) = 680 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:      15,876 ns/iter (+/- 58) = 37473 MB/s
-test sherlock::name_alt1                     ... bench:      19,349 ns/iter (+/- 201) = 30747 MB/s
-test sherlock::name_alt2                     ... bench:      29,916 ns/iter (+/- 581) = 19886 MB/s
-test sherlock::name_alt3                     ... bench:     461,887 ns/iter (+/- 5,337) = 1288 MB/s
-test sherlock::name_alt3_nocase              ... bench:   1,813,574 ns/iter (+/- 27,519) = 328 MB/s
-test sherlock::name_alt4                     ... bench:      30,155 ns/iter (+/- 1,407) = 19729 MB/s
-test sherlock::name_alt4_nocase              ... bench:     822,605 ns/iter (+/- 56,624) = 723 MB/s
-test sherlock::name_alt5                     ... bench:     426,318 ns/iter (+/- 12,233) = 1395 MB/s
-test sherlock::name_alt5_nocase              ... bench:   1,012,097 ns/iter (+/- 27,806) = 587 MB/s
-test sherlock::name_holmes                   ... bench:      19,833 ns/iter (+/- 499) = 29997 MB/s
-test sherlock::name_holmes_nocase            ... bench:      40,266 ns/iter (+/- 2,089) = 14775 MB/s
-test sherlock::name_sherlock                 ... bench:      14,589 ns/iter (+/- 115) = 40779 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      14,213 ns/iter (+/- 81) = 41858 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     602,296 ns/iter (+/- 98,066) = 987 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     479,745 ns/iter (+/- 18,070) = 1240 MB/s
-test sherlock::name_whitespace               ... bench:      14,584 ns/iter (+/- 44) = 40793 MB/s
-test sherlock::no_match_common               ... bench:      13,499 ns/iter (+/- 1,090) = 44072 MB/s
-test sherlock::no_match_really_common        ... bench:      12,507 ns/iter (+/- 1,238) = 47568 MB/s
-test sherlock::no_match_uncommon             ... bench:      11,534 ns/iter (+/- 9) = 51580 MB/s
-test sherlock::quotes                        ... bench:     251,867 ns/iter (+/- 11,818) = 2362 MB/s
-test sherlock::repeated_class_negation       ... bench:   2,969,330 ns/iter (+/- 287,150) = 200 MB/s
-test sherlock::the_lower                     ... bench:     206,513 ns/iter (+/- 3,294) = 2880 MB/s
-test sherlock::the_nocase                    ... bench:     237,655 ns/iter (+/- 6,616) = 2503 MB/s
-test sherlock::the_upper                     ... bench:      23,922 ns/iter (+/- 510) = 24869 MB/s
-test sherlock::the_whitespace                ... bench:     326,257 ns/iter (+/- 10,038) = 1823 MB/s
-test sherlock::word_ending_n                 ... bench:   3,264,085 ns/iter (+/- 57,242) = 182 MB/s
-test sherlock::words                         ... bench:   3,161,731 ns/iter (+/- 45,794) = 188 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 93 measured; 0 filtered out; finished in 184.16s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/re2 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/re2
deleted file mode 100644
index 9bae2a1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/re2
+++ /dev/null
@@ -1,101 +0,0 @@
-
-running 96 tests
-test misc::anchored_literal_long_match       ... bench:          26 ns/iter (+/- 0) = 15000 MB/s
-test misc::anchored_literal_long_non_match   ... bench:           4 ns/iter (+/- 0) = 97500 MB/s
-test misc::anchored_literal_short_match      ... bench:          26 ns/iter (+/- 0) = 1000 MB/s
-test misc::anchored_literal_short_non_match  ... bench:           4 ns/iter (+/- 0) = 6500 MB/s
-test misc::easy0_1K                          ... bench:          50 ns/iter (+/- 0) = 21020 MB/s
-test misc::easy0_1MB                         ... bench:          51 ns/iter (+/- 0) = 20560843 MB/s
-test misc::easy0_32                          ... bench:          50 ns/iter (+/- 0) = 1180 MB/s
-test misc::easy0_32K                         ... bench:          50 ns/iter (+/- 0) = 655900 MB/s
-test misc::easy1_1K                          ... bench:          43 ns/iter (+/- 1) = 24279 MB/s
-test misc::easy1_1MB                         ... bench:          43 ns/iter (+/- 0) = 24385953 MB/s
-test misc::easy1_32                          ... bench:          43 ns/iter (+/- 1) = 1209 MB/s
-test misc::easy1_32K                         ... bench:          43 ns/iter (+/- 0) = 762511 MB/s
-test misc::hard_1K                           ... bench:          50 ns/iter (+/- 0) = 21020 MB/s
-test misc::hard_1MB                          ... bench:          50 ns/iter (+/- 0) = 20972060 MB/s
-test misc::hard_32                           ... bench:          50 ns/iter (+/- 0) = 1180 MB/s
-test misc::hard_32K                          ... bench:          50 ns/iter (+/- 0) = 655900 MB/s
-test misc::literal                           ... bench:          25 ns/iter (+/- 0) = 2040 MB/s
-test misc::long_needle1                      ... bench:     356,319 ns/iter (+/- 680) = 280 MB/s
-test misc::long_needle2                      ... bench:     356,384 ns/iter (+/- 3,126) = 280 MB/s
-test misc::match_class                       ... bench:          94 ns/iter (+/- 0) = 861 MB/s
-test misc::match_class_in_range              ... bench:          94 ns/iter (+/- 0) = 861 MB/s
-test misc::match_class_unicode               ... bench:         168 ns/iter (+/- 1) = 958 MB/s
-test misc::medium_1K                         ... bench:          51 ns/iter (+/- 0) = 20627 MB/s
-test misc::medium_1MB                        ... bench:          51 ns/iter (+/- 0) = 20560862 MB/s
-test misc::medium_32                         ... bench:          51 ns/iter (+/- 0) = 1176 MB/s
-test misc::medium_32K                        ... bench:          51 ns/iter (+/- 1) = 643058 MB/s
-test misc::no_exponential                    ... bench:         112 ns/iter (+/- 0) = 892 MB/s
-test misc::not_literal                       ... bench:          66 ns/iter (+/- 0) = 772 MB/s
-test misc::one_pass_long_prefix              ... bench:          25 ns/iter (+/- 0) = 1040 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          44 ns/iter (+/- 0) = 590 MB/s
-test misc::one_pass_short                    ... bench:          43 ns/iter (+/- 0) = 395 MB/s
-test misc::one_pass_short_not                ... bench:          41 ns/iter (+/- 0) = 414 MB/s
-test misc::reallyhard2_1K                    ... bench:         978 ns/iter (+/- 7) = 1063 MB/s
-test misc::reallyhard_1K                     ... bench:         987 ns/iter (+/- 11) = 1064 MB/s
-test misc::reallyhard_1MB                    ... bench:     957,501 ns/iter (+/- 8,247) = 1095 MB/s
-test misc::reallyhard_32                     ... bench:          73 ns/iter (+/- 0) = 808 MB/s
-test misc::reallyhard_32K                    ... bench:      30,057 ns/iter (+/- 315) = 1091 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       7,368 ns/iter (+/- 105) = 1085 MB/s
-test regexdna::find_new_lines                ... bench:  15,567,882 ns/iter (+/- 48,213) = 326 MB/s
-test regexdna::subst1                        ... bench:   2,011,288 ns/iter (+/- 23,092) = 2527 MB/s
-test regexdna::subst10                       ... bench:   2,013,337 ns/iter (+/- 33,388) = 2524 MB/s
-test regexdna::subst11                       ... bench:   2,005,968 ns/iter (+/- 25,799) = 2534 MB/s
-test regexdna::subst2                        ... bench:   2,022,572 ns/iter (+/- 23,311) = 2513 MB/s
-test regexdna::subst3                        ... bench:   2,018,386 ns/iter (+/- 32,071) = 2518 MB/s
-test regexdna::subst4                        ... bench:   2,013,345 ns/iter (+/- 32,599) = 2524 MB/s
-test regexdna::subst5                        ... bench:   2,015,871 ns/iter (+/- 25,081) = 2521 MB/s
-test regexdna::subst6                        ... bench:   2,008,492 ns/iter (+/- 24,502) = 2530 MB/s
-test regexdna::subst7                        ... bench:   2,018,804 ns/iter (+/- 38,700) = 2518 MB/s
-test regexdna::subst8                        ... bench:   2,010,856 ns/iter (+/- 23,695) = 2527 MB/s
-test regexdna::subst9                        ... bench:   2,023,767 ns/iter (+/- 17,040) = 2511 MB/s
-test regexdna::variant1                      ... bench:   4,688,839 ns/iter (+/- 19,258) = 1084 MB/s
-test regexdna::variant2                      ... bench:   4,693,463 ns/iter (+/- 31,741) = 1083 MB/s
-test regexdna::variant3                      ... bench:   4,674,020 ns/iter (+/- 15,755) = 1087 MB/s
-test regexdna::variant4                      ... bench:   4,666,017 ns/iter (+/- 16,318) = 1089 MB/s
-test regexdna::variant5                      ... bench:   4,682,965 ns/iter (+/- 17,552) = 1085 MB/s
-test regexdna::variant6                      ... bench:   4,661,825 ns/iter (+/- 21,667) = 1090 MB/s
-test regexdna::variant7                      ... bench:   4,697,959 ns/iter (+/- 24,282) = 1082 MB/s
-test regexdna::variant8                      ... bench:   4,700,703 ns/iter (+/- 21,377) = 1081 MB/s
-test regexdna::variant9                      ... bench:   4,665,298 ns/iter (+/- 19,086) = 1089 MB/s
-test sherlock::before_after_holmes           ... bench:     560,350 ns/iter (+/- 3,852) = 1061 MB/s
-test sherlock::before_holmes                 ... bench:     574,423 ns/iter (+/- 4,638) = 1035 MB/s
-test sherlock::everything_greedy             ... bench:   2,688,852 ns/iter (+/- 16,320) = 221 MB/s
-test sherlock::everything_greedy_nl          ... bench:   1,206,136 ns/iter (+/- 6,173) = 493 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     547,910 ns/iter (+/- 7,147) = 1085 MB/s
-test sherlock::holmes_coword_watson          ... bench:     610,803 ns/iter (+/- 1,029) = 974 MB/s
-test sherlock::ing_suffix                    ... bench:     777,478 ns/iter (+/- 3,028) = 765 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     725,653 ns/iter (+/- 4,746) = 819 MB/s
-test sherlock::letters                       ... bench:  25,265,004 ns/iter (+/- 120,234) = 23 MB/s
-test sherlock::letters_lower                 ... bench:  24,615,621 ns/iter (+/- 134,875) = 24 MB/s
-test sherlock::letters_upper                 ... bench:   1,485,920 ns/iter (+/- 21,446) = 400 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     547,229 ns/iter (+/- 3,726) = 1087 MB/s
-test sherlock::name_alt1                     ... bench:      18,148 ns/iter (+/- 131) = 32782 MB/s
-test sherlock::name_alt2                     ... bench:     586,335 ns/iter (+/- 3,679) = 1014 MB/s
-test sherlock::name_alt3                     ... bench:     601,096 ns/iter (+/- 3,781) = 989 MB/s
-test sherlock::name_alt3_nocase              ... bench:     602,319 ns/iter (+/- 7,872) = 987 MB/s
-test sherlock::name_alt4                     ... bench:     586,762 ns/iter (+/- 3,465) = 1013 MB/s
-test sherlock::name_alt4_nocase              ... bench:     595,539 ns/iter (+/- 3,240) = 998 MB/s
-test sherlock::name_alt5                     ... bench:     592,474 ns/iter (+/- 6,361) = 1004 MB/s
-test sherlock::name_alt5_nocase              ... bench:     593,214 ns/iter (+/- 4,667) = 1002 MB/s
-test sherlock::name_holmes                   ... bench:      40,236 ns/iter (+/- 514) = 14786 MB/s
-test sherlock::name_holmes_nocase            ... bench:     215,216 ns/iter (+/- 4,822) = 2764 MB/s
-test sherlock::name_sherlock                 ... bench:      14,064 ns/iter (+/- 159) = 42301 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      15,727 ns/iter (+/- 166) = 37828 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:     552,042 ns/iter (+/- 6,395) = 1077 MB/s
-test sherlock::name_sherlock_nocase          ... bench:     552,475 ns/iter (+/- 5,365) = 1076 MB/s
-test sherlock::name_whitespace               ... bench:      16,210 ns/iter (+/- 194) = 36701 MB/s
-test sherlock::no_match_common               ... bench:     147,489 ns/iter (+/- 602) = 4033 MB/s
-test sherlock::no_match_really_common        ... bench:     157,205 ns/iter (+/- 350) = 3784 MB/s
-test sherlock::no_match_uncommon             ... bench:       4,849 ns/iter (+/- 5) = 122691 MB/s
-test sherlock::quotes                        ... bench:     619,880 ns/iter (+/- 5,189) = 959 MB/s
-test sherlock::the_lower                     ... bench:     685,396 ns/iter (+/- 12,559) = 868 MB/s
-test sherlock::the_nocase                    ... bench:     771,051 ns/iter (+/- 18,470) = 771 MB/s
-test sherlock::the_upper                     ... bench:      59,139 ns/iter (+/- 1,604) = 10059 MB/s
-test sherlock::the_whitespace                ... bench:     736,147 ns/iter (+/- 7,668) = 808 MB/s
-test sherlock::word_ending_n                 ... bench:   1,200,401 ns/iter (+/- 11,206) = 495 MB/s
-test sherlock::words                         ... bench:   8,024,768 ns/iter (+/- 93,051) = 74 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 96 measured; 0 filtered out; finished in 86.80s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/rust
deleted file mode 100644
index 30924d4b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/rust
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:           7 ns/iter (+/- 0) = 55714 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          10 ns/iter (+/- 0) = 39000 MB/s
-test misc::anchored_literal_short_match      ... bench:           7 ns/iter (+/- 0) = 3714 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          10 ns/iter (+/- 0) = 2600 MB/s
-test misc::easy0_1K                          ... bench:           8 ns/iter (+/- 0) = 131375 MB/s
-test misc::easy0_1MB                         ... bench:          12 ns/iter (+/- 0) = 87383583 MB/s
-test misc::easy0_32                          ... bench:           8 ns/iter (+/- 0) = 7375 MB/s
-test misc::easy0_32K                         ... bench:           8 ns/iter (+/- 0) = 4099375 MB/s
-test misc::easy1_1K                          ... bench:          25 ns/iter (+/- 0) = 41760 MB/s
-test misc::easy1_1MB                         ... bench:          26 ns/iter (+/- 0) = 40330615 MB/s
-test misc::easy1_32                          ... bench:          25 ns/iter (+/- 0) = 2080 MB/s
-test misc::easy1_32K                         ... bench:          26 ns/iter (+/- 0) = 1261076 MB/s
-test misc::hard_1K                           ... bench:          33 ns/iter (+/- 0) = 31848 MB/s
-test misc::hard_1MB                          ... bench:          33 ns/iter (+/- 0) = 31775848 MB/s
-test misc::hard_32                           ... bench:          34 ns/iter (+/- 0) = 1735 MB/s
-test misc::hard_32K                          ... bench:          33 ns/iter (+/- 0) = 993787 MB/s
-test misc::is_match_set                      ... bench:          35 ns/iter (+/- 0) = 714 MB/s
-test misc::literal                           ... bench:           7 ns/iter (+/- 0) = 7285 MB/s
-test misc::long_needle1                      ... bench:       1,517 ns/iter (+/- 25) = 65920 MB/s
-test misc::long_needle2                      ... bench:     186,131 ns/iter (+/- 1,191) = 537 MB/s
-test misc::match_class                       ... bench:          37 ns/iter (+/- 0) = 2189 MB/s
-test misc::match_class_in_range              ... bench:           7 ns/iter (+/- 0) = 11571 MB/s
-test misc::match_class_unicode               ... bench:         160 ns/iter (+/- 1) = 1006 MB/s
-test misc::matches_set                       ... bench:         200 ns/iter (+/- 4) = 125 MB/s
-test misc::medium_1K                         ... bench:           8 ns/iter (+/- 0) = 131500 MB/s
-test misc::medium_1MB                        ... bench:          12 ns/iter (+/- 0) = 87383666 MB/s
-test misc::medium_32                         ... bench:           8 ns/iter (+/- 0) = 7500 MB/s
-test misc::medium_32K                        ... bench:           8 ns/iter (+/- 0) = 4099500 MB/s
-test misc::no_exponential                    ... bench:         262 ns/iter (+/- 6) = 381 MB/s
-test misc::not_literal                       ... bench:          43 ns/iter (+/- 1) = 1186 MB/s
-test misc::one_pass_long_prefix              ... bench:          23 ns/iter (+/- 1) = 1130 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          23 ns/iter (+/- 1) = 1130 MB/s
-test misc::one_pass_short                    ... bench:          16 ns/iter (+/- 0) = 1062 MB/s
-test misc::one_pass_short_not                ... bench:          18 ns/iter (+/- 0) = 944 MB/s
-test misc::reallyhard2_1K                    ... bench:          36 ns/iter (+/- 1) = 28888 MB/s
-test misc::reallyhard_1K                     ... bench:       1,155 ns/iter (+/- 11) = 909 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,152,983 ns/iter (+/- 6,607) = 909 MB/s
-test misc::reallyhard_32                     ... bench:          52 ns/iter (+/- 2) = 1134 MB/s
-test misc::reallyhard_32K                    ... bench:      36,194 ns/iter (+/- 327) = 906 MB/s
-test misc::replace_all                       ... bench:          81 ns/iter (+/- 5)
-test misc::reverse_suffix_no_quadratic       ... bench:       2,269 ns/iter (+/- 3) = 3525 MB/s
-test misc::short_haystack_1000000x           ... bench:      63,956 ns/iter (+/- 209) = 125086 MB/s
-test misc::short_haystack_100000x            ... bench:       5,877 ns/iter (+/- 66) = 136125 MB/s
-test misc::short_haystack_10000x             ... bench:       2,414 ns/iter (+/- 10) = 33144 MB/s
-test misc::short_haystack_1000x              ... bench:         195 ns/iter (+/- 11) = 41082 MB/s
-test misc::short_haystack_100x               ... bench:          96 ns/iter (+/- 7) = 8447 MB/s
-test misc::short_haystack_10x                ... bench:          85 ns/iter (+/- 8) = 1070 MB/s
-test misc::short_haystack_1x                 ... bench:          85 ns/iter (+/- 6) = 223 MB/s
-test misc::short_haystack_2x                 ... bench:          86 ns/iter (+/- 12) = 313 MB/s
-test misc::short_haystack_3x                 ... bench:          85 ns/iter (+/- 22) = 411 MB/s
-test misc::short_haystack_4x                 ... bench:          85 ns/iter (+/- 12) = 505 MB/s
-test regexdna::find_new_lines                ... bench:   6,977,678 ns/iter (+/- 90,937) = 728 MB/s
-test regexdna::subst1                        ... bench:     423,846 ns/iter (+/- 41,460) = 11993 MB/s
-test regexdna::subst10                       ... bench:     424,043 ns/iter (+/- 55,743) = 11987 MB/s
-test regexdna::subst11                       ... bench:     418,549 ns/iter (+/- 12,106) = 12145 MB/s
-test regexdna::subst2                        ... bench:     430,056 ns/iter (+/- 8,862) = 11820 MB/s
-test regexdna::subst3                        ... bench:     429,634 ns/iter (+/- 26,807) = 11831 MB/s
-test regexdna::subst4                        ... bench:     419,313 ns/iter (+/- 42,070) = 12123 MB/s
-test regexdna::subst5                        ... bench:     425,299 ns/iter (+/- 43,161) = 11952 MB/s
-test regexdna::subst6                        ... bench:     420,177 ns/iter (+/- 49,394) = 12098 MB/s
-test regexdna::subst7                        ... bench:     425,118 ns/iter (+/- 46,952) = 11957 MB/s
-test regexdna::subst8                        ... bench:     420,840 ns/iter (+/- 11,623) = 12079 MB/s
-test regexdna::subst9                        ... bench:     420,752 ns/iter (+/- 10,186) = 12081 MB/s
-test regexdna::variant1                      ... bench:   1,445,103 ns/iter (+/- 29,436) = 3517 MB/s
-test regexdna::variant2                      ... bench:   2,234,423 ns/iter (+/- 24,502) = 2275 MB/s
-test regexdna::variant3                      ... bench:   2,730,972 ns/iter (+/- 26,961) = 1861 MB/s
-test regexdna::variant4                      ... bench:   2,708,975 ns/iter (+/- 36,517) = 1876 MB/s
-test regexdna::variant5                      ... bench:   1,663,458 ns/iter (+/- 39,508) = 3055 MB/s
-test regexdna::variant6                      ... bench:   1,673,873 ns/iter (+/- 14,846) = 3036 MB/s
-test regexdna::variant7                      ... bench:   2,322,347 ns/iter (+/- 33,731) = 2188 MB/s
-test regexdna::variant8                      ... bench:   2,350,779 ns/iter (+/- 54,976) = 2162 MB/s
-test regexdna::variant9                      ... bench:   2,326,741 ns/iter (+/- 20,836) = 2184 MB/s
-test rust_compile::compile_huge              ... bench:      47,700 ns/iter (+/- 230)
-test rust_compile::compile_huge_bytes        ... bench:   2,987,898 ns/iter (+/- 32,819)
-test rust_compile::compile_huge_full         ... bench:   5,705,551 ns/iter (+/- 63,483)
-test rust_compile::compile_simple            ... bench:       1,963 ns/iter (+/- 44)
-test rust_compile::compile_simple_bytes      ... bench:       1,970 ns/iter (+/- 32)
-test rust_compile::compile_simple_full       ... bench:       9,677 ns/iter (+/- 69)
-test rust_compile::compile_small             ... bench:       4,501 ns/iter (+/- 70)
-test rust_compile::compile_small_bytes       ... bench:      75,372 ns/iter (+/- 2,007)
-test rust_compile::compile_small_full        ... bench:     151,733 ns/iter (+/- 2,378)
-test sherlock::before_after_holmes           ... bench:     655,827 ns/iter (+/- 1,426) = 907 MB/s
-test sherlock::before_holmes                 ... bench:      24,653 ns/iter (+/- 224) = 24132 MB/s
-test sherlock::everything_greedy             ... bench:   1,026,254 ns/iter (+/- 27,926) = 579 MB/s
-test sherlock::everything_greedy_nl          ... bench:     469,676 ns/iter (+/- 62,296) = 1266 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      47,578 ns/iter (+/- 1,730) = 12504 MB/s
-test sherlock::holmes_coword_watson          ... bench:     321,318 ns/iter (+/- 3,235) = 1851 MB/s
-test sherlock::ing_suffix                    ... bench:     150,908 ns/iter (+/- 3,952) = 3942 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     726,848 ns/iter (+/- 5,314) = 818 MB/s
-test sherlock::letters                       ... bench:   9,719,997 ns/iter (+/- 67,717) = 61 MB/s
-test sherlock::letters_lower                 ... bench:   9,559,105 ns/iter (+/- 79,257) = 62 MB/s
-test sherlock::letters_upper                 ... bench:   1,066,791 ns/iter (+/- 13,193) = 557 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     653,228 ns/iter (+/- 881) = 910 MB/s
-test sherlock::name_alt1                     ... bench:      10,663 ns/iter (+/- 76) = 55794 MB/s
-test sherlock::name_alt2                     ... bench:      33,831 ns/iter (+/- 967) = 17585 MB/s
-test sherlock::name_alt3                     ... bench:      38,061 ns/iter (+/- 1,123) = 15631 MB/s
-test sherlock::name_alt3_nocase              ... bench:     218,691 ns/iter (+/- 2,345) = 2720 MB/s
-test sherlock::name_alt4                     ... bench:      52,408 ns/iter (+/- 1,315) = 11351 MB/s
-test sherlock::name_alt4_nocase              ... bench:      84,212 ns/iter (+/- 2,708) = 7064 MB/s
-test sherlock::name_alt5                     ... bench:      35,272 ns/iter (+/- 1,784) = 16867 MB/s
-test sherlock::name_alt5_nocase              ... bench:     193,585 ns/iter (+/- 5,057) = 3073 MB/s
-test sherlock::name_holmes                   ... bench:      15,018 ns/iter (+/- 440) = 39614 MB/s
-test sherlock::name_holmes_nocase            ... bench:      60,207 ns/iter (+/- 1,046) = 9881 MB/s
-test sherlock::name_sherlock                 ... bench:      10,344 ns/iter (+/- 52) = 57514 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      10,374 ns/iter (+/- 98) = 57348 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      42,037 ns/iter (+/- 1,363) = 14152 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      41,192 ns/iter (+/- 881) = 14442 MB/s
-test sherlock::name_whitespace               ... bench:      13,893 ns/iter (+/- 55) = 42822 MB/s
-test sherlock::no_match_common               ... bench:       8,700 ns/iter (+/- 10) = 68383 MB/s
-test sherlock::no_match_really_common        ... bench:      10,368 ns/iter (+/- 123) = 57381 MB/s
-test sherlock::no_match_uncommon             ... bench:       8,695 ns/iter (+/- 7) = 68422 MB/s
-test sherlock::quotes                        ... bench:     222,526 ns/iter (+/- 5,362) = 2673 MB/s
-test sherlock::repeated_class_negation       ... bench:  35,869,193 ns/iter (+/- 551,212) = 16 MB/s
-test sherlock::the_lower                     ... bench:     187,208 ns/iter (+/- 4,374) = 3177 MB/s
-test sherlock::the_nocase                    ... bench:     280,625 ns/iter (+/- 10,142) = 2120 MB/s
-test sherlock::the_upper                     ... bench:      19,742 ns/iter (+/- 692) = 30135 MB/s
-test sherlock::the_whitespace                ... bench:     396,099 ns/iter (+/- 10,400) = 1501 MB/s
-test sherlock::word_ending_n                 ... bench:   1,055,639 ns/iter (+/- 6,627) = 563 MB/s
-test sherlock::words                         ... bench:   4,280,471 ns/iter (+/- 53,841) = 138 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 141.25s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/rust-bytes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/rust-bytes
deleted file mode 100644
index ff08ed1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/09-new-baseline/rust-bytes
+++ /dev/null
@@ -1,112 +0,0 @@
-
-running 107 tests
-test misc::anchored_literal_long_match       ... bench:           7 ns/iter (+/- 0) = 55714 MB/s
-test misc::anchored_literal_long_non_match   ... bench:           8 ns/iter (+/- 0) = 48750 MB/s
-test misc::anchored_literal_short_match      ... bench:           7 ns/iter (+/- 0) = 3714 MB/s
-test misc::anchored_literal_short_non_match  ... bench:           9 ns/iter (+/- 0) = 2888 MB/s
-test misc::easy0_1K                          ... bench:           7 ns/iter (+/- 0) = 150142 MB/s
-test misc::easy0_1MB                         ... bench:          11 ns/iter (+/- 1) = 95327545 MB/s
-test misc::easy0_32                          ... bench:           7 ns/iter (+/- 0) = 8428 MB/s
-test misc::easy0_32K                         ... bench:           7 ns/iter (+/- 0) = 4685000 MB/s
-test misc::easy1_1K                          ... bench:          17 ns/iter (+/- 0) = 61411 MB/s
-test misc::easy1_1MB                         ... bench:          20 ns/iter (+/- 0) = 52429800 MB/s
-test misc::easy1_32                          ... bench:          17 ns/iter (+/- 0) = 3058 MB/s
-test misc::easy1_32K                         ... bench:          17 ns/iter (+/- 1) = 1928705 MB/s
-test misc::hard_1K                           ... bench:          24 ns/iter (+/- 0) = 43791 MB/s
-test misc::hard_1MB                          ... bench:          27 ns/iter (+/- 0) = 38837148 MB/s
-test misc::hard_32                           ... bench:          24 ns/iter (+/- 0) = 2458 MB/s
-test misc::hard_32K                          ... bench:          24 ns/iter (+/- 0) = 1366458 MB/s
-test misc::is_match_set                      ... bench:          35 ns/iter (+/- 0) = 714 MB/s
-test misc::literal                           ... bench:           7 ns/iter (+/- 0) = 7285 MB/s
-test misc::long_needle1                      ... bench:       1,325 ns/iter (+/- 18) = 75472 MB/s
-test misc::long_needle2                      ... bench:     186,021 ns/iter (+/- 1,157) = 537 MB/s
-test misc::match_class                       ... bench:          38 ns/iter (+/- 3) = 2131 MB/s
-test misc::match_class_in_range              ... bench:           7 ns/iter (+/- 0) = 11571 MB/s
-test misc::matches_set                       ... bench:         172 ns/iter (+/- 4) = 145 MB/s
-test misc::medium_1K                         ... bench:           7 ns/iter (+/- 0) = 150285 MB/s
-test misc::medium_1MB                        ... bench:          12 ns/iter (+/- 0) = 87383666 MB/s
-test misc::medium_32                         ... bench:           8 ns/iter (+/- 0) = 7500 MB/s
-test misc::medium_32K                        ... bench:           7 ns/iter (+/- 0) = 4685142 MB/s
-test misc::no_exponential                    ... bench:         272 ns/iter (+/- 10) = 367 MB/s
-test misc::not_literal                       ... bench:          42 ns/iter (+/- 1) = 1214 MB/s
-test misc::one_pass_long_prefix              ... bench:          23 ns/iter (+/- 1) = 1130 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          22 ns/iter (+/- 0) = 1181 MB/s
-test misc::one_pass_short                    ... bench:          15 ns/iter (+/- 0) = 1133 MB/s
-test misc::one_pass_short_not                ... bench:          18 ns/iter (+/- 0) = 944 MB/s
-test misc::reallyhard2_1K                    ... bench:          36 ns/iter (+/- 0) = 28888 MB/s
-test misc::reallyhard_1K                     ... bench:       1,152 ns/iter (+/- 14) = 912 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,155,496 ns/iter (+/- 7,722) = 907 MB/s
-test misc::reallyhard_32                     ... bench:          51 ns/iter (+/- 1) = 1156 MB/s
-test misc::reallyhard_32K                    ... bench:      36,202 ns/iter (+/- 167) = 905 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       2,269 ns/iter (+/- 2) = 3525 MB/s
-test regexdna::find_new_lines                ... bench:   6,982,142 ns/iter (+/- 70,086) = 728 MB/s
-test regexdna::subst1                        ... bench:     425,753 ns/iter (+/- 15,075) = 11939 MB/s
-test regexdna::subst10                       ... bench:     431,401 ns/iter (+/- 19,346) = 11783 MB/s
-test regexdna::subst11                       ... bench:     427,131 ns/iter (+/- 38,166) = 11901 MB/s
-test regexdna::subst2                        ... bench:     423,284 ns/iter (+/- 9,016) = 12009 MB/s
-test regexdna::subst3                        ... bench:     425,850 ns/iter (+/- 7,324) = 11937 MB/s
-test regexdna::subst4                        ... bench:     426,013 ns/iter (+/- 6,922) = 11932 MB/s
-test regexdna::subst5                        ... bench:     426,029 ns/iter (+/- 8,697) = 11932 MB/s
-test regexdna::subst6                        ... bench:     427,781 ns/iter (+/- 8,166) = 11883 MB/s
-test regexdna::subst7                        ... bench:     426,589 ns/iter (+/- 13,274) = 11916 MB/s
-test regexdna::subst8                        ... bench:     424,152 ns/iter (+/- 14,879) = 11984 MB/s
-test regexdna::subst9                        ... bench:     428,066 ns/iter (+/- 8,773) = 11875 MB/s
-test regexdna::variant1                      ... bench:   1,446,630 ns/iter (+/- 53,195) = 3513 MB/s
-test regexdna::variant2                      ... bench:   2,241,934 ns/iter (+/- 42,563) = 2267 MB/s
-test regexdna::variant3                      ... bench:   2,741,736 ns/iter (+/- 28,424) = 1854 MB/s
-test regexdna::variant4                      ... bench:   2,725,768 ns/iter (+/- 37,801) = 1864 MB/s
-test regexdna::variant5                      ... bench:   1,686,366 ns/iter (+/- 25,054) = 3014 MB/s
-test regexdna::variant6                      ... bench:   1,689,225 ns/iter (+/- 24,479) = 3009 MB/s
-test regexdna::variant7                      ... bench:   2,343,567 ns/iter (+/- 34,646) = 2169 MB/s
-test regexdna::variant8                      ... bench:   2,363,133 ns/iter (+/- 69,696) = 2151 MB/s
-test regexdna::variant9                      ... bench:   2,337,512 ns/iter (+/- 32,958) = 2174 MB/s
-test rust_compile::compile_huge              ... bench:      53,055 ns/iter (+/- 88)
-test rust_compile::compile_huge_bytes        ... bench:   2,979,724 ns/iter (+/- 43,904)
-test rust_compile::compile_huge_full         ... bench:   5,825,193 ns/iter (+/- 61,322)
-test rust_compile::compile_simple            ... bench:       1,927 ns/iter (+/- 39)
-test rust_compile::compile_simple_bytes      ... bench:       1,924 ns/iter (+/- 29)
-test rust_compile::compile_simple_full       ... bench:       9,830 ns/iter (+/- 108)
-test rust_compile::compile_small             ... bench:       4,569 ns/iter (+/- 70)
-test rust_compile::compile_small_bytes       ... bench:      74,875 ns/iter (+/- 1,337)
-test rust_compile::compile_small_full        ... bench:     151,485 ns/iter (+/- 3,063)
-test sherlock::before_after_holmes           ... bench:     655,632 ns/iter (+/- 801) = 907 MB/s
-test sherlock::before_holmes                 ... bench:      24,576 ns/iter (+/- 307) = 24207 MB/s
-test sherlock::everything_greedy             ... bench:   1,026,410 ns/iter (+/- 57,265) = 579 MB/s
-test sherlock::everything_greedy_nl          ... bench:     424,490 ns/iter (+/- 7,188) = 1401 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      46,935 ns/iter (+/- 1,007) = 12675 MB/s
-test sherlock::holmes_coword_watson          ... bench:     322,497 ns/iter (+/- 3,680) = 1844 MB/s
-test sherlock::ing_suffix                    ... bench:     149,923 ns/iter (+/- 2,936) = 3968 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     732,021 ns/iter (+/- 10,242) = 812 MB/s
-test sherlock::letters                       ... bench:   9,716,641 ns/iter (+/- 56,270) = 61 MB/s
-test sherlock::letters_lower                 ... bench:   9,541,922 ns/iter (+/- 63,715) = 62 MB/s
-test sherlock::letters_upper                 ... bench:   1,070,240 ns/iter (+/- 10,505) = 555 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     652,312 ns/iter (+/- 546) = 912 MB/s
-test sherlock::name_alt1                     ... bench:      10,832 ns/iter (+/- 499) = 54923 MB/s
-test sherlock::name_alt2                     ... bench:      33,528 ns/iter (+/- 484) = 17744 MB/s
-test sherlock::name_alt3                     ... bench:      37,352 ns/iter (+/- 1,173) = 15927 MB/s
-test sherlock::name_alt3_nocase              ... bench:     217,570 ns/iter (+/- 3,401) = 2734 MB/s
-test sherlock::name_alt4                     ... bench:      52,711 ns/iter (+/- 1,257) = 11286 MB/s
-test sherlock::name_alt4_nocase              ... bench:      81,635 ns/iter (+/- 1,740) = 7287 MB/s
-test sherlock::name_alt5                     ... bench:      34,935 ns/iter (+/- 1,190) = 17029 MB/s
-test sherlock::name_alt5_nocase              ... bench:     194,600 ns/iter (+/- 3,742) = 3057 MB/s
-test sherlock::name_holmes                   ... bench:      14,670 ns/iter (+/- 153) = 40554 MB/s
-test sherlock::name_holmes_nocase            ... bench:      59,906 ns/iter (+/- 898) = 9931 MB/s
-test sherlock::name_sherlock                 ... bench:      10,470 ns/iter (+/- 74) = 56822 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      10,291 ns/iter (+/- 29) = 57810 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      41,316 ns/iter (+/- 1,350) = 14399 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      40,748 ns/iter (+/- 724) = 14600 MB/s
-test sherlock::name_whitespace               ... bench:      14,682 ns/iter (+/- 52) = 40521 MB/s
-test sherlock::no_match_common               ... bench:       8,822 ns/iter (+/- 310) = 67437 MB/s
-test sherlock::no_match_really_common        ... bench:       8,990 ns/iter (+/- 129) = 66177 MB/s
-test sherlock::no_match_uncommon             ... bench:       8,649 ns/iter (+/- 192) = 68786 MB/s
-test sherlock::quotes                        ... bench:     218,225 ns/iter (+/- 4,267) = 2726 MB/s
-test sherlock::repeated_class_negation       ... bench:  35,771,807 ns/iter (+/- 640,817) = 16 MB/s
-test sherlock::the_lower                     ... bench:     190,205 ns/iter (+/- 9,051) = 3127 MB/s
-test sherlock::the_nocase                    ... bench:     280,386 ns/iter (+/- 5,346) = 2121 MB/s
-test sherlock::the_upper                     ... bench:      19,325 ns/iter (+/- 695) = 30785 MB/s
-test sherlock::the_whitespace                ... bench:     409,665 ns/iter (+/- 7,657) = 1452 MB/s
-test sherlock::word_ending_n                 ... bench:   1,066,052 ns/iter (+/- 7,072) = 558 MB/s
-test sherlock::words                         ... bench:   4,330,659 ns/iter (+/- 53,403) = 137 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 107 measured; 0 filtered out; finished in 131.99s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-after-literal.log b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-after-literal.log
deleted file mode 100644
index c45b55ca..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-after-literal.log
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:          18 ns/iter (+/- 0) = 21666 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          20 ns/iter (+/- 0) = 19500 MB/s
-test misc::anchored_literal_short_match      ... bench:          18 ns/iter (+/- 0) = 1444 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          20 ns/iter (+/- 0) = 1300 MB/s
-test misc::easy0_1K                          ... bench:          51 ns/iter (+/- 2) = 20607 MB/s
-test misc::easy0_1MB                         ... bench:          56 ns/iter (+/- 1) = 18725053 MB/s
-test misc::easy0_32                          ... bench:          51 ns/iter (+/- 0) = 1156 MB/s
-test misc::easy0_32K                         ... bench:          53 ns/iter (+/- 1) = 618773 MB/s
-test misc::easy1_1K                          ... bench:          41 ns/iter (+/- 0) = 25463 MB/s
-test misc::easy1_1MB                         ... bench:          44 ns/iter (+/- 1) = 23831727 MB/s
-test misc::easy1_32                          ... bench:          40 ns/iter (+/- 1) = 1300 MB/s
-test misc::easy1_32K                         ... bench:          40 ns/iter (+/- 1) = 819700 MB/s
-test misc::hard_1K                           ... bench:          51 ns/iter (+/- 2) = 20607 MB/s
-test misc::hard_1MB                          ... bench:          56 ns/iter (+/- 1) = 18725053 MB/s
-test misc::hard_32                           ... bench:          51 ns/iter (+/- 2) = 1156 MB/s
-test misc::hard_32K                          ... bench:          51 ns/iter (+/- 1) = 643039 MB/s
-test misc::is_match_set                      ... bench:          61 ns/iter (+/- 2) = 409 MB/s
-test misc::literal                           ... bench:          13 ns/iter (+/- 0) = 3923 MB/s
-test misc::long_needle1                      ... bench:       3,242 ns/iter (+/- 79) = 30845 MB/s
-test misc::long_needle2                      ... bench:     350,572 ns/iter (+/- 6,860) = 285 MB/s
-test misc::match_class                       ... bench:          62 ns/iter (+/- 6) = 1306 MB/s
-test misc::match_class_in_range              ... bench:          14 ns/iter (+/- 0) = 5785 MB/s
-test misc::match_class_unicode               ... bench:         259 ns/iter (+/- 15) = 621 MB/s
-test misc::matches_set                       ... bench:         462 ns/iter (+/- 9) = 54 MB/s
-test misc::medium_1K                         ... bench:          53 ns/iter (+/- 0) = 19849 MB/s
-test misc::medium_1MB                        ... bench:          58 ns/iter (+/- 1) = 18079379 MB/s
-test misc::medium_32                         ... bench:          53 ns/iter (+/- 1) = 1132 MB/s
-test misc::medium_32K                        ... bench:          53 ns/iter (+/- 1) = 618792 MB/s
-test misc::no_exponential                    ... bench:         423 ns/iter (+/- 13) = 236 MB/s
-test misc::not_literal                       ... bench:          89 ns/iter (+/- 0) = 573 MB/s
-test misc::one_pass_long_prefix              ... bench:          52 ns/iter (+/- 0) = 500 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          52 ns/iter (+/- 1) = 500 MB/s
-test misc::one_pass_short                    ... bench:          38 ns/iter (+/- 1) = 447 MB/s
-test misc::one_pass_short_not                ... bench:          41 ns/iter (+/- 1) = 414 MB/s
-test misc::reallyhard2_1K                    ... bench:          81 ns/iter (+/- 1) = 12839 MB/s
-test misc::reallyhard_1K                     ... bench:       1,592 ns/iter (+/- 1) = 660 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,575,822 ns/iter (+/- 39,203) = 665 MB/s
-test misc::reallyhard_32                     ... bench:         102 ns/iter (+/- 0) = 578 MB/s
-test misc::reallyhard_32K                    ... bench:      49,328 ns/iter (+/- 2,598) = 664 MB/s
-test misc::replace_all                       ... bench:         132 ns/iter (+/- 3)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,171 ns/iter (+/- 134) = 1918 MB/s
-test misc::short_haystack_1000000x           ... bench:     132,251 ns/iter (+/- 729) = 60491 MB/s
-test misc::short_haystack_100000x            ... bench:      13,184 ns/iter (+/- 408) = 60680 MB/s
-test misc::short_haystack_10000x             ... bench:       6,036 ns/iter (+/- 167) = 13255 MB/s
-test misc::short_haystack_1000x              ... bench:         602 ns/iter (+/- 14) = 13307 MB/s
-test misc::short_haystack_100x               ... bench:         230 ns/iter (+/- 7) = 3526 MB/s
-test misc::short_haystack_10x                ... bench:         218 ns/iter (+/- 3) = 417 MB/s
-test misc::short_haystack_1x                 ... bench:         210 ns/iter (+/- 8) = 90 MB/s
-test misc::short_haystack_2x                 ... bench:         225 ns/iter (+/- 6) = 120 MB/s
-test misc::short_haystack_3x                 ... bench:         211 ns/iter (+/- 8) = 165 MB/s
-test misc::short_haystack_4x                 ... bench:         212 ns/iter (+/- 6) = 202 MB/s
-test regexdna::find_new_lines                ... bench:  12,245,066 ns/iter (+/- 117,141) = 415 MB/s
-test regexdna::subst1                        ... bench:     786,357 ns/iter (+/- 14,200) = 6464 MB/s
-test regexdna::subst10                       ... bench:     788,550 ns/iter (+/- 26,456) = 6446 MB/s
-test regexdna::subst11                       ... bench:     782,161 ns/iter (+/- 15,583) = 6499 MB/s
-test regexdna::subst2                        ... bench:     784,902 ns/iter (+/- 23,379) = 6476 MB/s
-test regexdna::subst3                        ... bench:     786,640 ns/iter (+/- 27,063) = 6462 MB/s
-test regexdna::subst4                        ... bench:     785,591 ns/iter (+/- 20,498) = 6470 MB/s
-test regexdna::subst5                        ... bench:     787,447 ns/iter (+/- 20,892) = 6455 MB/s
-test regexdna::subst6                        ... bench:     784,994 ns/iter (+/- 19,687) = 6475 MB/s
-test regexdna::subst7                        ... bench:     801,921 ns/iter (+/- 15,391) = 6339 MB/s
-test regexdna::subst8                        ... bench:     785,541 ns/iter (+/- 11,908) = 6471 MB/s
-test regexdna::subst9                        ... bench:     785,848 ns/iter (+/- 28,020) = 6468 MB/s
-test regexdna::variant1                      ... bench:   2,195,058 ns/iter (+/- 44,066) = 2315 MB/s
-test regexdna::variant2                      ... bench:   3,219,968 ns/iter (+/- 59,372) = 1578 MB/s
-test regexdna::variant3                      ... bench:   3,776,467 ns/iter (+/- 54,326) = 1346 MB/s
-test regexdna::variant4                      ... bench:   3,803,674 ns/iter (+/- 95,281) = 1336 MB/s
-test regexdna::variant5                      ... bench:   2,661,333 ns/iter (+/- 46,408) = 1910 MB/s
-test regexdna::variant6                      ... bench:   2,645,716 ns/iter (+/- 38,659) = 1921 MB/s
-test regexdna::variant7                      ... bench:   3,228,352 ns/iter (+/- 69,155) = 1574 MB/s
-test regexdna::variant8                      ... bench:   3,305,563 ns/iter (+/- 59,321) = 1537 MB/s
-test regexdna::variant9                      ... bench:   3,225,039 ns/iter (+/- 49,720) = 1576 MB/s
-test rust_compile::compile_huge              ... bench:     100,381 ns/iter (+/- 2,052)
-test rust_compile::compile_huge_bytes        ... bench:   5,899,989 ns/iter (+/- 114,363)
-test rust_compile::compile_huge_full         ... bench:  11,650,995 ns/iter (+/- 172,285)
-test rust_compile::compile_simple            ... bench:       4,082 ns/iter (+/- 88)
-test rust_compile::compile_simple_bytes      ... bench:       4,153 ns/iter (+/- 120)
-test rust_compile::compile_simple_full       ... bench:      20,414 ns/iter (+/- 1,860)
-test rust_compile::compile_small             ... bench:       9,114 ns/iter (+/- 216)
-test rust_compile::compile_small_bytes       ... bench:     183,049 ns/iter (+/- 9,917)
-test rust_compile::compile_small_full        ... bench:     361,291 ns/iter (+/- 11,045)
-test sherlock::before_after_holmes           ... bench:     907,103 ns/iter (+/- 12,165) = 655 MB/s
-test sherlock::before_holmes                 ... bench:      62,501 ns/iter (+/- 1,880) = 9518 MB/s
-test sherlock::everything_greedy             ... bench:   2,062,116 ns/iter (+/- 41,900) = 288 MB/s
-test sherlock::everything_greedy_nl          ... bench:     894,529 ns/iter (+/- 38,723) = 665 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     103,305 ns/iter (+/- 3,798) = 5758 MB/s
-test sherlock::holmes_coword_watson          ... bench:     479,423 ns/iter (+/- 13,924) = 1240 MB/s
-test sherlock::ing_suffix                    ... bench:     318,300 ns/iter (+/- 6,846) = 1869 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,066,300 ns/iter (+/- 19,375) = 557 MB/s
-test sherlock::letters                       ... bench:  21,777,358 ns/iter (+/- 230,478) = 27 MB/s
-test sherlock::letters_lower                 ... bench:  21,152,019 ns/iter (+/- 203,617) = 28 MB/s
-test sherlock::letters_upper                 ... bench:   1,777,626 ns/iter (+/- 26,243) = 334 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,509 ns/iter (+/- 24,983) = 662 MB/s
-test sherlock::name_alt1                     ... bench:      32,255 ns/iter (+/- 681) = 18444 MB/s
-test sherlock::name_alt2                     ... bench:      86,369 ns/iter (+/- 2,494) = 6888 MB/s
-test sherlock::name_alt3                     ... bench:      97,618 ns/iter (+/- 564) = 6094 MB/s
-test sherlock::name_alt3_nocase              ... bench:     944,848 ns/iter (+/- 31,039) = 629 MB/s
-test sherlock::name_alt4                     ... bench:     122,029 ns/iter (+/- 2,716) = 4875 MB/s
-test sherlock::name_alt4_nocase              ... bench:     225,544 ns/iter (+/- 5,783) = 2637 MB/s
-test sherlock::name_alt5                     ... bench:      91,897 ns/iter (+/- 3,796) = 6473 MB/s
-test sherlock::name_alt5_nocase              ... bench:     936,420 ns/iter (+/- 15,092) = 635 MB/s
-test sherlock::name_holmes                   ... bench:      33,448 ns/iter (+/- 959) = 17786 MB/s
-test sherlock::name_holmes_nocase            ... bench:     115,864 ns/iter (+/- 1,645) = 5134 MB/s
-test sherlock::name_sherlock                 ... bench:      22,474 ns/iter (+/- 674) = 26472 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      22,184 ns/iter (+/- 497) = 26818 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      99,629 ns/iter (+/- 2,398) = 5971 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      99,523 ns/iter (+/- 2,674) = 5977 MB/s
-test sherlock::name_whitespace               ... bench:      30,815 ns/iter (+/- 107) = 19306 MB/s
-test sherlock::no_match_common               ... bench:      19,661 ns/iter (+/- 656) = 30259 MB/s
-test sherlock::no_match_really_common        ... bench:      27,544 ns/iter (+/- 527) = 21599 MB/s
-test sherlock::no_match_uncommon             ... bench:      19,553 ns/iter (+/- 31) = 30426 MB/s
-test sherlock::quotes                        ... bench:     369,144 ns/iter (+/- 45,316) = 1611 MB/s
-test sherlock::repeated_class_negation       ... bench:  68,838,857 ns/iter (+/- 330,544) = 8 MB/s
-test sherlock::the_lower                     ... bench:     321,692 ns/iter (+/- 5,418) = 1849 MB/s
-test sherlock::the_nocase                    ... bench:     507,936 ns/iter (+/- 3,080) = 1171 MB/s
-test sherlock::the_upper                     ... bench:      43,705 ns/iter (+/- 788) = 13612 MB/s
-test sherlock::the_whitespace                ... bench:     819,179 ns/iter (+/- 20,071) = 726 MB/s
-test sherlock::word_ending_n                 ... bench:   1,700,300 ns/iter (+/- 36,623) = 349 MB/s
-test sherlock::words                         ... bench:   8,249,767 ns/iter (+/- 75,015) = 72 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 111.55s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-before-literal.log b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-before-literal.log
deleted file mode 100644
index 98b3496..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-before-literal.log
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:          18 ns/iter (+/- 0) = 21666 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          18 ns/iter (+/- 0) = 21666 MB/s
-test misc::anchored_literal_short_match      ... bench:          18 ns/iter (+/- 0) = 1444 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          18 ns/iter (+/- 0) = 1444 MB/s
-test misc::easy0_1K                          ... bench:          15 ns/iter (+/- 0) = 70066 MB/s
-test misc::easy0_1MB                         ... bench:          21 ns/iter (+/- 0) = 49933476 MB/s
-test misc::easy0_32                          ... bench:          15 ns/iter (+/- 0) = 3933 MB/s
-test misc::easy0_32K                         ... bench:          14 ns/iter (+/- 0) = 2342500 MB/s
-test misc::easy1_1K                          ... bench:          40 ns/iter (+/- 1) = 26100 MB/s
-test misc::easy1_1MB                         ... bench:          45 ns/iter (+/- 1) = 23302133 MB/s
-test misc::easy1_32                          ... bench:          40 ns/iter (+/- 5) = 1300 MB/s
-test misc::easy1_32K                         ... bench:          40 ns/iter (+/- 1) = 819700 MB/s
-test misc::hard_1K                           ... bench:          51 ns/iter (+/- 1) = 20607 MB/s
-test misc::hard_1MB                          ... bench:          56 ns/iter (+/- 0) = 18725053 MB/s
-test misc::hard_32                           ... bench:          51 ns/iter (+/- 3) = 1156 MB/s
-test misc::hard_32K                          ... bench:          51 ns/iter (+/- 1) = 643039 MB/s
-test misc::is_match_set                      ... bench:          61 ns/iter (+/- 2) = 409 MB/s
-test misc::literal                           ... bench:          13 ns/iter (+/- 0) = 3923 MB/s
-test misc::long_needle1                      ... bench:       3,259 ns/iter (+/- 86) = 30684 MB/s
-test misc::long_needle2                      ... bench:     350,722 ns/iter (+/- 6,984) = 285 MB/s
-test misc::match_class                       ... bench:          60 ns/iter (+/- 1) = 1350 MB/s
-test misc::match_class_in_range              ... bench:          14 ns/iter (+/- 0) = 5785 MB/s
-test misc::match_class_unicode               ... bench:         255 ns/iter (+/- 0) = 631 MB/s
-test misc::matches_set                       ... bench:         481 ns/iter (+/- 11) = 51 MB/s
-test misc::medium_1K                         ... bench:          15 ns/iter (+/- 0) = 70133 MB/s
-test misc::medium_1MB                        ... bench:          22 ns/iter (+/- 0) = 47663818 MB/s
-test misc::medium_32                         ... bench:          15 ns/iter (+/- 0) = 4000 MB/s
-test misc::medium_32K                        ... bench:          15 ns/iter (+/- 0) = 2186400 MB/s
-test misc::no_exponential                    ... bench:         442 ns/iter (+/- 13) = 226 MB/s
-test misc::not_literal                       ... bench:          89 ns/iter (+/- 1) = 573 MB/s
-test misc::one_pass_long_prefix              ... bench:          54 ns/iter (+/- 1) = 481 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          52 ns/iter (+/- 1) = 500 MB/s
-test misc::one_pass_short                    ... bench:          39 ns/iter (+/- 0) = 435 MB/s
-test misc::one_pass_short_not                ... bench:          42 ns/iter (+/- 0) = 404 MB/s
-test misc::reallyhard2_1K                    ... bench:          83 ns/iter (+/- 6) = 12530 MB/s
-test misc::reallyhard_1K                     ... bench:       1,592 ns/iter (+/- 4) = 660 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,575,691 ns/iter (+/- 29,668) = 665 MB/s
-test misc::reallyhard_32                     ... bench:         101 ns/iter (+/- 5) = 584 MB/s
-test misc::reallyhard_32K                    ... bench:      49,325 ns/iter (+/- 1,734) = 664 MB/s
-test misc::replace_all                       ... bench:         134 ns/iter (+/- 2)
-test misc::reverse_suffix_no_quadratic       ... bench:       4,189 ns/iter (+/- 274) = 1909 MB/s
-test misc::short_haystack_1000000x           ... bench:     132,182 ns/iter (+/- 4,966) = 60522 MB/s
-test misc::short_haystack_100000x            ... bench:      13,344 ns/iter (+/- 275) = 59952 MB/s
-test misc::short_haystack_10000x             ... bench:       6,119 ns/iter (+/- 285) = 13075 MB/s
-test misc::short_haystack_1000x              ... bench:         617 ns/iter (+/- 15) = 12983 MB/s
-test misc::short_haystack_100x               ... bench:         230 ns/iter (+/- 7) = 3526 MB/s
-test misc::short_haystack_10x                ... bench:         207 ns/iter (+/- 8) = 439 MB/s
-test misc::short_haystack_1x                 ... bench:         213 ns/iter (+/- 7) = 89 MB/s
-test misc::short_haystack_2x                 ... bench:         206 ns/iter (+/- 6) = 131 MB/s
-test misc::short_haystack_3x                 ... bench:         207 ns/iter (+/- 10) = 169 MB/s
-test misc::short_haystack_4x                 ... bench:         208 ns/iter (+/- 7) = 206 MB/s
-test regexdna::find_new_lines                ... bench:  12,275,804 ns/iter (+/- 145,331) = 414 MB/s
-test regexdna::subst1                        ... bench:     793,517 ns/iter (+/- 44,203) = 6406 MB/s
-test regexdna::subst10                       ... bench:     794,922 ns/iter (+/- 23,459) = 6394 MB/s
-test regexdna::subst11                       ... bench:     790,525 ns/iter (+/- 23,010) = 6430 MB/s
-test regexdna::subst2                        ... bench:     790,637 ns/iter (+/- 17,962) = 6429 MB/s
-test regexdna::subst3                        ... bench:     793,559 ns/iter (+/- 17,575) = 6405 MB/s
-test regexdna::subst4                        ... bench:     792,738 ns/iter (+/- 15,237) = 6412 MB/s
-test regexdna::subst5                        ... bench:     795,060 ns/iter (+/- 26,172) = 6393 MB/s
-test regexdna::subst6                        ... bench:     792,357 ns/iter (+/- 15,067) = 6415 MB/s
-test regexdna::subst7                        ... bench:     797,006 ns/iter (+/- 27,928) = 6378 MB/s
-test regexdna::subst8                        ... bench:     790,603 ns/iter (+/- 22,754) = 6429 MB/s
-test regexdna::subst9                        ... bench:     793,055 ns/iter (+/- 13,202) = 6409 MB/s
-test regexdna::variant1                      ... bench:   2,204,304 ns/iter (+/- 50,669) = 2306 MB/s
-test regexdna::variant2                      ... bench:   3,224,798 ns/iter (+/- 45,705) = 1576 MB/s
-test regexdna::variant3                      ... bench:   3,802,774 ns/iter (+/- 86,530) = 1336 MB/s
-test regexdna::variant4                      ... bench:   3,805,916 ns/iter (+/- 69,737) = 1335 MB/s
-test regexdna::variant5                      ... bench:   2,662,373 ns/iter (+/- 61,259) = 1909 MB/s
-test regexdna::variant6                      ... bench:   2,654,072 ns/iter (+/- 51,095) = 1915 MB/s
-test regexdna::variant7                      ... bench:   3,232,369 ns/iter (+/- 67,147) = 1572 MB/s
-test regexdna::variant8                      ... bench:   3,311,225 ns/iter (+/- 66,086) = 1535 MB/s
-test regexdna::variant9                      ... bench:   3,241,601 ns/iter (+/- 68,394) = 1568 MB/s
-test rust_compile::compile_huge              ... bench:     100,955 ns/iter (+/- 2,466)
-test rust_compile::compile_huge_bytes        ... bench:   5,936,732 ns/iter (+/- 126,993)
-test rust_compile::compile_huge_full         ... bench:  11,880,838 ns/iter (+/- 211,387)
-test rust_compile::compile_simple            ... bench:       4,575 ns/iter (+/- 139)
-test rust_compile::compile_simple_bytes      ... bench:       4,653 ns/iter (+/- 122)
-test rust_compile::compile_simple_full       ... bench:      20,656 ns/iter (+/- 535)
-test rust_compile::compile_small             ... bench:       9,613 ns/iter (+/- 992)
-test rust_compile::compile_small_bytes       ... bench:     188,349 ns/iter (+/- 4,733)
-test rust_compile::compile_small_full        ... bench:     341,554 ns/iter (+/- 9,774)
-test sherlock::before_after_holmes           ... bench:     907,419 ns/iter (+/- 11,645) = 655 MB/s
-test sherlock::before_holmes                 ... bench:      62,036 ns/iter (+/- 1,854) = 9590 MB/s
-test sherlock::everything_greedy             ... bench:   2,072,694 ns/iter (+/- 45,192) = 287 MB/s
-test sherlock::everything_greedy_nl          ... bench:     884,483 ns/iter (+/- 25,710) = 672 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     103,873 ns/iter (+/- 1,310) = 5727 MB/s
-test sherlock::holmes_coword_watson          ... bench:     481,491 ns/iter (+/- 11,516) = 1235 MB/s
-test sherlock::ing_suffix                    ... bench:     323,119 ns/iter (+/- 7,438) = 1841 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,067,293 ns/iter (+/- 18,661) = 557 MB/s
-test sherlock::letters                       ... bench:  21,732,526 ns/iter (+/- 253,563) = 27 MB/s
-test sherlock::letters_lower                 ... bench:  21,187,465 ns/iter (+/- 191,023) = 28 MB/s
-test sherlock::letters_upper                 ... bench:   1,766,003 ns/iter (+/- 17,494) = 336 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,387 ns/iter (+/- 26,674) = 662 MB/s
-test sherlock::name_alt1                     ... bench:      34,183 ns/iter (+/- 885) = 17404 MB/s
-test sherlock::name_alt2                     ... bench:      87,151 ns/iter (+/- 2,139) = 6826 MB/s
-test sherlock::name_alt3                     ... bench:      99,293 ns/iter (+/- 1,938) = 5991 MB/s
-test sherlock::name_alt3_nocase              ... bench:     379,228 ns/iter (+/- 22,539) = 1568 MB/s
-test sherlock::name_alt4                     ... bench:     123,040 ns/iter (+/- 2,676) = 4835 MB/s
-test sherlock::name_alt4_nocase              ... bench:     186,045 ns/iter (+/- 403) = 3197 MB/s
-test sherlock::name_alt5                     ... bench:      91,679 ns/iter (+/- 2,543) = 6489 MB/s
-test sherlock::name_alt5_nocase              ... bench:     343,668 ns/iter (+/- 6,807) = 1731 MB/s
-test sherlock::name_holmes                   ... bench:      33,802 ns/iter (+/- 936) = 17600 MB/s
-test sherlock::name_holmes_nocase            ... bench:     136,208 ns/iter (+/- 4,317) = 4367 MB/s
-test sherlock::name_sherlock                 ... bench:      22,534 ns/iter (+/- 462) = 26401 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      22,514 ns/iter (+/- 697) = 26425 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      97,796 ns/iter (+/- 2,037) = 6083 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      95,809 ns/iter (+/- 1,538) = 6209 MB/s
-test sherlock::name_whitespace               ... bench:      30,959 ns/iter (+/- 968) = 19216 MB/s
-test sherlock::no_match_common               ... bench:      19,568 ns/iter (+/- 616) = 30403 MB/s
-test sherlock::no_match_really_common        ... bench:      26,273 ns/iter (+/- 1,143) = 22644 MB/s
-test sherlock::no_match_uncommon             ... bench:      19,643 ns/iter (+/- 496) = 30287 MB/s
-test sherlock::quotes                        ... bench:     371,876 ns/iter (+/- 2,494) = 1599 MB/s
-test sherlock::repeated_class_negation       ... bench:  76,963,104 ns/iter (+/- 277,311) = 7 MB/s
-test sherlock::the_lower                     ... bench:     331,250 ns/iter (+/- 8,588) = 1796 MB/s
-test sherlock::the_nocase                    ... bench:     516,528 ns/iter (+/- 40,826) = 1151 MB/s
-test sherlock::the_upper                     ... bench:      44,206 ns/iter (+/- 1,277) = 13458 MB/s
-test sherlock::the_whitespace                ... bench:     822,577 ns/iter (+/- 23,649) = 723 MB/s
-test sherlock::word_ending_n                 ... bench:   1,685,110 ns/iter (+/- 34,615) = 353 MB/s
-test sherlock::words                         ... bench:   8,333,499 ns/iter (+/- 152,757) = 71 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 124.94s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-bytes-after-literal.log b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-bytes-after-literal.log
deleted file mode 100644
index 470e09b9c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-bytes-after-literal.log
+++ /dev/null
@@ -1,112 +0,0 @@
-
-running 107 tests
-test misc::anchored_literal_long_match       ... bench:          18 ns/iter (+/- 0) = 21666 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          20 ns/iter (+/- 1) = 19500 MB/s
-test misc::anchored_literal_short_match      ... bench:          18 ns/iter (+/- 0) = 1444 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          20 ns/iter (+/- 0) = 1300 MB/s
-test misc::easy0_1K                          ... bench:          54 ns/iter (+/- 2) = 19462 MB/s
-test misc::easy0_1MB                         ... bench:          56 ns/iter (+/- 1) = 18725053 MB/s
-test misc::easy0_32                          ... bench:          51 ns/iter (+/- 1) = 1156 MB/s
-test misc::easy0_32K                         ... bench:          51 ns/iter (+/- 2) = 643039 MB/s
-test misc::easy1_1K                          ... bench:          41 ns/iter (+/- 1) = 25463 MB/s
-test misc::easy1_1MB                         ... bench:          44 ns/iter (+/- 1) = 23831727 MB/s
-test misc::easy1_32                          ... bench:          40 ns/iter (+/- 2) = 1300 MB/s
-test misc::easy1_32K                         ... bench:          40 ns/iter (+/- 1) = 819700 MB/s
-test misc::hard_1K                           ... bench:          52 ns/iter (+/- 1) = 20211 MB/s
-test misc::hard_1MB                          ... bench:          57 ns/iter (+/- 0) = 18396543 MB/s
-test misc::hard_32                           ... bench:          51 ns/iter (+/- 0) = 1156 MB/s
-test misc::hard_32K                          ... bench:          51 ns/iter (+/- 3) = 643039 MB/s
-test misc::is_match_set                      ... bench:          61 ns/iter (+/- 2) = 409 MB/s
-test misc::literal                           ... bench:          14 ns/iter (+/- 0) = 3642 MB/s
-test misc::long_needle1                      ... bench:       3,249 ns/iter (+/- 87) = 30779 MB/s
-test misc::long_needle2                      ... bench:     350,559 ns/iter (+/- 7,154) = 285 MB/s
-test misc::match_class                       ... bench:          61 ns/iter (+/- 4) = 1327 MB/s
-test misc::match_class_in_range              ... bench:          14 ns/iter (+/- 0) = 5785 MB/s
-test misc::matches_set                       ... bench:         401 ns/iter (+/- 17) = 62 MB/s
-test misc::medium_1K                         ... bench:          53 ns/iter (+/- 0) = 19849 MB/s
-test misc::medium_1MB                        ... bench:          58 ns/iter (+/- 0) = 18079379 MB/s
-test misc::medium_32                         ... bench:          53 ns/iter (+/- 0) = 1132 MB/s
-test misc::medium_32K                        ... bench:          53 ns/iter (+/- 2) = 618792 MB/s
-test misc::no_exponential                    ... bench:         421 ns/iter (+/- 8) = 237 MB/s
-test misc::not_literal                       ... bench:          90 ns/iter (+/- 0) = 566 MB/s
-test misc::one_pass_long_prefix              ... bench:          53 ns/iter (+/- 1) = 490 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          53 ns/iter (+/- 0) = 490 MB/s
-test misc::one_pass_short                    ... bench:          38 ns/iter (+/- 0) = 447 MB/s
-test misc::one_pass_short_not                ... bench:          42 ns/iter (+/- 3) = 404 MB/s
-test misc::reallyhard2_1K                    ... bench:          77 ns/iter (+/- 1) = 13506 MB/s
-test misc::reallyhard_1K                     ... bench:       1,592 ns/iter (+/- 1) = 660 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,575,759 ns/iter (+/- 49,997) = 665 MB/s
-test misc::reallyhard_32                     ... bench:         102 ns/iter (+/- 2) = 578 MB/s
-test misc::reallyhard_32K                    ... bench:      49,326 ns/iter (+/- 1,055) = 664 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,161 ns/iter (+/- 94) = 1922 MB/s
-test regexdna::find_new_lines                ... bench:  12,344,799 ns/iter (+/- 188,054) = 411 MB/s
-test regexdna::subst1                        ... bench:     780,449 ns/iter (+/- 14,474) = 6513 MB/s
-test regexdna::subst10                       ... bench:     795,203 ns/iter (+/- 40,742) = 6392 MB/s
-test regexdna::subst11                       ... bench:     816,444 ns/iter (+/- 23,334) = 6226 MB/s
-test regexdna::subst2                        ... bench:     777,546 ns/iter (+/- 19,625) = 6537 MB/s
-test regexdna::subst3                        ... bench:     783,295 ns/iter (+/- 8,266) = 6489 MB/s
-test regexdna::subst4                        ... bench:     775,154 ns/iter (+/- 21,350) = 6557 MB/s
-test regexdna::subst5                        ... bench:     781,414 ns/iter (+/- 21,057) = 6505 MB/s
-test regexdna::subst6                        ... bench:     783,595 ns/iter (+/- 23,835) = 6487 MB/s
-test regexdna::subst7                        ... bench:     821,620 ns/iter (+/- 46,131) = 6187 MB/s
-test regexdna::subst8                        ... bench:     818,402 ns/iter (+/- 21,350) = 6211 MB/s
-test regexdna::subst9                        ... bench:     779,115 ns/iter (+/- 21,335) = 6524 MB/s
-test regexdna::variant1                      ... bench:   2,189,308 ns/iter (+/- 32,528) = 2321 MB/s
-test regexdna::variant2                      ... bench:   3,217,478 ns/iter (+/- 36,011) = 1579 MB/s
-test regexdna::variant3                      ... bench:   3,771,330 ns/iter (+/- 74,944) = 1347 MB/s
-test regexdna::variant4                      ... bench:   3,787,593 ns/iter (+/- 37,825) = 1342 MB/s
-test regexdna::variant5                      ... bench:   2,669,799 ns/iter (+/- 69,777) = 1904 MB/s
-test regexdna::variant6                      ... bench:   2,651,559 ns/iter (+/- 33,895) = 1917 MB/s
-test regexdna::variant7                      ... bench:   3,222,991 ns/iter (+/- 41,014) = 1577 MB/s
-test regexdna::variant8                      ... bench:   3,298,048 ns/iter (+/- 41,331) = 1541 MB/s
-test regexdna::variant9                      ... bench:   3,218,486 ns/iter (+/- 50,318) = 1579 MB/s
-test rust_compile::compile_huge              ... bench:     100,031 ns/iter (+/- 3,464)
-test rust_compile::compile_huge_bytes        ... bench:   5,885,102 ns/iter (+/- 130,016)
-test rust_compile::compile_huge_full         ... bench:  11,641,251 ns/iter (+/- 147,700)
-test rust_compile::compile_simple            ... bench:       4,263 ns/iter (+/- 116)
-test rust_compile::compile_simple_bytes      ... bench:       4,236 ns/iter (+/- 91)
-test rust_compile::compile_simple_full       ... bench:      22,349 ns/iter (+/- 2,085)
-test rust_compile::compile_small             ... bench:       9,537 ns/iter (+/- 298)
-test rust_compile::compile_small_bytes       ... bench:     178,561 ns/iter (+/- 3,796)
-test rust_compile::compile_small_full        ... bench:     363,343 ns/iter (+/- 9,481)
-test sherlock::before_after_holmes           ... bench:     907,022 ns/iter (+/- 19,133) = 655 MB/s
-test sherlock::before_holmes                 ... bench:      63,729 ns/iter (+/- 1,830) = 9335 MB/s
-test sherlock::everything_greedy             ... bench:   2,181,593 ns/iter (+/- 46,002) = 272 MB/s
-test sherlock::everything_greedy_nl          ... bench:     884,811 ns/iter (+/- 26,211) = 672 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     105,610 ns/iter (+/- 3,120) = 5633 MB/s
-test sherlock::holmes_coword_watson          ... bench:     480,986 ns/iter (+/- 13,228) = 1236 MB/s
-test sherlock::ing_suffix                    ... bench:     322,921 ns/iter (+/- 3,555) = 1842 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,065,372 ns/iter (+/- 21,242) = 558 MB/s
-test sherlock::letters                       ... bench:  22,109,015 ns/iter (+/- 146,243) = 26 MB/s
-test sherlock::letters_lower                 ... bench:  21,686,153 ns/iter (+/- 206,041) = 27 MB/s
-test sherlock::letters_upper                 ... bench:   1,778,225 ns/iter (+/- 25,935) = 334 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,355 ns/iter (+/- 26,781) = 662 MB/s
-test sherlock::name_alt1                     ... bench:      31,927 ns/iter (+/- 633) = 18634 MB/s
-test sherlock::name_alt2                     ... bench:      87,040 ns/iter (+/- 1,859) = 6835 MB/s
-test sherlock::name_alt3                     ... bench:      97,715 ns/iter (+/- 2,109) = 6088 MB/s
-test sherlock::name_alt3_nocase              ... bench:     944,955 ns/iter (+/- 26,503) = 629 MB/s
-test sherlock::name_alt4                     ... bench:     120,935 ns/iter (+/- 2,399) = 4919 MB/s
-test sherlock::name_alt4_nocase              ... bench:     228,597 ns/iter (+/- 7,137) = 2602 MB/s
-test sherlock::name_alt5                     ... bench:      91,174 ns/iter (+/- 1,096) = 6525 MB/s
-test sherlock::name_alt5_nocase              ... bench:     937,189 ns/iter (+/- 23,839) = 634 MB/s
-test sherlock::name_holmes                   ... bench:      34,020 ns/iter (+/- 752) = 17487 MB/s
-test sherlock::name_holmes_nocase            ... bench:     117,194 ns/iter (+/- 3,444) = 5076 MB/s
-test sherlock::name_sherlock                 ... bench:      22,557 ns/iter (+/- 388) = 26374 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      22,428 ns/iter (+/- 683) = 26526 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      99,637 ns/iter (+/- 636) = 5971 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      97,895 ns/iter (+/- 1,875) = 6077 MB/s
-test sherlock::name_whitespace               ... bench:      30,772 ns/iter (+/- 1,591) = 19333 MB/s
-test sherlock::no_match_common               ... bench:      19,665 ns/iter (+/- 296) = 30253 MB/s
-test sherlock::no_match_really_common        ... bench:      27,403 ns/iter (+/- 2,507) = 21710 MB/s
-test sherlock::no_match_uncommon             ... bench:      19,601 ns/iter (+/- 293) = 30352 MB/s
-test sherlock::quotes                        ... bench:     370,323 ns/iter (+/- 1,345) = 1606 MB/s
-test sherlock::repeated_class_negation       ... bench:  68,414,794 ns/iter (+/- 342,428) = 8 MB/s
-test sherlock::the_lower                     ... bench:     327,767 ns/iter (+/- 5,493) = 1815 MB/s
-test sherlock::the_nocase                    ... bench:     507,818 ns/iter (+/- 1,796) = 1171 MB/s
-test sherlock::the_upper                     ... bench:      45,045 ns/iter (+/- 1,400) = 13207 MB/s
-test sherlock::the_whitespace                ... bench:     822,080 ns/iter (+/- 16,581) = 723 MB/s
-test sherlock::word_ending_n                 ... bench:   1,690,084 ns/iter (+/- 40,361) = 352 MB/s
-test sherlock::words                         ... bench:   8,573,617 ns/iter (+/- 143,313) = 69 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 107 measured; 0 filtered out; finished in 110.03s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-bytes-before-literal.log b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-bytes-before-literal.log
deleted file mode 100644
index 7016e3c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/10-last-frontier/rust-bytes-before-literal.log
+++ /dev/null
@@ -1,112 +0,0 @@
-
-running 107 tests
-test misc::anchored_literal_long_match       ... bench:          18 ns/iter (+/- 0) = 21666 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          19 ns/iter (+/- 1) = 20526 MB/s
-test misc::anchored_literal_short_match      ... bench:          18 ns/iter (+/- 0) = 1444 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          20 ns/iter (+/- 0) = 1300 MB/s
-test misc::easy0_1K                          ... bench:          14 ns/iter (+/- 0) = 75071 MB/s
-test misc::easy0_1MB                         ... bench:          21 ns/iter (+/- 0) = 49933476 MB/s
-test misc::easy0_32                          ... bench:          14 ns/iter (+/- 0) = 4214 MB/s
-test misc::easy0_32K                         ... bench:          14 ns/iter (+/- 0) = 2342500 MB/s
-test misc::easy1_1K                          ... bench:          41 ns/iter (+/- 0) = 25463 MB/s
-test misc::easy1_1MB                         ... bench:          48 ns/iter (+/- 0) = 21845750 MB/s
-test misc::easy1_32                          ... bench:          41 ns/iter (+/- 0) = 1268 MB/s
-test misc::easy1_32K                         ... bench:          41 ns/iter (+/- 1) = 799707 MB/s
-test misc::hard_1K                           ... bench:          51 ns/iter (+/- 1) = 20607 MB/s
-test misc::hard_1MB                          ... bench:          56 ns/iter (+/- 2) = 18725053 MB/s
-test misc::hard_32                           ... bench:          51 ns/iter (+/- 6) = 1156 MB/s
-test misc::hard_32K                          ... bench:          51 ns/iter (+/- 1) = 643039 MB/s
-test misc::is_match_set                      ... bench:          62 ns/iter (+/- 2) = 403 MB/s
-test misc::literal                           ... bench:          13 ns/iter (+/- 0) = 3923 MB/s
-test misc::long_needle1                      ... bench:       2,825 ns/iter (+/- 57) = 35398 MB/s
-test misc::long_needle2                      ... bench:     350,755 ns/iter (+/- 11,905) = 285 MB/s
-test misc::match_class                       ... bench:          64 ns/iter (+/- 1) = 1265 MB/s
-test misc::match_class_in_range              ... bench:          13 ns/iter (+/- 0) = 6230 MB/s
-test misc::matches_set                       ... bench:         422 ns/iter (+/- 12) = 59 MB/s
-test misc::medium_1K                         ... bench:          15 ns/iter (+/- 0) = 70133 MB/s
-test misc::medium_1MB                        ... bench:          21 ns/iter (+/- 0) = 49933523 MB/s
-test misc::medium_32                         ... bench:          15 ns/iter (+/- 0) = 4000 MB/s
-test misc::medium_32K                        ... bench:          14 ns/iter (+/- 0) = 2342571 MB/s
-test misc::no_exponential                    ... bench:         443 ns/iter (+/- 12) = 225 MB/s
-test misc::not_literal                       ... bench:          89 ns/iter (+/- 1) = 573 MB/s
-test misc::one_pass_long_prefix              ... bench:          52 ns/iter (+/- 1) = 500 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          52 ns/iter (+/- 1) = 500 MB/s
-test misc::one_pass_short                    ... bench:          40 ns/iter (+/- 1) = 425 MB/s
-test misc::one_pass_short_not                ... bench:          42 ns/iter (+/- 0) = 404 MB/s
-test misc::reallyhard2_1K                    ... bench:          80 ns/iter (+/- 0) = 13000 MB/s
-test misc::reallyhard_1K                     ... bench:       1,592 ns/iter (+/- 1) = 660 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,575,789 ns/iter (+/- 34,236) = 665 MB/s
-test misc::reallyhard_32                     ... bench:         101 ns/iter (+/- 2) = 584 MB/s
-test misc::reallyhard_32K                    ... bench:      49,321 ns/iter (+/- 2,718) = 664 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       4,158 ns/iter (+/- 93) = 1924 MB/s
-test regexdna::find_new_lines                ... bench:  12,391,732 ns/iter (+/- 180,913) = 410 MB/s
-test regexdna::subst1                        ... bench:     781,690 ns/iter (+/- 29,637) = 6503 MB/s
-test regexdna::subst10                       ... bench:     778,306 ns/iter (+/- 22,706) = 6531 MB/s
-test regexdna::subst11                       ... bench:     777,716 ns/iter (+/- 24,635) = 6536 MB/s
-test regexdna::subst2                        ... bench:     791,786 ns/iter (+/- 15,778) = 6420 MB/s
-test regexdna::subst3                        ... bench:     783,470 ns/iter (+/- 25,543) = 6488 MB/s
-test regexdna::subst4                        ... bench:     814,902 ns/iter (+/- 14,146) = 6238 MB/s
-test regexdna::subst5                        ... bench:     781,464 ns/iter (+/- 19,532) = 6504 MB/s
-test regexdna::subst6                        ... bench:     780,116 ns/iter (+/- 16,558) = 6516 MB/s
-test regexdna::subst7                        ... bench:     795,982 ns/iter (+/- 11,254) = 6386 MB/s
-test regexdna::subst8                        ... bench:     781,746 ns/iter (+/- 24,996) = 6502 MB/s
-test regexdna::subst9                        ... bench:     783,793 ns/iter (+/- 14,943) = 6485 MB/s
-test regexdna::variant1                      ... bench:   2,188,940 ns/iter (+/- 42,308) = 2322 MB/s
-test regexdna::variant2                      ... bench:   3,218,011 ns/iter (+/- 50,700) = 1579 MB/s
-test regexdna::variant3                      ... bench:   3,778,907 ns/iter (+/- 90,543) = 1345 MB/s
-test regexdna::variant4                      ... bench:   3,803,852 ns/iter (+/- 68,319) = 1336 MB/s
-test regexdna::variant5                      ... bench:   2,660,949 ns/iter (+/- 55,488) = 1910 MB/s
-test regexdna::variant6                      ... bench:   2,647,131 ns/iter (+/- 26,846) = 1920 MB/s
-test regexdna::variant7                      ... bench:   3,235,032 ns/iter (+/- 37,599) = 1571 MB/s
-test regexdna::variant8                      ... bench:   3,305,124 ns/iter (+/- 67,109) = 1538 MB/s
-test regexdna::variant9                      ... bench:   3,231,033 ns/iter (+/- 55,626) = 1573 MB/s
-test rust_compile::compile_huge              ... bench:      99,387 ns/iter (+/- 2,366)
-test rust_compile::compile_huge_bytes        ... bench:   5,865,693 ns/iter (+/- 62,255)
-test rust_compile::compile_huge_full         ... bench:  11,752,845 ns/iter (+/- 195,440)
-test rust_compile::compile_simple            ... bench:       4,117 ns/iter (+/- 141)
-test rust_compile::compile_simple_bytes      ... bench:       4,162 ns/iter (+/- 67)
-test rust_compile::compile_simple_full       ... bench:      19,955 ns/iter (+/- 622)
-test rust_compile::compile_small             ... bench:       9,140 ns/iter (+/- 112)
-test rust_compile::compile_small_bytes       ... bench:     165,990 ns/iter (+/- 5,876)
-test rust_compile::compile_small_full        ... bench:     342,897 ns/iter (+/- 13,730)
-test sherlock::before_after_holmes           ... bench:     906,789 ns/iter (+/- 13,931) = 656 MB/s
-test sherlock::before_holmes                 ... bench:      62,319 ns/iter (+/- 790) = 9546 MB/s
-test sherlock::everything_greedy             ... bench:   2,175,424 ns/iter (+/- 47,720) = 273 MB/s
-test sherlock::everything_greedy_nl          ... bench:     884,406 ns/iter (+/- 22,679) = 672 MB/s
-test sherlock::holmes_cochar_watson          ... bench:     105,261 ns/iter (+/- 3,536) = 5651 MB/s
-test sherlock::holmes_coword_watson          ... bench:     479,524 ns/iter (+/- 7,749) = 1240 MB/s
-test sherlock::ing_suffix                    ... bench:     321,401 ns/iter (+/- 9,123) = 1851 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:   1,069,722 ns/iter (+/- 16,366) = 556 MB/s
-test sherlock::letters                       ... bench:  21,959,896 ns/iter (+/- 204,695) = 27 MB/s
-test sherlock::letters_lower                 ... bench:  21,462,457 ns/iter (+/- 207,449) = 27 MB/s
-test sherlock::letters_upper                 ... bench:   1,768,026 ns/iter (+/- 41,459) = 336 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     897,197 ns/iter (+/- 14,349) = 663 MB/s
-test sherlock::name_alt1                     ... bench:      34,037 ns/iter (+/- 719) = 17479 MB/s
-test sherlock::name_alt2                     ... bench:      86,788 ns/iter (+/- 1,203) = 6855 MB/s
-test sherlock::name_alt3                     ... bench:      98,225 ns/iter (+/- 1,589) = 6056 MB/s
-test sherlock::name_alt3_nocase              ... bench:     377,597 ns/iter (+/- 14,840) = 1575 MB/s
-test sherlock::name_alt4                     ... bench:     122,440 ns/iter (+/- 8,123) = 4858 MB/s
-test sherlock::name_alt4_nocase              ... bench:     187,282 ns/iter (+/- 5,176) = 3176 MB/s
-test sherlock::name_alt5                     ... bench:      91,429 ns/iter (+/- 1,944) = 6507 MB/s
-test sherlock::name_alt5_nocase              ... bench:     348,111 ns/iter (+/- 12,721) = 1709 MB/s
-test sherlock::name_holmes                   ... bench:      33,547 ns/iter (+/- 1,119) = 17734 MB/s
-test sherlock::name_holmes_nocase            ... bench:     132,342 ns/iter (+/- 3,974) = 4495 MB/s
-test sherlock::name_sherlock                 ... bench:      22,562 ns/iter (+/- 364) = 26368 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      22,313 ns/iter (+/- 579) = 26663 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      97,556 ns/iter (+/- 2,092) = 6098 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      95,917 ns/iter (+/- 4,054) = 6202 MB/s
-test sherlock::name_whitespace               ... bench:      30,997 ns/iter (+/- 1,039) = 19193 MB/s
-test sherlock::no_match_common               ... bench:      19,690 ns/iter (+/- 378) = 30214 MB/s
-test sherlock::no_match_really_common        ... bench:      27,629 ns/iter (+/- 465) = 21532 MB/s
-test sherlock::no_match_uncommon             ... bench:      19,681 ns/iter (+/- 291) = 30228 MB/s
-test sherlock::quotes                        ... bench:     368,290 ns/iter (+/- 1,508) = 1615 MB/s
-test sherlock::repeated_class_negation       ... bench:  73,004,024 ns/iter (+/- 1,040,743) = 8 MB/s
-test sherlock::the_lower                     ... bench:     320,929 ns/iter (+/- 12,287) = 1853 MB/s
-test sherlock::the_nocase                    ... bench:     514,946 ns/iter (+/- 11,241) = 1155 MB/s
-test sherlock::the_upper                     ... bench:      43,816 ns/iter (+/- 1,719) = 13577 MB/s
-test sherlock::the_whitespace                ... bench:     825,245 ns/iter (+/- 20,797) = 720 MB/s
-test sherlock::word_ending_n                 ... bench:   1,676,908 ns/iter (+/- 40,650) = 354 MB/s
-test sherlock::words                         ... bench:   8,449,099 ns/iter (+/- 123,842) = 70 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 107 measured; 0 filtered out; finished in 128.47s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/11-regex-1.7.3/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/11-regex-1.7.3/rust
deleted file mode 100644
index aed99af..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/11-regex-1.7.3/rust
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:           7 ns/iter (+/- 0) = 55714 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          10 ns/iter (+/- 0) = 39000 MB/s
-test misc::anchored_literal_short_match      ... bench:           7 ns/iter (+/- 0) = 3714 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          10 ns/iter (+/- 0) = 2600 MB/s
-test misc::easy0_1K                          ... bench:           7 ns/iter (+/- 0) = 150142 MB/s
-test misc::easy0_1MB                         ... bench:          11 ns/iter (+/- 1) = 95327545 MB/s
-test misc::easy0_32                          ... bench:           7 ns/iter (+/- 0) = 8428 MB/s
-test misc::easy0_32K                         ... bench:           7 ns/iter (+/- 0) = 4685000 MB/s
-test misc::easy1_1K                          ... bench:          17 ns/iter (+/- 1) = 61411 MB/s
-test misc::easy1_1MB                         ... bench:          20 ns/iter (+/- 0) = 52429800 MB/s
-test misc::easy1_32                          ... bench:          18 ns/iter (+/- 1) = 2888 MB/s
-test misc::easy1_32K                         ... bench:          18 ns/iter (+/- 0) = 1821555 MB/s
-test misc::hard_1K                           ... bench:          24 ns/iter (+/- 0) = 43791 MB/s
-test misc::hard_1MB                          ... bench:          28 ns/iter (+/- 0) = 37450107 MB/s
-test misc::hard_32                           ... bench:          24 ns/iter (+/- 0) = 2458 MB/s
-test misc::hard_32K                          ... bench:          24 ns/iter (+/- 0) = 1366458 MB/s
-test misc::is_match_set                      ... bench:          37 ns/iter (+/- 0) = 675 MB/s
-test misc::literal                           ... bench:           8 ns/iter (+/- 1) = 6375 MB/s
-test misc::long_needle1                      ... bench:       1,785 ns/iter (+/- 1) = 56022 MB/s
-test misc::long_needle2                      ... bench:     193,595 ns/iter (+/- 1,486) = 516 MB/s
-test misc::match_class                       ... bench:          37 ns/iter (+/- 1) = 2189 MB/s
-test misc::match_class_in_range              ... bench:           8 ns/iter (+/- 0) = 10125 MB/s
-test misc::match_class_unicode               ... bench:         181 ns/iter (+/- 1) = 889 MB/s
-test misc::matches_set                       ... bench:         216 ns/iter (+/- 9) = 115 MB/s
-test misc::medium_1K                         ... bench:           7 ns/iter (+/- 0) = 150285 MB/s
-test misc::medium_1MB                        ... bench:          12 ns/iter (+/- 1) = 87383666 MB/s
-test misc::medium_32                         ... bench:           7 ns/iter (+/- 0) = 8571 MB/s
-test misc::medium_32K                        ... bench:           7 ns/iter (+/- 0) = 4685142 MB/s
-test misc::no_exponential                    ... bench:         283 ns/iter (+/- 7) = 353 MB/s
-test misc::not_literal                       ... bench:          53 ns/iter (+/- 1) = 962 MB/s
-test misc::one_pass_long_prefix              ... bench:          24 ns/iter (+/- 2) = 1083 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          24 ns/iter (+/- 2) = 1083 MB/s
-test misc::one_pass_short                    ... bench:          16 ns/iter (+/- 0) = 1062 MB/s
-test misc::one_pass_short_not                ... bench:          19 ns/iter (+/- 0) = 894 MB/s
-test misc::reallyhard2_1K                    ... bench:          41 ns/iter (+/- 0) = 25365 MB/s
-test misc::reallyhard_1K                     ... bench:       1,208 ns/iter (+/- 2) = 870 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,213,959 ns/iter (+/- 7,198) = 863 MB/s
-test misc::reallyhard_32                     ... bench:          62 ns/iter (+/- 0) = 951 MB/s
-test misc::reallyhard_32K                    ... bench:      38,231 ns/iter (+/- 354) = 857 MB/s
-test misc::replace_all                       ... bench:          86 ns/iter (+/- 3)
-test misc::reverse_suffix_no_quadratic       ... bench:       2,351 ns/iter (+/- 31) = 3402 MB/s
-test misc::short_haystack_1000000x           ... bench:      91,018 ns/iter (+/- 203) = 87894 MB/s
-test misc::short_haystack_100000x            ... bench:       9,277 ns/iter (+/- 40) = 86235 MB/s
-test misc::short_haystack_10000x             ... bench:       2,863 ns/iter (+/- 4) = 27946 MB/s
-test misc::short_haystack_1000x              ... bench:         201 ns/iter (+/- 3) = 39855 MB/s
-test misc::short_haystack_100x               ... bench:         100 ns/iter (+/- 2) = 8110 MB/s
-test misc::short_haystack_10x                ... bench:          88 ns/iter (+/- 0) = 1034 MB/s
-test misc::short_haystack_1x                 ... bench:          86 ns/iter (+/- 1) = 220 MB/s
-test misc::short_haystack_2x                 ... bench:          87 ns/iter (+/- 0) = 310 MB/s
-test misc::short_haystack_3x                 ... bench:          88 ns/iter (+/- 1) = 397 MB/s
-test misc::short_haystack_4x                 ... bench:          88 ns/iter (+/- 1) = 488 MB/s
-test regexdna::find_new_lines                ... bench:   7,348,651 ns/iter (+/- 40,559) = 691 MB/s
-test regexdna::subst1                        ... bench:     493,624 ns/iter (+/- 10,315) = 10298 MB/s
-test regexdna::subst10                       ... bench:     489,573 ns/iter (+/- 18,151) = 10383 MB/s
-test regexdna::subst11                       ... bench:     492,501 ns/iter (+/- 11,650) = 10321 MB/s
-test regexdna::subst2                        ... bench:     492,283 ns/iter (+/- 12,363) = 10326 MB/s
-test regexdna::subst3                        ... bench:     496,795 ns/iter (+/- 20,704) = 10232 MB/s
-test regexdna::subst4                        ... bench:     489,245 ns/iter (+/- 10,289) = 10390 MB/s
-test regexdna::subst5                        ... bench:     499,701 ns/iter (+/- 11,359) = 10172 MB/s
-test regexdna::subst6                        ... bench:     490,460 ns/iter (+/- 8,758) = 10364 MB/s
-test regexdna::subst7                        ... bench:     496,398 ns/iter (+/- 18,774) = 10240 MB/s
-test regexdna::subst8                        ... bench:     497,077 ns/iter (+/- 24,767) = 10226 MB/s
-test regexdna::subst9                        ... bench:     496,763 ns/iter (+/- 12,477) = 10233 MB/s
-test regexdna::variant1                      ... bench:   1,454,747 ns/iter (+/- 48,995) = 3494 MB/s
-test regexdna::variant2                      ... bench:   2,311,001 ns/iter (+/- 63,347) = 2199 MB/s
-test regexdna::variant3                      ... bench:   2,832,483 ns/iter (+/- 33,976) = 1794 MB/s
-test regexdna::variant4                      ... bench:   2,796,710 ns/iter (+/- 56,279) = 1817 MB/s
-test regexdna::variant5                      ... bench:   1,708,634 ns/iter (+/- 25,749) = 2975 MB/s
-test regexdna::variant6                      ... bench:   1,706,259 ns/iter (+/- 22,151) = 2979 MB/s
-test regexdna::variant7                      ... bench:   2,400,436 ns/iter (+/- 24,655) = 2117 MB/s
-test regexdna::variant8                      ... bench:   2,413,765 ns/iter (+/- 50,326) = 2106 MB/s
-test regexdna::variant9                      ... bench:   2,402,528 ns/iter (+/- 26,150) = 2115 MB/s
-test rust_compile::compile_huge              ... bench:      51,936 ns/iter (+/- 834)
-test rust_compile::compile_huge_bytes        ... bench:   3,294,633 ns/iter (+/- 40,585)
-test rust_compile::compile_huge_full         ... bench:   6,323,294 ns/iter (+/- 66,684)
-test rust_compile::compile_simple            ... bench:       1,992 ns/iter (+/- 25)
-test rust_compile::compile_simple_bytes      ... bench:       2,004 ns/iter (+/- 20)
-test rust_compile::compile_simple_full       ... bench:       9,697 ns/iter (+/- 68)
-test rust_compile::compile_small             ... bench:       4,261 ns/iter (+/- 72)
-test rust_compile::compile_small_bytes       ... bench:      83,908 ns/iter (+/- 1,405)
-test rust_compile::compile_small_full        ... bench:     166,152 ns/iter (+/- 3,508)
-test sherlock::before_after_holmes           ... bench:     699,767 ns/iter (+/- 6,201) = 850 MB/s
-test sherlock::before_holmes                 ... bench:      29,284 ns/iter (+/- 573) = 20315 MB/s
-test sherlock::everything_greedy             ... bench:   1,070,812 ns/iter (+/- 18,795) = 555 MB/s
-test sherlock::everything_greedy_nl          ... bench:     445,517 ns/iter (+/- 7,760) = 1335 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      43,459 ns/iter (+/- 901) = 13689 MB/s
-test sherlock::holmes_coword_watson          ... bench:     335,772 ns/iter (+/- 6,348) = 1771 MB/s
-test sherlock::ing_suffix                    ... bench:     153,546 ns/iter (+/- 3,075) = 3874 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     777,388 ns/iter (+/- 8,447) = 765 MB/s
-test sherlock::letters                       ... bench:  10,123,374 ns/iter (+/- 90,059) = 58 MB/s
-test sherlock::letters_lower                 ... bench:   9,957,916 ns/iter (+/- 63,766) = 59 MB/s
-test sherlock::letters_upper                 ... bench:   1,123,119 ns/iter (+/- 17,972) = 529 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     694,714 ns/iter (+/- 7,006) = 856 MB/s
-test sherlock::name_alt1                     ... bench:      13,427 ns/iter (+/- 331) = 44308 MB/s
-test sherlock::name_alt2                     ... bench:      33,171 ns/iter (+/- 1,029) = 17935 MB/s
-test sherlock::name_alt3                     ... bench:      36,816 ns/iter (+/- 1,138) = 16159 MB/s
-test sherlock::name_alt3_nocase              ... bench:     221,185 ns/iter (+/- 3,268) = 2689 MB/s
-test sherlock::name_alt4                     ... bench:      49,883 ns/iter (+/- 1,150) = 11926 MB/s
-test sherlock::name_alt4_nocase              ... bench:      74,967 ns/iter (+/- 1,807) = 7935 MB/s
-test sherlock::name_alt5                     ... bench:      34,675 ns/iter (+/- 1,335) = 17157 MB/s
-test sherlock::name_alt5_nocase              ... bench:     192,109 ns/iter (+/- 6,194) = 3096 MB/s
-test sherlock::name_holmes                   ... bench:      18,355 ns/iter (+/- 389) = 32412 MB/s
-test sherlock::name_holmes_nocase            ... bench:      58,179 ns/iter (+/- 917) = 10225 MB/s
-test sherlock::name_sherlock                 ... bench:      14,307 ns/iter (+/- 74) = 41583 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      14,332 ns/iter (+/- 144) = 41510 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      41,336 ns/iter (+/- 736) = 14392 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      40,029 ns/iter (+/- 1,393) = 14862 MB/s
-test sherlock::name_whitespace               ... bench:      17,807 ns/iter (+/- 105) = 33410 MB/s
-test sherlock::no_match_common               ... bench:      13,625 ns/iter (+/- 15) = 43664 MB/s
-test sherlock::no_match_really_common        ... bench:      13,818 ns/iter (+/- 282) = 43054 MB/s
-test sherlock::no_match_uncommon             ... bench:      13,628 ns/iter (+/- 27) = 43655 MB/s
-test sherlock::quotes                        ... bench:     232,910 ns/iter (+/- 1,883) = 2554 MB/s
-test sherlock::repeated_class_negation       ... bench:  36,892,964 ns/iter (+/- 629,538) = 16 MB/s
-test sherlock::the_lower                     ... bench:     203,077 ns/iter (+/- 2,574) = 2929 MB/s
-test sherlock::the_nocase                    ... bench:     290,781 ns/iter (+/- 6,597) = 2045 MB/s
-test sherlock::the_upper                     ... bench:      22,731 ns/iter (+/- 439) = 26172 MB/s
-test sherlock::the_whitespace                ... bench:     423,983 ns/iter (+/- 10,849) = 1403 MB/s
-test sherlock::word_ending_n                 ... bench:   1,109,013 ns/iter (+/- 12,645) = 536 MB/s
-test sherlock::words                         ... bench:   4,529,451 ns/iter (+/- 44,285) = 131 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 164.08s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/11-regex-1.7.3/rust-bytes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/11-regex-1.7.3/rust-bytes
deleted file mode 100644
index e9f750e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/11-regex-1.7.3/rust-bytes
+++ /dev/null
@@ -1,112 +0,0 @@
-
-running 107 tests
-test misc::anchored_literal_long_match       ... bench:           8 ns/iter (+/- 0) = 48750 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          10 ns/iter (+/- 0) = 39000 MB/s
-test misc::anchored_literal_short_match      ... bench:           7 ns/iter (+/- 0) = 3714 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          10 ns/iter (+/- 0) = 2600 MB/s
-test misc::easy0_1K                          ... bench:           7 ns/iter (+/- 0) = 150142 MB/s
-test misc::easy0_1MB                         ... bench:          11 ns/iter (+/- 0) = 95327545 MB/s
-test misc::easy0_32                          ... bench:           7 ns/iter (+/- 0) = 8428 MB/s
-test misc::easy0_32K                         ... bench:           7 ns/iter (+/- 0) = 4685000 MB/s
-test misc::easy1_1K                          ... bench:          17 ns/iter (+/- 0) = 61411 MB/s
-test misc::easy1_1MB                         ... bench:          20 ns/iter (+/- 0) = 52429800 MB/s
-test misc::easy1_32                          ... bench:          18 ns/iter (+/- 0) = 2888 MB/s
-test misc::easy1_32K                         ... bench:          18 ns/iter (+/- 0) = 1821555 MB/s
-test misc::hard_1K                           ... bench:          24 ns/iter (+/- 0) = 43791 MB/s
-test misc::hard_1MB                          ... bench:          28 ns/iter (+/- 0) = 37450107 MB/s
-test misc::hard_32                           ... bench:          24 ns/iter (+/- 0) = 2458 MB/s
-test misc::hard_32K                          ... bench:          24 ns/iter (+/- 0) = 1366458 MB/s
-test misc::is_match_set                      ... bench:          37 ns/iter (+/- 0) = 675 MB/s
-test misc::literal                           ... bench:           7 ns/iter (+/- 0) = 7285 MB/s
-test misc::long_needle1                      ... bench:       2,186 ns/iter (+/- 19) = 45746 MB/s
-test misc::long_needle2                      ... bench:     210,378 ns/iter (+/- 61,574) = 475 MB/s
-test misc::match_class                       ... bench:          39 ns/iter (+/- 1) = 2076 MB/s
-test misc::match_class_in_range              ... bench:           7 ns/iter (+/- 0) = 11571 MB/s
-test misc::matches_set                       ... bench:         176 ns/iter (+/- 12) = 142 MB/s
-test misc::medium_1K                         ... bench:           8 ns/iter (+/- 0) = 131500 MB/s
-test misc::medium_1MB                        ... bench:          12 ns/iter (+/- 0) = 87383666 MB/s
-test misc::medium_32                         ... bench:           8 ns/iter (+/- 0) = 7500 MB/s
-test misc::medium_32K                        ... bench:           8 ns/iter (+/- 0) = 4099500 MB/s
-test misc::no_exponential                    ... bench:         274 ns/iter (+/- 7) = 364 MB/s
-test misc::not_literal                       ... bench:          53 ns/iter (+/- 0) = 962 MB/s
-test misc::one_pass_long_prefix              ... bench:          24 ns/iter (+/- 2) = 1083 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          24 ns/iter (+/- 1) = 1083 MB/s
-test misc::one_pass_short                    ... bench:          16 ns/iter (+/- 1) = 1062 MB/s
-test misc::one_pass_short_not                ... bench:          19 ns/iter (+/- 0) = 894 MB/s
-test misc::reallyhard2_1K                    ... bench:          38 ns/iter (+/- 5) = 27368 MB/s
-test misc::reallyhard_1K                     ... bench:       1,220 ns/iter (+/- 15) = 861 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,215,297 ns/iter (+/- 5,229) = 862 MB/s
-test misc::reallyhard_32                     ... bench:          63 ns/iter (+/- 1) = 936 MB/s
-test misc::reallyhard_32K                    ... bench:      38,164 ns/iter (+/- 232) = 859 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       2,353 ns/iter (+/- 8) = 3399 MB/s
-test regexdna::find_new_lines                ... bench:   7,346,276 ns/iter (+/- 46,149) = 691 MB/s
-test regexdna::subst1                        ... bench:     486,203 ns/iter (+/- 21,159) = 10455 MB/s
-test regexdna::subst10                       ... bench:     494,356 ns/iter (+/- 6,423) = 10282 MB/s
-test regexdna::subst11                       ... bench:     481,930 ns/iter (+/- 19,639) = 10548 MB/s
-test regexdna::subst2                        ... bench:     486,672 ns/iter (+/- 22,184) = 10445 MB/s
-test regexdna::subst3                        ... bench:     487,152 ns/iter (+/- 19,776) = 10434 MB/s
-test regexdna::subst4                        ... bench:     486,534 ns/iter (+/- 23,897) = 10448 MB/s
-test regexdna::subst5                        ... bench:     481,412 ns/iter (+/- 26,310) = 10559 MB/s
-test regexdna::subst6                        ... bench:     479,498 ns/iter (+/- 20,310) = 10601 MB/s
-test regexdna::subst7                        ... bench:     481,960 ns/iter (+/- 18,492) = 10547 MB/s
-test regexdna::subst8                        ... bench:     482,282 ns/iter (+/- 22,522) = 10540 MB/s
-test regexdna::subst9                        ... bench:     489,224 ns/iter (+/- 25,264) = 10390 MB/s
-test regexdna::variant1                      ... bench:   1,470,068 ns/iter (+/- 65,563) = 3457 MB/s
-test regexdna::variant2                      ... bench:   2,298,112 ns/iter (+/- 27,688) = 2211 MB/s
-test regexdna::variant3                      ... bench:   2,818,539 ns/iter (+/- 31,432) = 1803 MB/s
-test regexdna::variant4                      ... bench:   2,786,226 ns/iter (+/- 30,699) = 1824 MB/s
-test regexdna::variant5                      ... bench:   1,716,429 ns/iter (+/- 20,264) = 2961 MB/s
-test regexdna::variant6                      ... bench:   1,719,420 ns/iter (+/- 23,944) = 2956 MB/s
-test regexdna::variant7                      ... bench:   2,391,022 ns/iter (+/- 23,192) = 2126 MB/s
-test regexdna::variant8                      ... bench:   2,418,744 ns/iter (+/- 44,152) = 2101 MB/s
-test regexdna::variant9                      ... bench:   2,400,918 ns/iter (+/- 24,041) = 2117 MB/s
-test rust_compile::compile_huge              ... bench:      57,745 ns/iter (+/- 816)
-test rust_compile::compile_huge_bytes        ... bench:   3,346,952 ns/iter (+/- 39,488)
-test rust_compile::compile_huge_full         ... bench:   6,344,293 ns/iter (+/- 53,114)
-test rust_compile::compile_simple            ... bench:       2,040 ns/iter (+/- 32)
-test rust_compile::compile_simple_bytes      ... bench:       2,010 ns/iter (+/- 34)
-test rust_compile::compile_simple_full       ... bench:       9,632 ns/iter (+/- 464)
-test rust_compile::compile_small             ... bench:       4,445 ns/iter (+/- 77)
-test rust_compile::compile_small_bytes       ... bench:      83,791 ns/iter (+/- 1,929)
-test rust_compile::compile_small_full        ... bench:     164,948 ns/iter (+/- 2,595)
-test sherlock::before_after_holmes           ... bench:     699,996 ns/iter (+/- 6,647) = 849 MB/s
-test sherlock::before_holmes                 ... bench:      28,208 ns/iter (+/- 233) = 21090 MB/s
-test sherlock::everything_greedy             ... bench:   1,033,048 ns/iter (+/- 9,790) = 575 MB/s
-test sherlock::everything_greedy_nl          ... bench:     424,081 ns/iter (+/- 22,574) = 1402 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      43,131 ns/iter (+/- 827) = 13793 MB/s
-test sherlock::holmes_coword_watson          ... bench:     336,678 ns/iter (+/- 6,985) = 1767 MB/s
-test sherlock::ing_suffix                    ... bench:     153,589 ns/iter (+/- 3,193) = 3873 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     776,911 ns/iter (+/- 8,815) = 765 MB/s
-test sherlock::letters                       ... bench:  10,056,702 ns/iter (+/- 49,688) = 59 MB/s
-test sherlock::letters_lower                 ... bench:   9,900,568 ns/iter (+/- 76,118) = 60 MB/s
-test sherlock::letters_upper                 ... bench:   1,120,456 ns/iter (+/- 13,538) = 530 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     693,727 ns/iter (+/- 6,840) = 857 MB/s
-test sherlock::name_alt1                     ... bench:      11,101 ns/iter (+/- 65) = 53592 MB/s
-test sherlock::name_alt2                     ... bench:      34,003 ns/iter (+/- 966) = 17496 MB/s
-test sherlock::name_alt3                     ... bench:      37,975 ns/iter (+/- 1,313) = 15666 MB/s
-test sherlock::name_alt3_nocase              ... bench:     214,299 ns/iter (+/- 3,026) = 2776 MB/s
-test sherlock::name_alt4                     ... bench:      50,551 ns/iter (+/- 1,377) = 11768 MB/s
-test sherlock::name_alt4_nocase              ... bench:      74,713 ns/iter (+/- 1,359) = 7962 MB/s
-test sherlock::name_alt5                     ... bench:      35,426 ns/iter (+/- 625) = 16793 MB/s
-test sherlock::name_alt5_nocase              ... bench:     190,521 ns/iter (+/- 4,903) = 3122 MB/s
-test sherlock::name_holmes                   ... bench:      18,070 ns/iter (+/- 763) = 32923 MB/s
-test sherlock::name_holmes_nocase            ... bench:      58,454 ns/iter (+/- 1,228) = 10177 MB/s
-test sherlock::name_sherlock                 ... bench:      14,380 ns/iter (+/- 227) = 41372 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      14,491 ns/iter (+/- 116) = 41055 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      40,722 ns/iter (+/- 231) = 14609 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      39,937 ns/iter (+/- 623) = 14896 MB/s
-test sherlock::name_whitespace               ... bench:      17,979 ns/iter (+/- 140) = 33090 MB/s
-test sherlock::no_match_common               ... bench:      13,650 ns/iter (+/- 112) = 43584 MB/s
-test sherlock::no_match_really_common        ... bench:      13,623 ns/iter (+/- 295) = 43671 MB/s
-test sherlock::no_match_uncommon             ... bench:      13,641 ns/iter (+/- 55) = 43613 MB/s
-test sherlock::quotes                        ... bench:     232,451 ns/iter (+/- 6,555) = 2559 MB/s
-test sherlock::repeated_class_negation       ... bench:  36,984,199 ns/iter (+/- 623,153) = 16 MB/s
-test sherlock::the_lower                     ... bench:     189,502 ns/iter (+/- 4,870) = 3139 MB/s
-test sherlock::the_nocase                    ... bench:     294,945 ns/iter (+/- 9,381) = 2017 MB/s
-test sherlock::the_upper                     ... bench:      21,591 ns/iter (+/- 680) = 27554 MB/s
-test sherlock::the_whitespace                ... bench:     424,862 ns/iter (+/- 7,197) = 1400 MB/s
-test sherlock::word_ending_n                 ... bench:   1,126,768 ns/iter (+/- 13,900) = 527 MB/s
-test sherlock::words                         ... bench:   4,517,167 ns/iter (+/- 55,809) = 131 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 107 measured; 0 filtered out; finished in 150.58s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/12-regex-1.8.1/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/12-regex-1.8.1/rust
deleted file mode 100644
index 282893e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/12-regex-1.8.1/rust
+++ /dev/null
@@ -1,124 +0,0 @@
-
-running 119 tests
-test misc::anchored_literal_long_match       ... bench:           8 ns/iter (+/- 0) = 48750 MB/s
-test misc::anchored_literal_long_non_match   ... bench:           9 ns/iter (+/- 0) = 43333 MB/s
-test misc::anchored_literal_short_match      ... bench:           7 ns/iter (+/- 0) = 3714 MB/s
-test misc::anchored_literal_short_non_match  ... bench:           9 ns/iter (+/- 0) = 2888 MB/s
-test misc::easy0_1K                          ... bench:          24 ns/iter (+/- 1) = 43791 MB/s
-test misc::easy0_1MB                         ... bench:          28 ns/iter (+/- 0) = 37450107 MB/s
-test misc::easy0_32                          ... bench:          25 ns/iter (+/- 0) = 2360 MB/s
-test misc::easy0_32K                         ... bench:          24 ns/iter (+/- 0) = 1366458 MB/s
-test misc::easy1_1K                          ... bench:          18 ns/iter (+/- 1) = 58000 MB/s
-test misc::easy1_1MB                         ... bench:          21 ns/iter (+/- 0) = 49933142 MB/s
-test misc::easy1_32                          ... bench:          18 ns/iter (+/- 0) = 2888 MB/s
-test misc::easy1_32K                         ... bench:          18 ns/iter (+/- 0) = 1821555 MB/s
-test misc::hard_1K                           ... bench:          24 ns/iter (+/- 0) = 43791 MB/s
-test misc::hard_1MB                          ... bench:          29 ns/iter (+/- 0) = 36158724 MB/s
-test misc::hard_32                           ... bench:          24 ns/iter (+/- 0) = 2458 MB/s
-test misc::hard_32K                          ... bench:          24 ns/iter (+/- 0) = 1366458 MB/s
-test misc::is_match_set                      ... bench:          37 ns/iter (+/- 0) = 675 MB/s
-test misc::literal                           ... bench:           7 ns/iter (+/- 0) = 7285 MB/s
-test misc::long_needle1                      ... bench:       1,802 ns/iter (+/- 6) = 55494 MB/s
-test misc::long_needle2                      ... bench:     207,353 ns/iter (+/- 165) = 482 MB/s
-test misc::match_class                       ... bench:          41 ns/iter (+/- 2) = 1975 MB/s
-test misc::match_class_in_range              ... bench:           7 ns/iter (+/- 0) = 11571 MB/s
-test misc::match_class_unicode               ... bench:         168 ns/iter (+/- 3) = 958 MB/s
-test misc::matches_set                       ... bench:         210 ns/iter (+/- 5) = 119 MB/s
-test misc::medium_1K                         ... bench:          25 ns/iter (+/- 0) = 42080 MB/s
-test misc::medium_1MB                        ... bench:          29 ns/iter (+/- 0) = 36158758 MB/s
-test misc::medium_32                         ... bench:          25 ns/iter (+/- 0) = 2400 MB/s
-test misc::medium_32K                        ... bench:          25 ns/iter (+/- 0) = 1311840 MB/s
-test misc::no_exponential                    ... bench:         268 ns/iter (+/- 7) = 373 MB/s
-test misc::not_literal                       ... bench:          44 ns/iter (+/- 4) = 1159 MB/s
-test misc::one_pass_long_prefix              ... bench:          24 ns/iter (+/- 2) = 1083 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          23 ns/iter (+/- 2) = 1130 MB/s
-test misc::one_pass_short                    ... bench:          16 ns/iter (+/- 0) = 1062 MB/s
-test misc::one_pass_short_not                ... bench:          19 ns/iter (+/- 0) = 894 MB/s
-test misc::reallyhard2_1K                    ... bench:          38 ns/iter (+/- 1) = 27368 MB/s
-test misc::reallyhard_1K                     ... bench:       1,215 ns/iter (+/- 12) = 865 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,215,907 ns/iter (+/- 6,442) = 862 MB/s
-test misc::reallyhard_32                     ... bench:          53 ns/iter (+/- 2) = 1113 MB/s
-test misc::reallyhard_32K                    ... bench:      38,162 ns/iter (+/- 464) = 859 MB/s
-test misc::replace_all                       ... bench:          86 ns/iter (+/- 5)
-test misc::reverse_suffix_no_quadratic       ... bench:       2,355 ns/iter (+/- 470) = 3397 MB/s
-test misc::short_haystack_1000000x           ... bench:      91,039 ns/iter (+/- 157) = 87874 MB/s
-test misc::short_haystack_100000x            ... bench:       7,595 ns/iter (+/- 33) = 105333 MB/s
-test misc::short_haystack_10000x             ... bench:       2,865 ns/iter (+/- 9) = 27927 MB/s
-test misc::short_haystack_1000x              ... bench:         211 ns/iter (+/- 2) = 37966 MB/s
-test misc::short_haystack_100x               ... bench:          98 ns/iter (+/- 3) = 8275 MB/s
-test misc::short_haystack_10x                ... bench:          92 ns/iter (+/- 4) = 989 MB/s
-test misc::short_haystack_1x                 ... bench:          90 ns/iter (+/- 2) = 211 MB/s
-test misc::short_haystack_2x                 ... bench:          88 ns/iter (+/- 3) = 306 MB/s
-test misc::short_haystack_3x                 ... bench:          91 ns/iter (+/- 3) = 384 MB/s
-test misc::short_haystack_4x                 ... bench:          90 ns/iter (+/- 3) = 477 MB/s
-test regexdna::find_new_lines                ... bench:   7,323,399 ns/iter (+/- 24,661) = 694 MB/s
-test regexdna::subst1                        ... bench:     473,671 ns/iter (+/- 16,963) = 10731 MB/s
-test regexdna::subst10                       ... bench:     463,672 ns/iter (+/- 13,433) = 10963 MB/s
-test regexdna::subst11                       ... bench:     470,891 ns/iter (+/- 28,305) = 10795 MB/s
-test regexdna::subst2                        ... bench:     469,218 ns/iter (+/- 26,181) = 10833 MB/s
-test regexdna::subst3                        ... bench:     467,417 ns/iter (+/- 30,700) = 10875 MB/s
-test regexdna::subst4                        ... bench:     469,373 ns/iter (+/- 17,254) = 10830 MB/s
-test regexdna::subst5                        ... bench:     467,035 ns/iter (+/- 30,365) = 10884 MB/s
-test regexdna::subst6                        ... bench:     466,540 ns/iter (+/- 18,283) = 10895 MB/s
-test regexdna::subst7                        ... bench:     470,291 ns/iter (+/- 23,930) = 10809 MB/s
-test regexdna::subst8                        ... bench:     466,425 ns/iter (+/- 27,080) = 10898 MB/s
-test regexdna::subst9                        ... bench:     468,192 ns/iter (+/- 17,296) = 10857 MB/s
-test regexdna::variant1                      ... bench:     653,471 ns/iter (+/- 8,898) = 7779 MB/s
-test regexdna::variant2                      ... bench:     902,852 ns/iter (+/- 12,549) = 5630 MB/s
-test regexdna::variant3                      ... bench:   1,158,000 ns/iter (+/- 14,075) = 4389 MB/s
-test regexdna::variant4                      ... bench:   1,149,520 ns/iter (+/- 13,482) = 4422 MB/s
-test regexdna::variant5                      ... bench:   1,132,121 ns/iter (+/- 7,624) = 4490 MB/s
-test regexdna::variant6                      ... bench:   1,069,227 ns/iter (+/- 13,436) = 4754 MB/s
-test regexdna::variant7                      ... bench:   1,150,436 ns/iter (+/- 28,302) = 4418 MB/s
-test regexdna::variant8                      ... bench:   1,148,923 ns/iter (+/- 49,063) = 4424 MB/s
-test regexdna::variant9                      ... bench:   1,190,858 ns/iter (+/- 15,044) = 4268 MB/s
-test rust_compile::compile_huge              ... bench:      52,168 ns/iter (+/- 827)
-test rust_compile::compile_huge_bytes        ... bench:   3,330,456 ns/iter (+/- 57,242)
-test rust_compile::compile_huge_full         ... bench:   6,378,126 ns/iter (+/- 85,019)
-test rust_compile::compile_simple            ... bench:       2,291 ns/iter (+/- 39)
-test rust_compile::compile_simple_bytes      ... bench:       2,355 ns/iter (+/- 37)
-test rust_compile::compile_simple_full       ... bench:      14,581 ns/iter (+/- 103)
-test rust_compile::compile_small             ... bench:      10,443 ns/iter (+/- 114)
-test rust_compile::compile_small_bytes       ... bench:      11,269 ns/iter (+/- 150)
-test rust_compile::compile_small_full        ... bench:      14,746 ns/iter (+/- 212)
-test sherlock::before_after_holmes           ... bench:     699,736 ns/iter (+/- 6,402) = 850 MB/s
-test sherlock::before_holmes                 ... bench:      28,001 ns/iter (+/- 198) = 21246 MB/s
-test sherlock::everything_greedy             ... bench:   1,029,174 ns/iter (+/- 33,321) = 578 MB/s
-test sherlock::everything_greedy_nl          ... bench:     460,103 ns/iter (+/- 23,290) = 1293 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      57,666 ns/iter (+/- 907) = 10316 MB/s
-test sherlock::holmes_coword_watson          ... bench:     345,016 ns/iter (+/- 4,672) = 1724 MB/s
-test sherlock::ing_suffix                    ... bench:     150,499 ns/iter (+/- 4,855) = 3953 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     777,723 ns/iter (+/- 8,076) = 764 MB/s
-test sherlock::letters                       ... bench:  10,022,203 ns/iter (+/- 77,897) = 59 MB/s
-test sherlock::letters_lower                 ... bench:   9,861,816 ns/iter (+/- 76,172) = 60 MB/s
-test sherlock::letters_upper                 ... bench:   1,134,201 ns/iter (+/- 11,926) = 524 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     693,533 ns/iter (+/- 6,686) = 857 MB/s
-test sherlock::name_alt1                     ... bench:      11,974 ns/iter (+/- 292) = 49685 MB/s
-test sherlock::name_alt2                     ... bench:      44,708 ns/iter (+/- 573) = 13307 MB/s
-test sherlock::name_alt3                     ... bench:      49,873 ns/iter (+/- 785) = 11928 MB/s
-test sherlock::name_alt3_nocase              ... bench:     190,194 ns/iter (+/- 2,944) = 3128 MB/s
-test sherlock::name_alt4                     ... bench:      52,028 ns/iter (+/- 1,102) = 11434 MB/s
-test sherlock::name_alt4_nocase              ... bench:     119,891 ns/iter (+/- 921) = 4962 MB/s
-test sherlock::name_alt5                     ... bench:      47,139 ns/iter (+/- 1,617) = 12620 MB/s
-test sherlock::name_alt5_nocase              ... bench:     200,159 ns/iter (+/- 3,992) = 2972 MB/s
-test sherlock::name_holmes                   ... bench:      17,902 ns/iter (+/- 577) = 33232 MB/s
-test sherlock::name_holmes_nocase            ... bench:      58,219 ns/iter (+/- 1,215) = 10218 MB/s
-test sherlock::name_sherlock                 ... bench:      14,314 ns/iter (+/- 45) = 41563 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      14,399 ns/iter (+/- 45) = 41317 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      41,418 ns/iter (+/- 591) = 14364 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      39,877 ns/iter (+/- 545) = 14919 MB/s
-test sherlock::name_whitespace               ... bench:      17,883 ns/iter (+/- 151) = 33268 MB/s
-test sherlock::no_match_common               ... bench:      13,696 ns/iter (+/- 123) = 43438 MB/s
-test sherlock::no_match_really_common        ... bench:      10,157 ns/iter (+/- 222) = 58573 MB/s
-test sherlock::no_match_uncommon             ... bench:      13,663 ns/iter (+/- 53) = 43543 MB/s
-test sherlock::quotes                        ... bench:     234,890 ns/iter (+/- 4,574) = 2532 MB/s
-test sherlock::repeated_class_negation       ... bench:  36,406,680 ns/iter (+/- 397,378) = 16 MB/s
-test sherlock::the_lower                     ... bench:     192,028 ns/iter (+/- 5,315) = 3098 MB/s
-test sherlock::the_nocase                    ... bench:     311,087 ns/iter (+/- 6,723) = 1912 MB/s
-test sherlock::the_upper                     ... bench:      21,710 ns/iter (+/- 1,269) = 27403 MB/s
-test sherlock::the_whitespace                ... bench:     425,246 ns/iter (+/- 7,741) = 1399 MB/s
-test sherlock::word_ending_n                 ... bench:   1,116,412 ns/iter (+/- 11,753) = 532 MB/s
-test sherlock::words                         ... bench:   4,452,805 ns/iter (+/- 84,309) = 133 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 119 measured; 0 filtered out; finished in 142.33s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/12-regex-1.8.1/rust-bytes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/12-regex-1.8.1/rust-bytes
deleted file mode 100644
index f5380a7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/12-regex-1.8.1/rust-bytes
+++ /dev/null
@@ -1,112 +0,0 @@
-
-running 107 tests
-test misc::anchored_literal_long_match       ... bench:           7 ns/iter (+/- 0) = 55714 MB/s
-test misc::anchored_literal_long_non_match   ... bench:           8 ns/iter (+/- 0) = 48750 MB/s
-test misc::anchored_literal_short_match      ... bench:           7 ns/iter (+/- 0) = 3714 MB/s
-test misc::anchored_literal_short_non_match  ... bench:           8 ns/iter (+/- 0) = 3250 MB/s
-test misc::easy0_1K                          ... bench:          24 ns/iter (+/- 0) = 43791 MB/s
-test misc::easy0_1MB                         ... bench:          28 ns/iter (+/- 0) = 37450107 MB/s
-test misc::easy0_32                          ... bench:          24 ns/iter (+/- 0) = 2458 MB/s
-test misc::easy0_32K                         ... bench:          24 ns/iter (+/- 0) = 1366458 MB/s
-test misc::easy1_1K                          ... bench:          18 ns/iter (+/- 0) = 58000 MB/s
-test misc::easy1_1MB                         ... bench:          21 ns/iter (+/- 0) = 49933142 MB/s
-test misc::easy1_32                          ... bench:          18 ns/iter (+/- 2) = 2888 MB/s
-test misc::easy1_32K                         ... bench:          18 ns/iter (+/- 0) = 1821555 MB/s
-test misc::hard_1K                           ... bench:          24 ns/iter (+/- 0) = 43791 MB/s
-test misc::hard_1MB                          ... bench:          28 ns/iter (+/- 0) = 37450107 MB/s
-test misc::hard_32                           ... bench:          24 ns/iter (+/- 0) = 2458 MB/s
-test misc::hard_32K                          ... bench:          24 ns/iter (+/- 0) = 1366458 MB/s
-test misc::is_match_set                      ... bench:          37 ns/iter (+/- 0) = 675 MB/s
-test misc::literal                           ... bench:           7 ns/iter (+/- 0) = 7285 MB/s
-test misc::long_needle1                      ... bench:       1,801 ns/iter (+/- 2) = 55525 MB/s
-test misc::long_needle2                      ... bench:     212,892 ns/iter (+/- 206) = 469 MB/s
-test misc::match_class                       ... bench:          40 ns/iter (+/- 0) = 2025 MB/s
-test misc::match_class_in_range              ... bench:           7 ns/iter (+/- 0) = 11571 MB/s
-test misc::matches_set                       ... bench:         174 ns/iter (+/- 2) = 143 MB/s
-test misc::medium_1K                         ... bench:          25 ns/iter (+/- 0) = 42080 MB/s
-test misc::medium_1MB                        ... bench:          29 ns/iter (+/- 0) = 36158758 MB/s
-test misc::medium_32                         ... bench:          25 ns/iter (+/- 0) = 2400 MB/s
-test misc::medium_32K                        ... bench:          25 ns/iter (+/- 0) = 1311840 MB/s
-test misc::no_exponential                    ... bench:         270 ns/iter (+/- 8) = 370 MB/s
-test misc::not_literal                       ... bench:          44 ns/iter (+/- 1) = 1159 MB/s
-test misc::one_pass_long_prefix              ... bench:          23 ns/iter (+/- 0) = 1130 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          23 ns/iter (+/- 0) = 1130 MB/s
-test misc::one_pass_short                    ... bench:          16 ns/iter (+/- 1) = 1062 MB/s
-test misc::one_pass_short_not                ... bench:          19 ns/iter (+/- 0) = 894 MB/s
-test misc::reallyhard2_1K                    ... bench:          38 ns/iter (+/- 2) = 27368 MB/s
-test misc::reallyhard_1K                     ... bench:       1,215 ns/iter (+/- 15) = 865 MB/s
-test misc::reallyhard_1MB                    ... bench:   1,217,631 ns/iter (+/- 11,216) = 861 MB/s
-test misc::reallyhard_32                     ... bench:          53 ns/iter (+/- 4) = 1113 MB/s
-test misc::reallyhard_32K                    ... bench:      38,251 ns/iter (+/- 364) = 857 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       2,353 ns/iter (+/- 4) = 3399 MB/s
-test regexdna::find_new_lines                ... bench:   7,322,463 ns/iter (+/- 37,966) = 694 MB/s
-test regexdna::subst1                        ... bench:     466,849 ns/iter (+/- 12,252) = 10888 MB/s
-test regexdna::subst10                       ... bench:     465,011 ns/iter (+/- 19,693) = 10931 MB/s
-test regexdna::subst11                       ... bench:     457,806 ns/iter (+/- 13,453) = 11103 MB/s
-test regexdna::subst2                        ... bench:     456,878 ns/iter (+/- 32,828) = 11126 MB/s
-test regexdna::subst3                        ... bench:     465,531 ns/iter (+/- 21,786) = 10919 MB/s
-test regexdna::subst4                        ... bench:     454,553 ns/iter (+/- 12,698) = 11183 MB/s
-test regexdna::subst5                        ... bench:     456,977 ns/iter (+/- 13,155) = 11123 MB/s
-test regexdna::subst6                        ... bench:     466,105 ns/iter (+/- 15,667) = 10906 MB/s
-test regexdna::subst7                        ... bench:     462,655 ns/iter (+/- 18,871) = 10987 MB/s
-test regexdna::subst8                        ... bench:     456,642 ns/iter (+/- 19,218) = 11132 MB/s
-test regexdna::subst9                        ... bench:     456,307 ns/iter (+/- 15,369) = 11140 MB/s
-test regexdna::variant1                      ... bench:     655,033 ns/iter (+/- 7,901) = 7760 MB/s
-test regexdna::variant2                      ... bench:     902,675 ns/iter (+/- 15,165) = 5631 MB/s
-test regexdna::variant3                      ... bench:   1,159,521 ns/iter (+/- 14,489) = 4384 MB/s
-test regexdna::variant4                      ... bench:   1,147,781 ns/iter (+/- 16,536) = 4428 MB/s
-test regexdna::variant5                      ... bench:   1,133,068 ns/iter (+/- 13,938) = 4486 MB/s
-test regexdna::variant6                      ... bench:   1,061,174 ns/iter (+/- 14,478) = 4790 MB/s
-test regexdna::variant7                      ... bench:   1,151,637 ns/iter (+/- 35,753) = 4414 MB/s
-test regexdna::variant8                      ... bench:   1,137,068 ns/iter (+/- 37,678) = 4470 MB/s
-test regexdna::variant9                      ... bench:   1,185,082 ns/iter (+/- 14,355) = 4289 MB/s
-test rust_compile::compile_huge              ... bench:      66,894 ns/iter (+/- 2,425)
-test rust_compile::compile_huge_bytes        ... bench:   3,331,663 ns/iter (+/- 47,261)
-test rust_compile::compile_huge_full         ... bench:   6,446,254 ns/iter (+/- 65,334)
-test rust_compile::compile_simple            ... bench:       2,351 ns/iter (+/- 71)
-test rust_compile::compile_simple_bytes      ... bench:       2,350 ns/iter (+/- 49)
-test rust_compile::compile_simple_full       ... bench:      14,460 ns/iter (+/- 144)
-test rust_compile::compile_small             ... bench:      10,350 ns/iter (+/- 120)
-test rust_compile::compile_small_bytes       ... bench:      10,993 ns/iter (+/- 89)
-test rust_compile::compile_small_full        ... bench:      14,201 ns/iter (+/- 139)
-test sherlock::before_after_holmes           ... bench:     698,092 ns/iter (+/- 6,907) = 852 MB/s
-test sherlock::before_holmes                 ... bench:      29,127 ns/iter (+/- 1,001) = 20425 MB/s
-test sherlock::everything_greedy             ... bench:   1,026,902 ns/iter (+/- 86,299) = 579 MB/s
-test sherlock::everything_greedy_nl          ... bench:     433,157 ns/iter (+/- 10,129) = 1373 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      57,103 ns/iter (+/- 509) = 10418 MB/s
-test sherlock::holmes_coword_watson          ... bench:     344,973 ns/iter (+/- 3,288) = 1724 MB/s
-test sherlock::ing_suffix                    ... bench:     158,337 ns/iter (+/- 2,492) = 3757 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     776,703 ns/iter (+/- 8,000) = 765 MB/s
-test sherlock::letters                       ... bench:  10,179,909 ns/iter (+/- 55,188) = 58 MB/s
-test sherlock::letters_lower                 ... bench:  10,007,465 ns/iter (+/- 75,168) = 59 MB/s
-test sherlock::letters_upper                 ... bench:   1,116,201 ns/iter (+/- 11,571) = 532 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:     693,124 ns/iter (+/- 6,540) = 858 MB/s
-test sherlock::name_alt1                     ... bench:      12,079 ns/iter (+/- 192) = 49253 MB/s
-test sherlock::name_alt2                     ... bench:      44,336 ns/iter (+/- 1,424) = 13418 MB/s
-test sherlock::name_alt3                     ... bench:      49,569 ns/iter (+/- 721) = 12002 MB/s
-test sherlock::name_alt3_nocase              ... bench:     189,812 ns/iter (+/- 2,952) = 3134 MB/s
-test sherlock::name_alt4                     ... bench:      52,132 ns/iter (+/- 1,182) = 11412 MB/s
-test sherlock::name_alt4_nocase              ... bench:     120,591 ns/iter (+/- 2,521) = 4933 MB/s
-test sherlock::name_alt5                     ... bench:      46,956 ns/iter (+/- 545) = 12670 MB/s
-test sherlock::name_alt5_nocase              ... bench:     199,252 ns/iter (+/- 2,212) = 2985 MB/s
-test sherlock::name_holmes                   ... bench:      17,983 ns/iter (+/- 591) = 33083 MB/s
-test sherlock::name_holmes_nocase            ... bench:      58,139 ns/iter (+/- 919) = 10232 MB/s
-test sherlock::name_sherlock                 ... bench:      14,283 ns/iter (+/- 113) = 41653 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      14,587 ns/iter (+/- 82) = 40785 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      40,947 ns/iter (+/- 385) = 14529 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      39,607 ns/iter (+/- 567) = 15020 MB/s
-test sherlock::name_whitespace               ... bench:      18,803 ns/iter (+/- 232) = 31640 MB/s
-test sherlock::no_match_common               ... bench:      13,704 ns/iter (+/- 73) = 43413 MB/s
-test sherlock::no_match_really_common        ... bench:      14,166 ns/iter (+/- 191) = 41997 MB/s
-test sherlock::no_match_uncommon             ... bench:      13,702 ns/iter (+/- 36) = 43419 MB/s
-test sherlock::quotes                        ... bench:     232,609 ns/iter (+/- 3,217) = 2557 MB/s
-test sherlock::repeated_class_negation       ... bench:  36,167,769 ns/iter (+/- 592,579) = 16 MB/s
-test sherlock::the_lower                     ... bench:     188,281 ns/iter (+/- 2,966) = 3159 MB/s
-test sherlock::the_nocase                    ... bench:     312,853 ns/iter (+/- 23,145) = 1901 MB/s
-test sherlock::the_upper                     ... bench:      20,987 ns/iter (+/- 909) = 28347 MB/s
-test sherlock::the_whitespace                ... bench:     427,154 ns/iter (+/- 6,396) = 1392 MB/s
-test sherlock::word_ending_n                 ... bench:   1,112,964 ns/iter (+/- 15,393) = 534 MB/s
-test sherlock::words                         ... bench:   4,513,468 ns/iter (+/- 35,410) = 131 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 107 measured; 0 filtered out; finished in 143.96s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/13-regex-1.9.0/rust b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/13-regex-1.9.0/rust
deleted file mode 100644
index b46bdf91..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/13-regex-1.9.0/rust
+++ /dev/null
@@ -1,115 +0,0 @@
-
-running 110 tests
-test misc::anchored_literal_long_match       ... bench:          15 ns/iter (+/- 0) = 26000 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          12 ns/iter (+/- 1) = 32500 MB/s
-test misc::anchored_literal_short_match      ... bench:          15 ns/iter (+/- 0) = 1733 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          12 ns/iter (+/- 1) = 2166 MB/s
-test misc::easy0_1K                          ... bench:          42 ns/iter (+/- 0) = 25023 MB/s
-test misc::easy0_1MB                         ... bench:          42 ns/iter (+/- 0) = 24966738 MB/s
-test misc::easy0_32                          ... bench:          42 ns/iter (+/- 0) = 1404 MB/s
-test misc::easy0_32K                         ... bench:          43 ns/iter (+/- 0) = 762674 MB/s
-test misc::easy1_1K                          ... bench:          35 ns/iter (+/- 0) = 29828 MB/s
-test misc::easy1_1MB                         ... bench:          35 ns/iter (+/- 0) = 29959885 MB/s
-test misc::easy1_32                          ... bench:          35 ns/iter (+/- 0) = 1485 MB/s
-test misc::easy1_32K                         ... bench:          35 ns/iter (+/- 0) = 936800 MB/s
-test misc::hard_1K                           ... bench:          43 ns/iter (+/- 0) = 24441 MB/s
-test misc::hard_1MB                          ... bench:          42 ns/iter (+/- 0) = 24966738 MB/s
-test misc::hard_32                           ... bench:          42 ns/iter (+/- 0) = 1404 MB/s
-test misc::hard_32K                          ... bench:          42 ns/iter (+/- 0) = 780833 MB/s
-test misc::is_match_set                      ... bench:          46 ns/iter (+/- 1) = 543 MB/s
-test misc::literal                           ... bench:           9 ns/iter (+/- 0) = 5666 MB/s
-test misc::long_needle1                      ... bench:       1,801 ns/iter (+/- 24) = 55525 MB/s
-test misc::long_needle2                      ... bench:     194,124 ns/iter (+/- 289) = 515 MB/s
-test misc::match_class                       ... bench:          22 ns/iter (+/- 1) = 3681 MB/s
-test misc::match_class_in_range              ... bench:          10 ns/iter (+/- 0) = 8100 MB/s
-test misc::match_class_unicode               ... bench:         196 ns/iter (+/- 0) = 821 MB/s
-test misc::matches_set                       ... bench:          55 ns/iter (+/- 3) = 454 MB/s
-test misc::medium_1K                         ... bench:          43 ns/iter (+/- 0) = 24465 MB/s
-test misc::medium_1MB                        ... bench:          43 ns/iter (+/- 0) = 24386139 MB/s
-test misc::medium_32                         ... bench:          43 ns/iter (+/- 0) = 1395 MB/s
-test misc::medium_32K                        ... bench:          43 ns/iter (+/- 0) = 762697 MB/s
-test misc::no_exponential                    ... bench:         167 ns/iter (+/- 0) = 598 MB/s
-test misc::not_literal                       ... bench:          26 ns/iter (+/- 1) = 1961 MB/s
-test misc::one_pass_long_prefix              ... bench:          40 ns/iter (+/- 0) = 650 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          40 ns/iter (+/- 0) = 650 MB/s
-test misc::one_pass_short                    ... bench:          30 ns/iter (+/- 0) = 566 MB/s
-test misc::one_pass_short_not                ... bench:          31 ns/iter (+/- 0) = 548 MB/s
-test misc::reallyhard2_1K                    ... bench:          67 ns/iter (+/- 1) = 15522 MB/s
-test misc::reallyhard_1K                     ... bench:          78 ns/iter (+/- 1) = 13474 MB/s
-test misc::reallyhard_1MB                    ... bench:      19,310 ns/iter (+/- 80) = 54303 MB/s
-test misc::reallyhard_32                     ... bench:          62 ns/iter (+/- 2) = 951 MB/s
-test misc::reallyhard_32K                    ... bench:         543 ns/iter (+/- 4) = 60395 MB/s
-test misc::replace_all                       ... bench:         151 ns/iter (+/- 13)
-test misc::reverse_suffix_no_quadratic       ... bench:       9,302 ns/iter (+/- 25) = 860 MB/s
-test misc::short_haystack_1000000x           ... bench:      90,868 ns/iter (+/- 354) = 88039 MB/s
-test misc::short_haystack_100000x            ... bench:       7,215 ns/iter (+/- 18) = 110881 MB/s
-test misc::short_haystack_10000x             ... bench:         605 ns/iter (+/- 2) = 132249 MB/s
-test misc::short_haystack_1000x              ... bench:         148 ns/iter (+/- 2) = 54128 MB/s
-test misc::short_haystack_100x               ... bench:          83 ns/iter (+/- 3) = 9771 MB/s
-test misc::short_haystack_10x                ... bench:          89 ns/iter (+/- 1) = 1022 MB/s
-test misc::short_haystack_1x                 ... bench:          79 ns/iter (+/- 1) = 240 MB/s
-test misc::short_haystack_2x                 ... bench:          79 ns/iter (+/- 1) = 341 MB/s
-test misc::short_haystack_3x                 ... bench:          80 ns/iter (+/- 2) = 437 MB/s
-test misc::short_haystack_4x                 ... bench:          79 ns/iter (+/- 1) = 544 MB/s
-test regexdna::find_new_lines                ... bench:   1,748,215 ns/iter (+/- 25,793) = 2907 MB/s
-test regexdna::subst1                        ... bench:     486,169 ns/iter (+/- 11,425) = 10456 MB/s
-test regexdna::subst10                       ... bench:     479,019 ns/iter (+/- 7,468) = 10612 MB/s
-test regexdna::subst11                       ... bench:     481,118 ns/iter (+/- 10,305) = 10565 MB/s
-test regexdna::subst2                        ... bench:     484,508 ns/iter (+/- 11,753) = 10491 MB/s
-test regexdna::subst3                        ... bench:     481,861 ns/iter (+/- 7,991) = 10549 MB/s
-test regexdna::subst4                        ... bench:     477,043 ns/iter (+/- 12,101) = 10656 MB/s
-test regexdna::subst5                        ... bench:     483,954 ns/iter (+/- 7,728) = 10503 MB/s
-test regexdna::subst6                        ... bench:     479,564 ns/iter (+/- 13,514) = 10600 MB/s
-test regexdna::subst7                        ... bench:     481,345 ns/iter (+/- 11,205) = 10560 MB/s
-test regexdna::subst8                        ... bench:     479,772 ns/iter (+/- 13,266) = 10595 MB/s
-test regexdna::subst9                        ... bench:     480,299 ns/iter (+/- 9,997) = 10583 MB/s
-test regexdna::variant1                      ... bench:     693,230 ns/iter (+/- 21,808) = 7332 MB/s
-test regexdna::variant2                      ... bench:     936,552 ns/iter (+/- 9,916) = 5427 MB/s
-test regexdna::variant3                      ... bench:   1,192,921 ns/iter (+/- 11,038) = 4261 MB/s
-test regexdna::variant4                      ... bench:   1,170,341 ns/iter (+/- 27,745) = 4343 MB/s
-test regexdna::variant5                      ... bench:   1,166,877 ns/iter (+/- 8,369) = 4356 MB/s
-test regexdna::variant6                      ... bench:   1,085,919 ns/iter (+/- 9,594) = 4681 MB/s
-test regexdna::variant7                      ... bench:   1,248,718 ns/iter (+/- 13,480) = 4070 MB/s
-test regexdna::variant8                      ... bench:   1,216,643 ns/iter (+/- 15,505) = 4178 MB/s
-test regexdna::variant9                      ... bench:   1,219,951 ns/iter (+/- 14,109) = 4166 MB/s
-test sherlock::before_after_holmes           ... bench:      27,363 ns/iter (+/- 604) = 21742 MB/s
-test sherlock::before_holmes                 ... bench:      31,147 ns/iter (+/- 876) = 19100 MB/s
-test sherlock::everything_greedy             ... bench:   1,326,354 ns/iter (+/- 22,628) = 448 MB/s
-test sherlock::everything_greedy_nl          ... bench:     801,343 ns/iter (+/- 895) = 742 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      56,328 ns/iter (+/- 1,009) = 10561 MB/s
-test sherlock::holmes_coword_watson          ... bench:     301,186 ns/iter (+/- 3,615) = 1975 MB/s
-test sherlock::ing_suffix                    ... bench:     176,428 ns/iter (+/- 2,182) = 3372 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     173,948 ns/iter (+/- 5,073) = 3420 MB/s
-test sherlock::letters                       ... bench:   7,226,608 ns/iter (+/- 261,849) = 82 MB/s
-test sherlock::letters_lower                 ... bench:   7,024,589 ns/iter (+/- 145,281) = 84 MB/s
-test sherlock::letters_upper                 ... bench:   1,004,841 ns/iter (+/- 6,857) = 592 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:      15,978 ns/iter (+/- 90) = 37234 MB/s
-test sherlock::name_alt1                     ... bench:      11,151 ns/iter (+/- 289) = 53352 MB/s
-test sherlock::name_alt2                     ... bench:      45,441 ns/iter (+/- 960) = 13092 MB/s
-test sherlock::name_alt3                     ... bench:      51,934 ns/iter (+/- 806) = 11455 MB/s
-test sherlock::name_alt3_nocase              ... bench:     171,844 ns/iter (+/- 4,176) = 3462 MB/s
-test sherlock::name_alt4                     ... bench:      46,611 ns/iter (+/- 1,072) = 12763 MB/s
-test sherlock::name_alt4_nocase              ... bench:      74,956 ns/iter (+/- 2,098) = 7937 MB/s
-test sherlock::name_alt5                     ... bench:      47,595 ns/iter (+/- 595) = 12499 MB/s
-test sherlock::name_alt5_nocase              ... bench:     100,636 ns/iter (+/- 814) = 5911 MB/s
-test sherlock::name_holmes                   ... bench:      19,293 ns/iter (+/- 687) = 30836 MB/s
-test sherlock::name_holmes_nocase            ... bench:      52,310 ns/iter (+/- 1,024) = 11373 MB/s
-test sherlock::name_sherlock                 ... bench:      16,080 ns/iter (+/- 327) = 36998 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      14,605 ns/iter (+/- 120) = 40734 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      38,662 ns/iter (+/- 360) = 15388 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      37,650 ns/iter (+/- 316) = 15801 MB/s
-test sherlock::name_whitespace               ... bench:      16,234 ns/iter (+/- 125) = 36647 MB/s
-test sherlock::no_match_common               ... bench:      13,709 ns/iter (+/- 72) = 43397 MB/s
-test sherlock::no_match_really_common        ... bench:       9,870 ns/iter (+/- 133) = 60276 MB/s
-test sherlock::no_match_uncommon             ... bench:      13,735 ns/iter (+/- 57) = 43315 MB/s
-test sherlock::quotes                        ... bench:     189,377 ns/iter (+/- 2,105) = 3141 MB/s
-test sherlock::repeated_class_negation       ... bench:      29,934 ns/iter (+/- 1,249) = 19874 MB/s
-test sherlock::the_lower                     ... bench:     213,236 ns/iter (+/- 3,823) = 2790 MB/s
-test sherlock::the_nocase                    ... bench:     322,922 ns/iter (+/- 5,946) = 1842 MB/s
-test sherlock::the_upper                     ... bench:      23,494 ns/iter (+/- 718) = 25322 MB/s
-test sherlock::the_whitespace                ... bench:     392,113 ns/iter (+/- 6,046) = 1517 MB/s
-test sherlock::word_ending_n                 ... bench:     673,618 ns/iter (+/- 12,865) = 883 MB/s
-test sherlock::words                         ... bench:   3,632,096 ns/iter (+/- 56,944) = 163 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 110 measured; 0 filtered out; finished in 117.87s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/13-regex-1.9.0/rust-bytes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/13-regex-1.9.0/rust-bytes
deleted file mode 100644
index 8ac6c04..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/13-regex-1.9.0/rust-bytes
+++ /dev/null
@@ -1,103 +0,0 @@
-
-running 98 tests
-test misc::anchored_literal_long_match       ... bench:          15 ns/iter (+/- 0) = 26000 MB/s
-test misc::anchored_literal_long_non_match   ... bench:          12 ns/iter (+/- 0) = 32500 MB/s
-test misc::anchored_literal_short_match      ... bench:          15 ns/iter (+/- 0) = 1733 MB/s
-test misc::anchored_literal_short_non_match  ... bench:          12 ns/iter (+/- 0) = 2166 MB/s
-test misc::easy0_1K                          ... bench:          42 ns/iter (+/- 0) = 25023 MB/s
-test misc::easy0_1MB                         ... bench:          42 ns/iter (+/- 0) = 24966738 MB/s
-test misc::easy0_32                          ... bench:          42 ns/iter (+/- 0) = 1404 MB/s
-test misc::easy0_32K                         ... bench:          42 ns/iter (+/- 0) = 780833 MB/s
-test misc::easy1_1K                          ... bench:          34 ns/iter (+/- 1) = 30705 MB/s
-test misc::easy1_1MB                         ... bench:          34 ns/iter (+/- 0) = 30841058 MB/s
-test misc::easy1_32                          ... bench:          34 ns/iter (+/- 0) = 1529 MB/s
-test misc::easy1_32K                         ... bench:          34 ns/iter (+/- 0) = 964352 MB/s
-test misc::hard_1K                           ... bench:          42 ns/iter (+/- 0) = 25023 MB/s
-test misc::hard_1MB                          ... bench:          42 ns/iter (+/- 0) = 24966738 MB/s
-test misc::hard_32                           ... bench:          42 ns/iter (+/- 0) = 1404 MB/s
-test misc::hard_32K                          ... bench:          42 ns/iter (+/- 0) = 780833 MB/s
-test misc::is_match_set                      ... bench:          47 ns/iter (+/- 1) = 531 MB/s
-test misc::literal                           ... bench:          10 ns/iter (+/- 0) = 5100 MB/s
-test misc::long_needle1                      ... bench:       1,808 ns/iter (+/- 7) = 55310 MB/s
-test misc::long_needle2                      ... bench:     213,106 ns/iter (+/- 416) = 469 MB/s
-test misc::match_class                       ... bench:          23 ns/iter (+/- 1) = 3521 MB/s
-test misc::match_class_in_range              ... bench:          11 ns/iter (+/- 0) = 7363 MB/s
-test misc::matches_set                       ... bench:          56 ns/iter (+/- 3) = 446 MB/s
-test misc::medium_1K                         ... bench:          43 ns/iter (+/- 0) = 24465 MB/s
-test misc::medium_1MB                        ... bench:          43 ns/iter (+/- 0) = 24386139 MB/s
-test misc::medium_32                         ... bench:          43 ns/iter (+/- 0) = 1395 MB/s
-test misc::medium_32K                        ... bench:          43 ns/iter (+/- 0) = 762697 MB/s
-test misc::no_exponential                    ... bench:         162 ns/iter (+/- 4) = 617 MB/s
-test misc::not_literal                       ... bench:          27 ns/iter (+/- 1) = 1888 MB/s
-test misc::one_pass_long_prefix              ... bench:          41 ns/iter (+/- 0) = 634 MB/s
-test misc::one_pass_long_prefix_not          ... bench:          41 ns/iter (+/- 0) = 634 MB/s
-test misc::one_pass_short                    ... bench:          30 ns/iter (+/- 0) = 566 MB/s
-test misc::one_pass_short_not                ... bench:          31 ns/iter (+/- 0) = 548 MB/s
-test misc::reallyhard2_1K                    ... bench:          70 ns/iter (+/- 1) = 14857 MB/s
-test misc::reallyhard_1K                     ... bench:          78 ns/iter (+/- 3) = 13474 MB/s
-test misc::reallyhard_1MB                    ... bench:      19,850 ns/iter (+/- 345) = 52826 MB/s
-test misc::reallyhard_32                     ... bench:          61 ns/iter (+/- 2) = 967 MB/s
-test misc::reallyhard_32K                    ... bench:         546 ns/iter (+/- 8) = 60064 MB/s
-test misc::reverse_suffix_no_quadratic       ... bench:       9,304 ns/iter (+/- 29) = 859 MB/s
-test regexdna::find_new_lines                ... bench:   1,733,767 ns/iter (+/- 66,699) = 2932 MB/s
-test regexdna::subst1                        ... bench:     486,442 ns/iter (+/- 11,929) = 10450 MB/s
-test regexdna::subst10                       ... bench:     486,073 ns/iter (+/- 12,157) = 10458 MB/s
-test regexdna::subst11                       ... bench:     483,485 ns/iter (+/- 11,703) = 10514 MB/s
-test regexdna::subst2                        ... bench:     487,298 ns/iter (+/- 9,184) = 10431 MB/s
-test regexdna::subst3                        ... bench:     491,219 ns/iter (+/- 9,614) = 10348 MB/s
-test regexdna::subst4                        ... bench:     482,668 ns/iter (+/- 9,576) = 10531 MB/s
-test regexdna::subst5                        ... bench:     489,673 ns/iter (+/- 8,331) = 10381 MB/s
-test regexdna::subst6                        ... bench:     484,707 ns/iter (+/- 5,276) = 10487 MB/s
-test regexdna::subst7                        ... bench:     485,109 ns/iter (+/- 9,360) = 10478 MB/s
-test regexdna::subst8                        ... bench:     485,790 ns/iter (+/- 9,298) = 10464 MB/s
-test regexdna::subst9                        ... bench:     483,255 ns/iter (+/- 12,434) = 10519 MB/s
-test regexdna::variant1                      ... bench:     654,757 ns/iter (+/- 8,719) = 7763 MB/s
-test regexdna::variant2                      ... bench:     905,052 ns/iter (+/- 9,599) = 5616 MB/s
-test regexdna::variant3                      ... bench:   1,161,187 ns/iter (+/- 13,798) = 4377 MB/s
-test regexdna::variant4                      ... bench:   1,144,656 ns/iter (+/- 15,198) = 4440 MB/s
-test regexdna::variant5                      ... bench:   1,136,222 ns/iter (+/- 9,112) = 4473 MB/s
-test regexdna::variant6                      ... bench:   1,062,124 ns/iter (+/- 12,336) = 4786 MB/s
-test regexdna::variant7                      ... bench:   1,144,371 ns/iter (+/- 44,700) = 4442 MB/s
-test regexdna::variant8                      ... bench:   1,143,064 ns/iter (+/- 53,456) = 4447 MB/s
-test regexdna::variant9                      ... bench:   1,187,063 ns/iter (+/- 14,341) = 4282 MB/s
-test sherlock::before_after_holmes           ... bench:      27,804 ns/iter (+/- 598) = 21397 MB/s
-test sherlock::before_holmes                 ... bench:      31,197 ns/iter (+/- 933) = 19070 MB/s
-test sherlock::everything_greedy             ... bench:   1,272,335 ns/iter (+/- 12,466) = 467 MB/s
-test sherlock::everything_greedy_nl          ... bench:     801,469 ns/iter (+/- 955) = 742 MB/s
-test sherlock::holmes_cochar_watson          ... bench:      56,790 ns/iter (+/- 1,606) = 10476 MB/s
-test sherlock::holmes_coword_watson          ... bench:     300,554 ns/iter (+/- 3,460) = 1979 MB/s
-test sherlock::ing_suffix                    ... bench:     179,355 ns/iter (+/- 5,486) = 3317 MB/s
-test sherlock::ing_suffix_limited_space      ... bench:     175,703 ns/iter (+/- 2,380) = 3386 MB/s
-test sherlock::letters                       ... bench:   7,197,094 ns/iter (+/- 181,502) = 82 MB/s
-test sherlock::letters_lower                 ... bench:   7,100,979 ns/iter (+/- 155,898) = 83 MB/s
-test sherlock::letters_upper                 ... bench:   1,018,217 ns/iter (+/- 21,695) = 584 MB/s
-test sherlock::line_boundary_sherlock_holmes ... bench:      15,931 ns/iter (+/- 140) = 37344 MB/s
-test sherlock::name_alt1                     ... bench:      10,932 ns/iter (+/- 96) = 54421 MB/s
-test sherlock::name_alt2                     ... bench:      45,580 ns/iter (+/- 829) = 13052 MB/s
-test sherlock::name_alt3                     ... bench:      51,942 ns/iter (+/- 1,418) = 11453 MB/s
-test sherlock::name_alt3_nocase              ... bench:     171,749 ns/iter (+/- 1,451) = 3463 MB/s
-test sherlock::name_alt4                     ... bench:      45,705 ns/iter (+/- 1,536) = 13016 MB/s
-test sherlock::name_alt4_nocase              ... bench:      73,782 ns/iter (+/- 1,679) = 8063 MB/s
-test sherlock::name_alt5                     ... bench:      48,045 ns/iter (+/- 1,261) = 12382 MB/s
-test sherlock::name_alt5_nocase              ... bench:     100,307 ns/iter (+/- 553) = 5931 MB/s
-test sherlock::name_holmes                   ... bench:      18,916 ns/iter (+/- 662) = 31451 MB/s
-test sherlock::name_holmes_nocase            ... bench:      52,714 ns/iter (+/- 774) = 11286 MB/s
-test sherlock::name_sherlock                 ... bench:      14,575 ns/iter (+/- 163) = 40818 MB/s
-test sherlock::name_sherlock_holmes          ... bench:      14,625 ns/iter (+/- 166) = 40679 MB/s
-test sherlock::name_sherlock_holmes_nocase   ... bench:      39,024 ns/iter (+/- 361) = 15245 MB/s
-test sherlock::name_sherlock_nocase          ... bench:      38,025 ns/iter (+/- 418) = 15645 MB/s
-test sherlock::name_whitespace               ... bench:      16,247 ns/iter (+/- 88) = 36618 MB/s
-test sherlock::no_match_common               ... bench:      13,724 ns/iter (+/- 28) = 43349 MB/s
-test sherlock::no_match_really_common        ... bench:      13,798 ns/iter (+/- 93) = 43117 MB/s
-test sherlock::no_match_uncommon             ... bench:      13,671 ns/iter (+/- 80) = 43517 MB/s
-test sherlock::quotes                        ... bench:     189,359 ns/iter (+/- 2,334) = 3141 MB/s
-test sherlock::repeated_class_negation       ... bench:      29,083 ns/iter (+/- 708) = 20456 MB/s
-test sherlock::the_lower                     ... bench:     204,122 ns/iter (+/- 4,256) = 2914 MB/s
-test sherlock::the_nocase                    ... bench:     319,388 ns/iter (+/- 6,790) = 1862 MB/s
-test sherlock::the_upper                     ... bench:      22,706 ns/iter (+/- 961) = 26201 MB/s
-test sherlock::the_whitespace                ... bench:     386,276 ns/iter (+/- 4,950) = 1540 MB/s
-test sherlock::word_ending_n                 ... bench:     690,010 ns/iter (+/- 8,516) = 862 MB/s
-test sherlock::words                         ... bench:   3,659,990 ns/iter (+/- 104,505) = 162 MB/s
-
-test result: ok. 0 passed; 0 failed; 0 ignored; 98 measured; 0 filtered out; finished in 105.65s
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/README.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/README.md
deleted file mode 100644
index aab290e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-These represent an old log of benchmarks from regex 1.7.3 and older. New
-and much more comprehensive benchmarks are now maintained as part of the
-[rebar] project.
-
-We keep these old benchmark recordings for posterity, but they may be removed
-in the future.
-
-Measurements can be compared using the [`cargo-benchcmp`][cargo-benchcmp] tool.
-
-[rebar]: https://github.com/BurntSushi/rebar
-[cargo-benchcmp]: https://github.com/BurntSushi/cargo-benchcmp
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/01-before b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/01-before
deleted file mode 100644
index 74890a3..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/01-before
+++ /dev/null
@@ -1,28 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:       520 ns/iter (+/- 1)
-test bench::anchored_literal_long_non_match  ... bench:       236 ns/iter (+/- 2)
-test bench::anchored_literal_short_match     ... bench:       519 ns/iter (+/- 2)
-test bench::anchored_literal_short_non_match ... bench:       238 ns/iter (+/- 2)
-test bench::easy0_1K                         ... bench:      7742 ns/iter (+/- 97) = 132 MB/s
-test bench::easy0_32                         ... bench:      4989 ns/iter (+/- 20) = 6 MB/s
-test bench::easy0_32K                        ... bench:     96347 ns/iter (+/- 997) = 340 MB/s
-test bench::easy1_1K                         ... bench:      9805 ns/iter (+/- 1846) = 104 MB/s
-test bench::easy1_32                         ... bench:      4930 ns/iter (+/- 202) = 6 MB/s
-test bench::easy1_32K                        ... bench:    163332 ns/iter (+/- 9207) = 200 MB/s
-test bench::hard_1K                          ... bench:     97455 ns/iter (+/- 1089) = 10 MB/s
-test bench::hard_32                          ... bench:      8256 ns/iter (+/- 148) = 3 MB/s
-test bench::hard_32K                         ... bench:   2948095 ns/iter (+/- 11988) = 11 MB/s
-test bench::literal                          ... bench:       371 ns/iter (+/- 5)
-test bench::match_class                      ... bench:      2168 ns/iter (+/- 12)
-test bench::match_class_in_range             ... bench:      2379 ns/iter (+/- 13)
-test bench::medium_1K                        ... bench:     37073 ns/iter (+/- 1100) = 27 MB/s
-test bench::medium_32                        ... bench:      6183 ns/iter (+/- 218) = 5 MB/s
-test bench::medium_32K                       ... bench:   1032000 ns/iter (+/- 8278) = 31 MB/s
-test bench::no_exponential                   ... bench:    727975 ns/iter (+/- 2970)
-test bench::not_literal                      ... bench:      4670 ns/iter (+/- 29)
-test bench::one_pass_long_prefix             ... bench:      1562 ns/iter (+/- 24)
-test bench::one_pass_long_prefix_not         ... bench:      1539 ns/iter (+/- 40)
-test bench::one_pass_short_a                 ... bench:      2688 ns/iter (+/- 21)
-test bench::one_pass_short_a_not             ... bench:      4197 ns/iter (+/- 36)
-test bench::one_pass_short_b                 ... bench:      2198 ns/iter (+/- 22)
-test bench::one_pass_short_b_not             ... bench:      3761 ns/iter (+/- 41)
-test bench::replace_all                      ... bench:      2874 ns/iter (+/- 25)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/02-new-syntax-crate b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/02-new-syntax-crate
deleted file mode 100644
index 267808ff..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/02-new-syntax-crate
+++ /dev/null
@@ -1,28 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:       545 ns/iter (+/- 12)
-test bench::anchored_literal_long_non_match  ... bench:       251 ns/iter (+/- 11)
-test bench::anchored_literal_short_match     ... bench:       521 ns/iter (+/- 31)
-test bench::anchored_literal_short_non_match ... bench:       231 ns/iter (+/- 0)
-test bench::easy0_1K                         ... bench:      7465 ns/iter (+/- 102) = 137 MB/s
-test bench::easy0_32                         ... bench:      4995 ns/iter (+/- 27) = 6 MB/s
-test bench::easy0_32K                        ... bench:     86985 ns/iter (+/- 755) = 376 MB/s
-test bench::easy1_1K                         ... bench:      9493 ns/iter (+/- 1727) = 107 MB/s
-test bench::easy1_32                         ... bench:      4955 ns/iter (+/- 324) = 6 MB/s
-test bench::easy1_32K                        ... bench:    155288 ns/iter (+/- 13016) = 210 MB/s
-test bench::hard_1K                          ... bench:     95925 ns/iter (+/- 1674) = 10 MB/s
-test bench::hard_32                          ... bench:      8264 ns/iter (+/- 151) = 3 MB/s
-test bench::hard_32K                         ... bench:   2886440 ns/iter (+/- 25807) = 11 MB/s
-test bench::literal                          ... bench:       365 ns/iter (+/- 12)
-test bench::match_class                      ... bench:      2313 ns/iter (+/- 8)
-test bench::match_class_in_range             ... bench:      2596 ns/iter (+/- 8)
-test bench::medium_1K                        ... bench:     38136 ns/iter (+/- 941) = 26 MB/s
-test bench::medium_32                        ... bench:      6178 ns/iter (+/- 147) = 5 MB/s
-test bench::medium_32K                       ... bench:   1065698 ns/iter (+/- 6815) = 30 MB/s
-test bench::no_exponential                   ... bench:    682461 ns/iter (+/- 2860)
-test bench::not_literal                      ... bench:      4525 ns/iter (+/- 67)
-test bench::one_pass_long_prefix             ... bench:      1459 ns/iter (+/- 13)
-test bench::one_pass_long_prefix_not         ... bench:      1463 ns/iter (+/- 8)
-test bench::one_pass_short_a                 ... bench:      2615 ns/iter (+/- 10)
-test bench::one_pass_short_a_not             ... bench:      4066 ns/iter (+/- 48)
-test bench::one_pass_short_b                 ... bench:      2064 ns/iter (+/- 10)
-test bench::one_pass_short_b_not             ... bench:      3502 ns/iter (+/- 24)
-test bench::replace_all                      ... bench:      2949 ns/iter (+/- 15)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/03-new-syntax-crate b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/03-new-syntax-crate
deleted file mode 100644
index a50005d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/03-new-syntax-crate
+++ /dev/null
@@ -1,28 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:       373 ns/iter (+/- 5)
-test bench::anchored_literal_long_non_match  ... bench:       204 ns/iter (+/- 3)
-test bench::anchored_literal_short_match     ... bench:       376 ns/iter (+/- 5)
-test bench::anchored_literal_short_non_match ... bench:       206 ns/iter (+/- 3)
-test bench::easy0_1K                         ... bench:      9136 ns/iter (+/- 177) = 112 MB/s
-test bench::easy0_32                         ... bench:      6641 ns/iter (+/- 86) = 4 MB/s
-test bench::easy0_32K                        ... bench:     88826 ns/iter (+/- 1366) = 368 MB/s
-test bench::easy1_1K                         ... bench:     10937 ns/iter (+/- 737) = 93 MB/s
-test bench::easy1_32                         ... bench:      7366 ns/iter (+/- 219) = 4 MB/s
-test bench::easy1_32K                        ... bench:    122324 ns/iter (+/- 4628) = 267 MB/s
-test bench::hard_1K                          ... bench:     59998 ns/iter (+/- 965) = 17 MB/s
-test bench::hard_32                          ... bench:      9058 ns/iter (+/- 123) = 3 MB/s
-test bench::hard_32K                         ... bench:   1694326 ns/iter (+/- 27226) = 19 MB/s
-test bench::literal                          ... bench:       336 ns/iter (+/- 6)
-test bench::match_class                      ... bench:      2109 ns/iter (+/- 27)
-test bench::match_class_in_range             ... bench:      2274 ns/iter (+/- 32)
-test bench::medium_1K                        ... bench:     38317 ns/iter (+/- 1075) = 26 MB/s
-test bench::medium_32                        ... bench:      7969 ns/iter (+/- 115) = 4 MB/s
-test bench::medium_32K                       ... bench:   1028260 ns/iter (+/- 12905) = 31 MB/s
-test bench::no_exponential                   ... bench:    257719 ns/iter (+/- 4939)
-test bench::not_literal                      ... bench:      1699 ns/iter (+/- 31)
-test bench::one_pass_long_prefix             ... bench:       750 ns/iter (+/- 9)
-test bench::one_pass_long_prefix_not         ... bench:       747 ns/iter (+/- 12)
-test bench::one_pass_short_a                 ... bench:      1844 ns/iter (+/- 22)
-test bench::one_pass_short_a_not             ... bench:      2395 ns/iter (+/- 21)
-test bench::one_pass_short_b                 ... bench:      1270 ns/iter (+/- 26)
-test bench::one_pass_short_b_not             ... bench:      1869 ns/iter (+/- 25)
-test bench::replace_all                      ... bench:      3124 ns/iter (+/- 53)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/04-fixed-benchmark b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/04-fixed-benchmark
deleted file mode 100644
index 1956e98..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/04-fixed-benchmark
+++ /dev/null
@@ -1,28 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         373 ns/iter (+/- 5)
-test bench::anchored_literal_long_non_match  ... bench:         202 ns/iter (+/- 12)
-test bench::anchored_literal_short_match     ... bench:         380 ns/iter (+/- 135)
-test bench::anchored_literal_short_non_match ... bench:         211 ns/iter (+/- 1)
-test bench::easy0_1K                         ... bench:       2,723 ns/iter (+/- 101) = 376 MB/s
-test bench::easy0_32                         ... bench:         255 ns/iter (+/- 2) = 125 MB/s
-test bench::easy0_32K                        ... bench:      81,845 ns/iter (+/- 598) = 400 MB/s
-test bench::easy1_1K                         ... bench:       3,872 ns/iter (+/- 783) = 264 MB/s
-test bench::easy1_32                         ... bench:         287 ns/iter (+/- 143) = 111 MB/s
-test bench::easy1_32K                        ... bench:     115,340 ns/iter (+/- 4,717) = 284 MB/s
-test bench::hard_1K                          ... bench:      52,484 ns/iter (+/- 472) = 19 MB/s
-test bench::hard_32                          ... bench:       1,923 ns/iter (+/- 49) = 16 MB/s
-test bench::hard_32K                         ... bench:   1,710,214 ns/iter (+/- 9,733) = 19 MB/s
-test bench::literal                          ... bench:         337 ns/iter (+/- 13)
-test bench::match_class                      ... bench:       2,141 ns/iter (+/- 7)
-test bench::match_class_in_range             ... bench:       2,301 ns/iter (+/- 7)
-test bench::medium_1K                        ... bench:      31,696 ns/iter (+/- 961) = 32 MB/s
-test bench::medium_32                        ... bench:       1,155 ns/iter (+/- 71) = 27 MB/s
-test bench::medium_32K                       ... bench:   1,016,101 ns/iter (+/- 12,090) = 32 MB/s
-test bench::no_exponential                   ... bench:     262,801 ns/iter (+/- 1,332)
-test bench::not_literal                      ... bench:       1,729 ns/iter (+/- 3)
-test bench::one_pass_long_prefix             ... bench:         779 ns/iter (+/- 4)
-test bench::one_pass_long_prefix_not         ... bench:         779 ns/iter (+/- 6)
-test bench::one_pass_short_a                 ... bench:       1,943 ns/iter (+/- 10)
-test bench::one_pass_short_a_not             ... bench:       2,545 ns/iter (+/- 9)
-test bench::one_pass_short_b                 ... bench:       1,364 ns/iter (+/- 4)
-test bench::one_pass_short_b_not             ... bench:       2,029 ns/iter (+/- 22)
-test bench::replace_all                      ... bench:       3,185 ns/iter (+/- 12)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/05-thread-caching b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/05-thread-caching
deleted file mode 100644
index 238f978..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/05-thread-caching
+++ /dev/null
@@ -1,29 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:       287 ns/iter (+/- 11)
-test bench::anchored_literal_long_non_match  ... bench:       111 ns/iter (+/- 0)
-test bench::anchored_literal_short_match     ... bench:       286 ns/iter (+/- 4)
-test bench::anchored_literal_short_non_match ... bench:       114 ns/iter (+/- 0)
-test bench::easy0_1K                         ... bench:      2562 ns/iter (+/- 94) = 399 MB/s
-test bench::easy0_32                         ... bench:        95 ns/iter (+/- 1) = 336 MB/s
-test bench::easy0_32K                        ... bench:     81755 ns/iter (+/- 576) = 400 MB/s
-test bench::easy1_1K                         ... bench:      3586 ns/iter (+/- 917) = 285 MB/s
-test bench::easy1_32                         ... bench:       155 ns/iter (+/- 132) = 206 MB/s
-test bench::easy1_32K                        ... bench:    113980 ns/iter (+/- 9331) = 287 MB/s
-test bench::hard_1K                          ... bench:     54573 ns/iter (+/- 565) = 18 MB/s
-test bench::hard_32                          ... bench:      1806 ns/iter (+/- 44) = 17 MB/s
-test bench::hard_32K                         ... bench:   1754465 ns/iter (+/- 7867) = 18 MB/s
-test bench::literal                          ... bench:       299 ns/iter (+/- 1)
-test bench::match_class                      ... bench:      2399 ns/iter (+/- 23)
-test bench::match_class_in_range             ... bench:      2142 ns/iter (+/- 8)
-test bench::match_class_unicode              ... bench:      2804 ns/iter (+/- 9)
-test bench::medium_1K                        ... bench:     29536 ns/iter (+/- 537) = 34 MB/s
-test bench::medium_32                        ... bench:       962 ns/iter (+/- 59) = 33 MB/s
-test bench::medium_32K                       ... bench:    946483 ns/iter (+/- 7106) = 34 MB/s
-test bench::no_exponential                   ... bench:    274301 ns/iter (+/- 552)
-test bench::not_literal                      ... bench:      2039 ns/iter (+/- 13)
-test bench::one_pass_long_prefix             ... bench:       573 ns/iter (+/- 3)
-test bench::one_pass_long_prefix_not         ... bench:       577 ns/iter (+/- 4)
-test bench::one_pass_short_a                 ... bench:      1951 ns/iter (+/- 29)
-test bench::one_pass_short_a_not             ... bench:      2464 ns/iter (+/- 10)
-test bench::one_pass_short_b                 ... bench:      1301 ns/iter (+/- 6)
-test bench::one_pass_short_b_not             ... bench:      1785 ns/iter (+/- 6)
-test bench::replace_all                      ... bench:      2168 ns/iter (+/- 152)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/06-major-dynamic b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/06-major-dynamic
deleted file mode 100644
index 123efdd..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/06-major-dynamic
+++ /dev/null
@@ -1,33 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         206 ns/iter (+/- 7)
-test bench::anchored_literal_long_non_match  ... bench:          97 ns/iter (+/- 1)
-test bench::anchored_literal_short_match     ... bench:         193 ns/iter (+/- 1)
-test bench::anchored_literal_short_non_match ... bench:          86 ns/iter (+/- 0)
-test bench::easy0_1K                         ... bench:         356 ns/iter (+/- 136) = 2876 MB/s
-test bench::easy0_1MB                        ... bench:     352,434 ns/iter (+/- 7,874) = 2974 MB/s
-test bench::easy0_32                         ... bench:          72 ns/iter (+/- 21) = 444 MB/s
-test bench::easy0_32K                        ... bench:      11,053 ns/iter (+/- 1,388) = 2964 MB/s
-test bench::easy1_1K                         ... bench:         331 ns/iter (+/- 162) = 3093 MB/s
-test bench::easy1_1MB                        ... bench:     353,723 ns/iter (+/- 6,836) = 2964 MB/s
-test bench::easy1_32                         ... bench:          73 ns/iter (+/- 20) = 438 MB/s
-test bench::easy1_32K                        ... bench:      10,297 ns/iter (+/- 1,137) = 3182 MB/s
-test bench::hard_1K                          ... bench:      34,951 ns/iter (+/- 171) = 29 MB/s
-test bench::hard_1MB                         ... bench:  63,323,613 ns/iter (+/- 279,582) = 15 MB/s
-test bench::hard_32                          ... bench:       1,131 ns/iter (+/- 13) = 28 MB/s
-test bench::hard_32K                         ... bench:   1,099,921 ns/iter (+/- 1,338) = 29 MB/s
-test bench::literal                          ... bench:          16 ns/iter (+/- 0)
-test bench::match_class                      ... bench:         188 ns/iter (+/- 0)
-test bench::match_class_in_range             ... bench:         188 ns/iter (+/- 0)
-test bench::match_class_unicode              ... bench:       1,940 ns/iter (+/- 10)
-test bench::medium_1K                        ... bench:       5,262 ns/iter (+/- 256) = 194 MB/s
-test bench::medium_1MB                       ... bench:   5,295,539 ns/iter (+/- 9,808) = 197 MB/s
-test bench::medium_32                        ... bench:         217 ns/iter (+/- 19) = 147 MB/s
-test bench::medium_32K                       ... bench:     169,169 ns/iter (+/- 1,606) = 193 MB/s
-test bench::no_exponential                   ... bench:     293,739 ns/iter (+/- 1,632)
-test bench::not_literal                      ... bench:       1,371 ns/iter (+/- 136)
-test bench::one_pass_long_prefix             ... bench:         337 ns/iter (+/- 6)
-test bench::one_pass_long_prefix_not         ... bench:         341 ns/iter (+/- 6)
-test bench::one_pass_short_a                 ... bench:       1,399 ns/iter (+/- 16)
-test bench::one_pass_short_a_not             ... bench:       1,229 ns/iter (+/- 13)
-test bench::one_pass_short_b                 ... bench:         844 ns/iter (+/- 24)
-test bench::one_pass_short_b_not             ... bench:         849 ns/iter (+/- 45)
-test bench::replace_all                      ... bench:         579 ns/iter (+/- 3)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/06-major-macro b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/06-major-macro
deleted file mode 100644
index 199561d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/06-major-macro
+++ /dev/null
@@ -1,33 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         225 ns/iter (+/- 22)
-test bench::anchored_literal_long_non_match  ... bench:          62 ns/iter (+/- 1)
-test bench::anchored_literal_short_match     ... bench:         225 ns/iter (+/- 1)
-test bench::anchored_literal_short_non_match ... bench:          60 ns/iter (+/- 1)
-test bench::easy0_1K                         ... bench:      29,984 ns/iter (+/- 190) = 34 MB/s
-test bench::easy0_1MB                        ... bench:  30,641,690 ns/iter (+/- 110,535) = 33 MB/s
-test bench::easy0_32                         ... bench:         981 ns/iter (+/- 12) = 32 MB/s
-test bench::easy0_32K                        ... bench:     957,358 ns/iter (+/- 2,633) = 34 MB/s
-test bench::easy1_1K                         ... bench:      29,636 ns/iter (+/- 150) = 34 MB/s
-test bench::easy1_1MB                        ... bench:  30,295,321 ns/iter (+/- 98,181) = 34 MB/s
-test bench::easy1_32                         ... bench:         971 ns/iter (+/- 30) = 32 MB/s
-test bench::easy1_32K                        ... bench:     947,307 ns/iter (+/- 4,258) = 34 MB/s
-test bench::hard_1K                          ... bench:      54,856 ns/iter (+/- 209) = 18 MB/s
-test bench::hard_1MB                         ... bench:  56,126,571 ns/iter (+/- 224,163) = 17 MB/s
-test bench::hard_32                          ... bench:       1,776 ns/iter (+/- 23) = 18 MB/s
-test bench::hard_32K                         ... bench:   1,753,833 ns/iter (+/- 54,427) = 18 MB/s
-test bench::literal                          ... bench:       1,516 ns/iter (+/- 6)
-test bench::match_class                      ... bench:       2,429 ns/iter (+/- 11)
-test bench::match_class_in_range             ... bench:       2,398 ns/iter (+/- 4)
-test bench::match_class_unicode              ... bench:      12,915 ns/iter (+/- 29)
-test bench::medium_1K                        ... bench:      31,914 ns/iter (+/- 276) = 32 MB/s
-test bench::medium_1MB                       ... bench:  32,617,173 ns/iter (+/- 68,114) = 31 MB/s
-test bench::medium_32                        ... bench:       1,046 ns/iter (+/- 42) = 30 MB/s
-test bench::medium_32K                       ... bench:   1,019,516 ns/iter (+/- 3,788) = 32 MB/s
-test bench::no_exponential                   ... bench:     303,239 ns/iter (+/- 518)
-test bench::not_literal                      ... bench:       1,756 ns/iter (+/- 115)
-test bench::one_pass_long_prefix             ... bench:         834 ns/iter (+/- 7)
-test bench::one_pass_long_prefix_not         ... bench:         858 ns/iter (+/- 15)
-test bench::one_pass_short_a                 ... bench:       1,597 ns/iter (+/- 9)
-test bench::one_pass_short_a_not             ... bench:       1,950 ns/iter (+/- 21)
-test bench::one_pass_short_b                 ... bench:       1,077 ns/iter (+/- 5)
-test bench::one_pass_short_b_not             ... bench:       1,596 ns/iter (+/- 9)
-test bench::replace_all                      ... bench:       1,288 ns/iter (+/- 13)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/07-prefix-improvements b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/07-prefix-improvements
deleted file mode 100644
index 55477fd..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/07-prefix-improvements
+++ /dev/null
@@ -1,33 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         197 ns/iter (+/- 9)
-test bench::anchored_literal_long_non_match  ... bench:          95 ns/iter (+/- 1)
-test bench::anchored_literal_short_match     ... bench:         193 ns/iter (+/- 2)
-test bench::anchored_literal_short_non_match ... bench:          85 ns/iter (+/- 2)
-test bench::easy0_1K                         ... bench:         304 ns/iter (+/- 119) = 3368 MB/s
-test bench::easy0_1MB                        ... bench:     281,912 ns/iter (+/- 5,274) = 3719 MB/s
-test bench::easy0_32                         ... bench:          74 ns/iter (+/- 16) = 432 MB/s
-test bench::easy0_32K                        ... bench:       8,909 ns/iter (+/- 667) = 3678 MB/s
-test bench::easy1_1K                         ... bench:         300 ns/iter (+/- 111) = 3413 MB/s
-test bench::easy1_1MB                        ... bench:     282,250 ns/iter (+/- 5,556) = 3714 MB/s
-test bench::easy1_32                         ... bench:          98 ns/iter (+/- 17) = 326 MB/s
-test bench::easy1_32K                        ... bench:       8,105 ns/iter (+/- 593) = 4042 MB/s
-test bench::hard_1K                          ... bench:      34,562 ns/iter (+/- 211) = 29 MB/s
-test bench::hard_1MB                         ... bench:  64,510,947 ns/iter (+/- 308,627) = 15 MB/s
-test bench::hard_32                          ... bench:       1,139 ns/iter (+/- 26) = 28 MB/s
-test bench::hard_32K                         ... bench:   1,102,562 ns/iter (+/- 1,850) = 29 MB/s
-test bench::literal                          ... bench:          15 ns/iter (+/- 0)
-test bench::match_class                      ... bench:         105 ns/iter (+/- 1)
-test bench::match_class_in_range             ... bench:         105 ns/iter (+/- 1)
-test bench::match_class_unicode              ... bench:       2,270 ns/iter (+/- 185)
-test bench::medium_1K                        ... bench:       2,262 ns/iter (+/- 73) = 452 MB/s
-test bench::medium_1MB                       ... bench:   2,185,098 ns/iter (+/- 3,007) = 479 MB/s
-test bench::medium_32                        ... bench:         139 ns/iter (+/- 1) = 230 MB/s
-test bench::medium_32K                       ... bench:      72,320 ns/iter (+/- 193) = 453 MB/s
-test bench::no_exponential                   ... bench:     300,699 ns/iter (+/- 494)
-test bench::not_literal                      ... bench:       1,462 ns/iter (+/- 89)
-test bench::one_pass_long_prefix             ... bench:         283 ns/iter (+/- 1)
-test bench::one_pass_long_prefix_not         ... bench:         287 ns/iter (+/- 0)
-test bench::one_pass_short_a                 ... bench:       1,131 ns/iter (+/- 11)
-test bench::one_pass_short_a_not             ... bench:       1,259 ns/iter (+/- 12)
-test bench::one_pass_short_b                 ... bench:         883 ns/iter (+/- 15)
-test bench::one_pass_short_b_not             ... bench:         799 ns/iter (+/- 28)
-test bench::replace_all                      ... bench:         170 ns/iter (+/- 1)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/08-case-fixes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/08-case-fixes
deleted file mode 100644
index 7609f6c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/08-case-fixes
+++ /dev/null
@@ -1,33 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         192 ns/iter (+/- 11)
-test bench::anchored_literal_long_non_match  ... bench:          92 ns/iter (+/- 4)
-test bench::anchored_literal_short_match     ... bench:         182 ns/iter (+/- 6)
-test bench::anchored_literal_short_non_match ... bench:          82 ns/iter (+/- 1)
-test bench::easy0_1K                         ... bench:         277 ns/iter (+/- 79) = 3696 MB/s
-test bench::easy0_1MB                        ... bench:     230,829 ns/iter (+/- 5,712) = 4542 MB/s
-test bench::easy0_32                         ... bench:          70 ns/iter (+/- 4) = 457 MB/s
-test bench::easy0_32K                        ... bench:       8,444 ns/iter (+/- 492) = 3880 MB/s
-test bench::easy1_1K                         ... bench:         272 ns/iter (+/- 98) = 3764 MB/s
-test bench::easy1_1MB                        ... bench:     273,867 ns/iter (+/- 6,351) = 3828 MB/s
-test bench::easy1_32                         ... bench:          72 ns/iter (+/- 15) = 444 MB/s
-test bench::easy1_32K                        ... bench:       8,109 ns/iter (+/- 540) = 4040 MB/s
-test bench::hard_1K                          ... bench:      31,043 ns/iter (+/- 1,237) = 32 MB/s
-test bench::hard_1MB                         ... bench:  60,077,413 ns/iter (+/- 129,611) = 16 MB/s
-test bench::hard_32                          ... bench:       1,036 ns/iter (+/- 20) = 30 MB/s
-test bench::hard_32K                         ... bench:     996,238 ns/iter (+/- 3,181) = 32 MB/s
-test bench::literal                          ... bench:          15 ns/iter (+/- 0)
-test bench::match_class                      ... bench:          75 ns/iter (+/- 7)
-test bench::match_class_in_range             ... bench:          77 ns/iter (+/- 7)
-test bench::match_class_unicode              ... bench:       2,057 ns/iter (+/- 102)
-test bench::medium_1K                        ... bench:       2,252 ns/iter (+/- 63) = 454 MB/s
-test bench::medium_1MB                       ... bench:   2,186,091 ns/iter (+/- 7,496) = 479 MB/s
-test bench::medium_32                        ... bench:         132 ns/iter (+/- 2) = 242 MB/s
-test bench::medium_32K                       ... bench:      72,394 ns/iter (+/- 342) = 452 MB/s
-test bench::no_exponential                   ... bench:     286,662 ns/iter (+/- 1,150)
-test bench::not_literal                      ... bench:       1,130 ns/iter (+/- 10)
-test bench::one_pass_long_prefix             ... bench:         271 ns/iter (+/- 0)
-test bench::one_pass_long_prefix_not         ... bench:         276 ns/iter (+/- 3)
-test bench::one_pass_short_a                 ... bench:       1,147 ns/iter (+/- 10)
-test bench::one_pass_short_a_not             ... bench:         901 ns/iter (+/- 8)
-test bench::one_pass_short_b                 ... bench:         887 ns/iter (+/- 7)
-test bench::one_pass_short_b_not             ... bench:         777 ns/iter (+/- 6)
-test bench::replace_all                      ... bench:         154 ns/iter (+/- 0)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/09-before-compiler-rewrite b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/09-before-compiler-rewrite
deleted file mode 100644
index fe67d09..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/09-before-compiler-rewrite
+++ /dev/null
@@ -1,33 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         156 ns/iter (+/- 5)
-test bench::anchored_literal_long_non_match  ... bench:          85 ns/iter (+/- 7)
-test bench::anchored_literal_short_match     ... bench:         145 ns/iter (+/- 3)
-test bench::anchored_literal_short_non_match ... bench:          76 ns/iter (+/- 2)
-test bench::easy0_1K                         ... bench:         269 ns/iter (+/- 63) = 3806 MB/s
-test bench::easy0_1MB                        ... bench:     232,461 ns/iter (+/- 13,022) = 4509 MB/s
-test bench::easy0_32                         ... bench:          63 ns/iter (+/- 6) = 507 MB/s
-test bench::easy0_32K                        ... bench:       8,358 ns/iter (+/- 430) = 3920 MB/s
-test bench::easy1_1K                         ... bench:         274 ns/iter (+/- 101) = 3737 MB/s
-test bench::easy1_1MB                        ... bench:     278,949 ns/iter (+/- 11,324) = 3758 MB/s
-test bench::easy1_32                         ... bench:          63 ns/iter (+/- 15) = 507 MB/s
-test bench::easy1_32K                        ... bench:       7,731 ns/iter (+/- 488) = 4238 MB/s
-test bench::hard_1K                          ... bench:      44,685 ns/iter (+/- 661) = 22 MB/s
-test bench::hard_1MB                         ... bench:  60,108,237 ns/iter (+/- 814,810) = 16 MB/s
-test bench::hard_32                          ... bench:       1,412 ns/iter (+/- 38) = 22 MB/s
-test bench::hard_32K                         ... bench:   1,363,335 ns/iter (+/- 21,316) = 24 MB/s
-test bench::literal                          ... bench:          14 ns/iter (+/- 0)
-test bench::match_class                      ... bench:          81 ns/iter (+/- 0)
-test bench::match_class_in_range             ... bench:          81 ns/iter (+/- 2)
-test bench::match_class_unicode              ... bench:       2,978 ns/iter (+/- 64)
-test bench::medium_1K                        ... bench:       2,239 ns/iter (+/- 68) = 457 MB/s
-test bench::medium_1MB                       ... bench:   2,215,729 ns/iter (+/- 20,897) = 472 MB/s
-test bench::medium_32                        ... bench:         124 ns/iter (+/- 2) = 258 MB/s
-test bench::medium_32K                       ... bench:      72,486 ns/iter (+/- 1,027) = 452 MB/s
-test bench::no_exponential                   ... bench:     282,992 ns/iter (+/- 8,102)
-test bench::not_literal                      ... bench:       1,526 ns/iter (+/- 32)
-test bench::one_pass_long_prefix             ... bench:         307 ns/iter (+/- 7)
-test bench::one_pass_long_prefix_not         ... bench:         311 ns/iter (+/- 8)
-test bench::one_pass_short_a                 ... bench:         623 ns/iter (+/- 12)
-test bench::one_pass_short_a_not             ... bench:         920 ns/iter (+/- 19)
-test bench::one_pass_short_b                 ... bench:         554 ns/iter (+/- 13)
-test bench::one_pass_short_b_not             ... bench:         740 ns/iter (+/- 12)
-test bench::replace_all                      ... bench:         155 ns/iter (+/- 5)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/10-compiler-rewrite b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/10-compiler-rewrite
deleted file mode 100644
index e25a602..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/10-compiler-rewrite
+++ /dev/null
@@ -1,33 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         145 ns/iter (+/- 1)
-test bench::anchored_literal_long_non_match  ... bench:          92 ns/iter (+/- 2)
-test bench::anchored_literal_short_match     ... bench:         129 ns/iter (+/- 3)
-test bench::anchored_literal_short_non_match ... bench:          72 ns/iter (+/- 1)
-test bench::easy0_1K                         ... bench:         268 ns/iter (+/- 88) = 3820 MB/s
-test bench::easy0_1MB                        ... bench:     234,067 ns/iter (+/- 4,663) = 4479 MB/s
-test bench::easy0_32                         ... bench:          64 ns/iter (+/- 4) = 500 MB/s
-test bench::easy0_32K                        ... bench:       8,298 ns/iter (+/- 521) = 3948 MB/s
-test bench::easy1_1K                         ... bench:         275 ns/iter (+/- 95) = 3723 MB/s
-test bench::easy1_1MB                        ... bench:     280,466 ns/iter (+/- 5,938) = 3738 MB/s
-test bench::easy1_32                         ... bench:          64 ns/iter (+/- 16) = 500 MB/s
-test bench::easy1_32K                        ... bench:       7,693 ns/iter (+/- 595) = 4259 MB/s
-test bench::hard_1K                          ... bench:      27,844 ns/iter (+/- 1,012) = 36 MB/s
-test bench::hard_1MB                         ... bench:  52,323,489 ns/iter (+/- 1,251,665) = 19 MB/s
-test bench::hard_32                          ... bench:         970 ns/iter (+/- 92) = 32 MB/s
-test bench::hard_32K                         ... bench:     896,945 ns/iter (+/- 29,977) = 36 MB/s
-test bench::literal                          ... bench:          13 ns/iter (+/- 1)
-test bench::match_class                      ... bench:          80 ns/iter (+/- 0)
-test bench::match_class_in_range             ... bench:          80 ns/iter (+/- 0)
-test bench::match_class_unicode              ... bench:       2,150 ns/iter (+/- 18)
-test bench::medium_1K                        ... bench:       2,241 ns/iter (+/- 55) = 456 MB/s
-test bench::medium_1MB                       ... bench:   2,186,354 ns/iter (+/- 9,134) = 479 MB/s
-test bench::medium_32                        ... bench:         125 ns/iter (+/- 1) = 256 MB/s
-test bench::medium_32K                       ... bench:      72,156 ns/iter (+/- 145) = 454 MB/s
-test bench::no_exponential                   ... bench:     305,034 ns/iter (+/- 1,134)
-test bench::not_literal                      ... bench:       1,169 ns/iter (+/- 105)
-test bench::one_pass_long_prefix             ... bench:         257 ns/iter (+/- 4)
-test bench::one_pass_long_prefix_not         ... bench:         276 ns/iter (+/- 4)
-test bench::one_pass_short_a                 ... bench:         680 ns/iter (+/- 3)
-test bench::one_pass_short_a_not             ... bench:         804 ns/iter (+/- 48)
-test bench::one_pass_short_b                 ... bench:         337 ns/iter (+/- 3)
-test bench::one_pass_short_b_not             ... bench:         339 ns/iter (+/- 5)
-test bench::replace_all                      ... bench:         150 ns/iter (+/- 1)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/11-compiler-rewrite b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/11-compiler-rewrite
deleted file mode 100644
index 3296d43..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/11-compiler-rewrite
+++ /dev/null
@@ -1,33 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         171 ns/iter (+/- 20)
-test bench::anchored_literal_long_non_match  ... bench:          90 ns/iter (+/- 8)
-test bench::anchored_literal_short_match     ... bench:         180 ns/iter (+/- 33)
-test bench::anchored_literal_short_non_match ... bench:          78 ns/iter (+/- 9)
-test bench::easy0_1K                         ... bench:         272 ns/iter (+/- 82) = 3764 MB/s
-test bench::easy0_1MB                        ... bench:     233,014 ns/iter (+/- 22,144) = 4500 MB/s
-test bench::easy0_32                         ... bench:          62 ns/iter (+/- 6) = 516 MB/s
-test bench::easy0_32K                        ... bench:       8,490 ns/iter (+/- 905) = 3859 MB/s
-test bench::easy1_1K                         ... bench:         273 ns/iter (+/- 100) = 3750 MB/s
-test bench::easy1_1MB                        ... bench:     279,901 ns/iter (+/- 5,598) = 3746 MB/s
-test bench::easy1_32                         ... bench:          62 ns/iter (+/- 6) = 516 MB/s
-test bench::easy1_32K                        ... bench:       7,713 ns/iter (+/- 566) = 4248 MB/s
-test bench::hard_1K                          ... bench:      38,641 ns/iter (+/- 605) = 26 MB/s
-test bench::hard_1MB                         ... bench:  56,579,116 ns/iter (+/- 1,193,231) = 18 MB/s
-test bench::hard_32                          ... bench:       1,252 ns/iter (+/- 24) = 25 MB/s
-test bench::hard_32K                         ... bench:   1,247,639 ns/iter (+/- 12,774) = 26 MB/s
-test bench::literal                          ... bench:          13 ns/iter (+/- 1)
-test bench::match_class                      ... bench:          80 ns/iter (+/- 1)
-test bench::match_class_in_range             ... bench:          80 ns/iter (+/- 0)
-test bench::match_class_unicode              ... bench:       2,459 ns/iter (+/- 77)
-test bench::medium_1K                        ... bench:       2,244 ns/iter (+/- 63) = 456 MB/s
-test bench::medium_1MB                       ... bench:   2,192,052 ns/iter (+/- 21,460) = 478 MB/s
-test bench::medium_32                        ... bench:         122 ns/iter (+/- 3) = 262 MB/s
-test bench::medium_32K                       ... bench:      73,167 ns/iter (+/- 15,655) = 447 MB/s
-test bench::no_exponential                   ... bench:     289,292 ns/iter (+/- 1,488)
-test bench::not_literal                      ... bench:       1,480 ns/iter (+/- 18)
-test bench::one_pass_long_prefix             ... bench:         324 ns/iter (+/- 15)
-test bench::one_pass_long_prefix_not         ... bench:         337 ns/iter (+/- 5)
-test bench::one_pass_short_a                 ... bench:       1,161 ns/iter (+/- 10)
-test bench::one_pass_short_a_not             ... bench:         798 ns/iter (+/- 6)
-test bench::one_pass_short_b                 ... bench:         456 ns/iter (+/- 6)
-test bench::one_pass_short_b_not             ... bench:         452 ns/iter (+/- 33)
-test bench::replace_all                      ... bench:         148 ns/iter (+/- 0)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/12-executor b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/12-executor
deleted file mode 100644
index 8ec8561b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/12-executor
+++ /dev/null
@@ -1,35 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         179 ns/iter (+/- 5)
-test bench::anchored_literal_long_non_match  ... bench:          90 ns/iter (+/- 1)
-test bench::anchored_literal_short_match     ... bench:         164 ns/iter (+/- 16)
-test bench::anchored_literal_short_non_match ... bench:          79 ns/iter (+/- 1)
-test bench::compile_simple                   ... bench:       3,708 ns/iter (+/- 225)
-test bench::compile_unicode                  ... bench:       5,871 ns/iter (+/- 264)
-test bench::easy0_1K                         ... bench:         263 ns/iter (+/- 92) = 3893 MB/s
-test bench::easy0_1MB                        ... bench:     217,835 ns/iter (+/- 4,074) = 4813 MB/s
-test bench::easy0_32                         ... bench:          67 ns/iter (+/- 1) = 477 MB/s
-test bench::easy0_32K                        ... bench:       8,204 ns/iter (+/- 426) = 3994 MB/s
-test bench::easy1_1K                         ... bench:         276 ns/iter (+/- 100) = 3710 MB/s
-test bench::easy1_1MB                        ... bench:     284,086 ns/iter (+/- 6,516) = 3691 MB/s
-test bench::easy1_32                         ... bench:          70 ns/iter (+/- 15) = 457 MB/s
-test bench::easy1_32K                        ... bench:       7,844 ns/iter (+/- 556) = 4177 MB/s
-test bench::hard_1K                          ... bench:      30,062 ns/iter (+/- 1,684) = 34 MB/s
-test bench::hard_1MB                         ... bench:  50,839,701 ns/iter (+/- 104,343) = 20 MB/s
-test bench::hard_32                          ... bench:       1,009 ns/iter (+/- 48) = 31 MB/s
-test bench::hard_32K                         ... bench:     965,341 ns/iter (+/- 45,075) = 33 MB/s
-test bench::literal                          ... bench:          12 ns/iter (+/- 0)
-test bench::match_class                      ... bench:          80 ns/iter (+/- 0)
-test bench::match_class_in_range             ... bench:          80 ns/iter (+/- 1)
-test bench::match_class_unicode              ... bench:       2,150 ns/iter (+/- 22)
-test bench::medium_1K                        ... bench:       2,262 ns/iter (+/- 66) = 452 MB/s
-test bench::medium_1MB                       ... bench:   2,193,428 ns/iter (+/- 6,147) = 478 MB/s
-test bench::medium_32                        ... bench:         129 ns/iter (+/- 1) = 248 MB/s
-test bench::medium_32K                       ... bench:      72,629 ns/iter (+/- 348) = 451 MB/s
-test bench::no_exponential                   ... bench:     289,043 ns/iter (+/- 2,478)
-test bench::not_literal                      ... bench:       1,195 ns/iter (+/- 10)
-test bench::one_pass_long_prefix             ... bench:         265 ns/iter (+/- 3)
-test bench::one_pass_long_prefix_not         ... bench:         270 ns/iter (+/- 4)
-test bench::one_pass_short_a                 ... bench:         730 ns/iter (+/- 4)
-test bench::one_pass_short_a_not             ... bench:         712 ns/iter (+/- 4)
-test bench::one_pass_short_b                 ... bench:         445 ns/iter (+/- 49)
-test bench::one_pass_short_b_not             ... bench:         406 ns/iter (+/- 72)
-test bench::replace_all                      ... bench:         136 ns/iter (+/- 2)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/12-executor-bytes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/12-executor-bytes
deleted file mode 100644
index c036920..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/12-executor-bytes
+++ /dev/null
@@ -1,35 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         190 ns/iter (+/- 12)
-test bench::anchored_literal_long_non_match  ... bench:          85 ns/iter (+/- 2)
-test bench::anchored_literal_short_match     ... bench:         147 ns/iter (+/- 9)
-test bench::anchored_literal_short_non_match ... bench:          74 ns/iter (+/- 5)
-test bench::compile_simple                   ... bench:       4,218 ns/iter (+/- 201)
-test bench::compile_unicode                  ... bench:     402,353 ns/iter (+/- 2,642)
-test bench::easy0_1K                         ... bench:         253 ns/iter (+/- 79) = 4047 MB/s
-test bench::easy0_1MB                        ... bench:     215,308 ns/iter (+/- 3,474) = 4870 MB/s
-test bench::easy0_32                         ... bench:          64 ns/iter (+/- 4) = 500 MB/s
-test bench::easy0_32K                        ... bench:       8,134 ns/iter (+/- 435) = 4028 MB/s
-test bench::easy1_1K                         ... bench:         277 ns/iter (+/- 105) = 3696 MB/s
-test bench::easy1_1MB                        ... bench:     283,435 ns/iter (+/- 5,975) = 3699 MB/s
-test bench::easy1_32                         ... bench:          64 ns/iter (+/- 14) = 500 MB/s
-test bench::easy1_32K                        ... bench:       7,832 ns/iter (+/- 575) = 4183 MB/s
-test bench::hard_1K                          ... bench:      35,380 ns/iter (+/- 772) = 28 MB/s
-test bench::hard_1MB                         ... bench:  46,639,535 ns/iter (+/- 456,010) = 22 MB/s
-test bench::hard_32                          ... bench:       1,110 ns/iter (+/- 53) = 28 MB/s
-test bench::hard_32K                         ... bench:   1,146,751 ns/iter (+/- 17,290) = 28 MB/s
-test bench::literal                          ... bench:          12 ns/iter (+/- 0)
-test bench::match_class                      ... bench:          80 ns/iter (+/- 1)
-test bench::match_class_in_range             ... bench:          80 ns/iter (+/- 0)
-test bench::match_class_unicode              ... bench:   2,487,088 ns/iter (+/- 103,259)
-test bench::medium_1K                        ... bench:       2,253 ns/iter (+/- 52) = 454 MB/s
-test bench::medium_1MB                       ... bench:   2,193,344 ns/iter (+/- 7,582) = 478 MB/s
-test bench::medium_32                        ... bench:         119 ns/iter (+/- 5) = 268 MB/s
-test bench::medium_32K                       ... bench:      72,569 ns/iter (+/- 283) = 451 MB/s
-test bench::no_exponential                   ... bench:     292,840 ns/iter (+/- 2,823)
-test bench::not_literal                      ... bench:       6,417 ns/iter (+/- 26)
-test bench::one_pass_long_prefix             ... bench:         304 ns/iter (+/- 0)
-test bench::one_pass_long_prefix_not         ... bench:         943 ns/iter (+/- 44)
-test bench::one_pass_short_a                 ... bench:         688 ns/iter (+/- 11)
-test bench::one_pass_short_a_not             ... bench:         687 ns/iter (+/- 7)
-test bench::one_pass_short_b                 ... bench:         589 ns/iter (+/- 6)
-test bench::one_pass_short_b_not             ... bench:         357 ns/iter (+/- 11)
-test bench::replace_all                      ... bench:         131 ns/iter (+/- 1)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/13-cache-byte-range-suffixes b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/13-cache-byte-range-suffixes
deleted file mode 100644
index 5a2ec09..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/record/old-bench-log/old/13-cache-byte-range-suffixes
+++ /dev/null
@@ -1,35 +0,0 @@
-test bench::anchored_literal_long_match      ... bench:         174 ns/iter (+/- 65)
-test bench::anchored_literal_long_non_match  ... bench:          94 ns/iter (+/- 1)
-test bench::anchored_literal_short_match     ... bench:         142 ns/iter (+/- 1)
-test bench::anchored_literal_short_non_match ... bench:          82 ns/iter (+/- 0)
-test bench::compile_simple                   ... bench:       4,878 ns/iter (+/- 207)
-test bench::compile_unicode                  ... bench:     679,701 ns/iter (+/- 10,264)
-test bench::easy0_1K                         ... bench:         257 ns/iter (+/- 83) = 3984 MB/s
-test bench::easy0_1MB                        ... bench:     217,698 ns/iter (+/- 3,307) = 4816 MB/s
-test bench::easy0_32                         ... bench:          61 ns/iter (+/- 3) = 524 MB/s
-test bench::easy0_32K                        ... bench:       8,144 ns/iter (+/- 449) = 4023 MB/s
-test bench::easy1_1K                         ... bench:         276 ns/iter (+/- 106) = 3710 MB/s
-test bench::easy1_1MB                        ... bench:     285,518 ns/iter (+/- 4,933) = 3672 MB/s
-test bench::easy1_32                         ... bench:          61 ns/iter (+/- 12) = 524 MB/s
-test bench::easy1_32K                        ... bench:       7,896 ns/iter (+/- 508) = 4149 MB/s
-test bench::hard_1K                          ... bench:      35,361 ns/iter (+/- 684) = 28 MB/s
-test bench::hard_1MB                         ... bench:  48,691,236 ns/iter (+/- 2,316,446) = 21 MB/s
-test bench::hard_32                          ... bench:       1,087 ns/iter (+/- 33) = 29 MB/s
-test bench::hard_32K                         ... bench:   1,147,627 ns/iter (+/- 4,982) = 28 MB/s
-test bench::literal                          ... bench:          12 ns/iter (+/- 0)
-test bench::match_class                      ... bench:          80 ns/iter (+/- 0)
-test bench::match_class_in_range             ... bench:          80 ns/iter (+/- 0)
-test bench::match_class_unicode              ... bench:   2,431,592 ns/iter (+/- 89,268)
-test bench::medium_1K                        ... bench:       2,245 ns/iter (+/- 93) = 456 MB/s
-test bench::medium_1MB                       ... bench:   2,192,828 ns/iter (+/- 4,343) = 478 MB/s
-test bench::medium_32                        ... bench:         120 ns/iter (+/- 2) = 266 MB/s
-test bench::medium_32K                       ... bench:      72,996 ns/iter (+/- 627) = 448 MB/s
-test bench::no_exponential                   ... bench:     290,775 ns/iter (+/- 1,176)
-test bench::not_literal                      ... bench:       5,282 ns/iter (+/- 199)
-test bench::one_pass_long_prefix             ... bench:         294 ns/iter (+/- 3)
-test bench::one_pass_long_prefix_not         ... bench:         315 ns/iter (+/- 7)
-test bench::one_pass_short_a                 ... bench:         708 ns/iter (+/- 21)
-test bench::one_pass_short_a_not             ... bench:         861 ns/iter (+/- 9)
-test bench::one_pass_short_b                 ... bench:         607 ns/iter (+/- 2)
-test bench::one_pass_short_b_not             ... bench:         344 ns/iter (+/- 11)
-test bench::replace_all                      ... bench:         135 ns/iter (+/- 1)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/rustfmt.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/rustfmt.toml
deleted file mode 100644
index aa37a218..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/rustfmt.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-max_width = 79
-use_small_heuristics = "max"
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/builders.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/builders.rs
deleted file mode 100644
index c111a96..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/builders.rs
+++ /dev/null
@@ -1,2539 +0,0 @@
-#![allow(warnings)]
-
-// This module defines an internal builder that encapsulates all interaction
-// with meta::Regex construction, and then 4 public API builders that wrap
-// around it. The docs are essentially repeated on each of the 4 public
-// builders, with tweaks to the examples as needed.
-//
-// The reason why there are so many builders is partially because of a misstep
-// in the initial API design: the builder constructor takes in the pattern
-// strings instead of using the `build` method to accept the pattern strings.
-// This means `new` has a different signature for each builder. It probably
-// would have been nicer to to use one builder with `fn new()`, and then add
-// `build(pat)` and `build_many(pats)` constructors.
-//
-// The other reason is because I think the `bytes` module should probably
-// have its own builder type. That way, it is completely isolated from the
-// top-level API.
-//
-// If I could do it again, I'd probably have a `regex::Builder` and a
-// `regex::bytes::Builder`. Each would have `build` and `build_set` (or
-// `build_many`) methods for constructing a single pattern `Regex` and a
-// multi-pattern `RegexSet`, respectively.
-
-use alloc::{
-    string::{String, ToString},
-    sync::Arc,
-    vec,
-    vec::Vec,
-};
-
-use regex_automata::{
-    meta, nfa::thompson::WhichCaptures, util::syntax, MatchKind,
-};
-
-use crate::error::Error;
-
-/// A builder for constructing a `Regex`, `bytes::Regex`, `RegexSet` or a
-/// `bytes::RegexSet`.
-///
-/// This is essentially the implementation of the four different builder types
-/// in the public API: `RegexBuilder`, `bytes::RegexBuilder`, `RegexSetBuilder`
-/// and `bytes::RegexSetBuilder`.
-#[derive(Clone, Debug)]
-struct Builder {
-    pats: Vec<String>,
-    metac: meta::Config,
-    syntaxc: syntax::Config,
-}
-
-impl Default for Builder {
-    fn default() -> Builder {
-        let metac = meta::Config::new()
-            .nfa_size_limit(Some(10 * (1 << 20)))
-            .hybrid_cache_capacity(2 * (1 << 20));
-        Builder { pats: vec![], metac, syntaxc: syntax::Config::default() }
-    }
-}
-
-impl Builder {
-    fn new<I, S>(patterns: I) -> Builder
-    where
-        S: AsRef<str>,
-        I: IntoIterator<Item = S>,
-    {
-        let mut b = Builder::default();
-        b.pats.extend(patterns.into_iter().map(|p| p.as_ref().to_string()));
-        b
-    }
-
-    fn build_one_string(&self) -> Result<crate::Regex, Error> {
-        assert_eq!(1, self.pats.len());
-        let metac = self
-            .metac
-            .clone()
-            .match_kind(MatchKind::LeftmostFirst)
-            .utf8_empty(true);
-        let syntaxc = self.syntaxc.clone().utf8(true);
-        let pattern = Arc::from(self.pats[0].as_str());
-        meta::Builder::new()
-            .configure(metac)
-            .syntax(syntaxc)
-            .build(&pattern)
-            .map(|meta| crate::Regex { meta, pattern })
-            .map_err(Error::from_meta_build_error)
-    }
-
-    fn build_one_bytes(&self) -> Result<crate::bytes::Regex, Error> {
-        assert_eq!(1, self.pats.len());
-        let metac = self
-            .metac
-            .clone()
-            .match_kind(MatchKind::LeftmostFirst)
-            .utf8_empty(false);
-        let syntaxc = self.syntaxc.clone().utf8(false);
-        let pattern = Arc::from(self.pats[0].as_str());
-        meta::Builder::new()
-            .configure(metac)
-            .syntax(syntaxc)
-            .build(&pattern)
-            .map(|meta| crate::bytes::Regex { meta, pattern })
-            .map_err(Error::from_meta_build_error)
-    }
-
-    fn build_many_string(&self) -> Result<crate::RegexSet, Error> {
-        let metac = self
-            .metac
-            .clone()
-            .match_kind(MatchKind::All)
-            .utf8_empty(true)
-            .which_captures(WhichCaptures::None);
-        let syntaxc = self.syntaxc.clone().utf8(true);
-        let patterns = Arc::from(self.pats.as_slice());
-        meta::Builder::new()
-            .configure(metac)
-            .syntax(syntaxc)
-            .build_many(&patterns)
-            .map(|meta| crate::RegexSet { meta, patterns })
-            .map_err(Error::from_meta_build_error)
-    }
-
-    fn build_many_bytes(&self) -> Result<crate::bytes::RegexSet, Error> {
-        let metac = self
-            .metac
-            .clone()
-            .match_kind(MatchKind::All)
-            .utf8_empty(false)
-            .which_captures(WhichCaptures::None);
-        let syntaxc = self.syntaxc.clone().utf8(false);
-        let patterns = Arc::from(self.pats.as_slice());
-        meta::Builder::new()
-            .configure(metac)
-            .syntax(syntaxc)
-            .build_many(&patterns)
-            .map(|meta| crate::bytes::RegexSet { meta, patterns })
-            .map_err(Error::from_meta_build_error)
-    }
-
-    fn case_insensitive(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.case_insensitive(yes);
-        self
-    }
-
-    fn multi_line(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.multi_line(yes);
-        self
-    }
-
-    fn dot_matches_new_line(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.dot_matches_new_line(yes);
-        self
-    }
-
-    fn crlf(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.crlf(yes);
-        self
-    }
-
-    fn line_terminator(&mut self, byte: u8) -> &mut Builder {
-        self.metac = self.metac.clone().line_terminator(byte);
-        self.syntaxc = self.syntaxc.line_terminator(byte);
-        self
-    }
-
-    fn swap_greed(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.swap_greed(yes);
-        self
-    }
-
-    fn ignore_whitespace(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.ignore_whitespace(yes);
-        self
-    }
-
-    fn unicode(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.unicode(yes);
-        self
-    }
-
-    fn octal(&mut self, yes: bool) -> &mut Builder {
-        self.syntaxc = self.syntaxc.octal(yes);
-        self
-    }
-
-    fn size_limit(&mut self, limit: usize) -> &mut Builder {
-        self.metac = self.metac.clone().nfa_size_limit(Some(limit));
-        self
-    }
-
-    fn dfa_size_limit(&mut self, limit: usize) -> &mut Builder {
-        self.metac = self.metac.clone().hybrid_cache_capacity(limit);
-        self
-    }
-
-    fn nest_limit(&mut self, limit: u32) -> &mut Builder {
-        self.syntaxc = self.syntaxc.nest_limit(limit);
-        self
-    }
-}
-
-pub(crate) mod string {
-    use crate::{error::Error, Regex, RegexSet};
-
-    use super::Builder;
-
-    /// A configurable builder for a [`Regex`].
-    ///
-    /// This builder can be used to programmatically set flags such as `i`
-    /// (case insensitive) and `x` (for verbose mode). This builder can also be
-    /// used to configure things like the line terminator and a size limit on
-    /// the compiled regular expression.
-    #[derive(Clone, Debug)]
-    pub struct RegexBuilder {
-        builder: Builder,
-    }
-
-    impl RegexBuilder {
-        /// Create a new builder with a default configuration for the given
-        /// pattern.
-        ///
-        /// If the pattern is invalid or exceeds the configured size limits,
-        /// then an error will be returned when [`RegexBuilder::build`] is
-        /// called.
-        pub fn new(pattern: &str) -> RegexBuilder {
-            RegexBuilder { builder: Builder::new([pattern]) }
-        }
-
-        /// Compiles the pattern given to `RegexBuilder::new` with the
-        /// configuration set on this builder.
-        ///
-        /// If the pattern isn't a valid regex or if a configured size limit
-        /// was exceeded, then an error is returned.
-        pub fn build(&self) -> Result<Regex, Error> {
-            self.builder.build_one_string()
-        }
-
-        /// This configures Unicode mode for the entire pattern.
-        ///
-        /// Enabling Unicode mode does a number of things:
-        ///
-        /// * Most fundamentally, it causes the fundamental atom of matching
-        /// to be a single codepoint. When Unicode mode is disabled, it's a
-        /// single byte. For example, when Unicode mode is enabled, `.` will
-        /// match `đŸ’©` once, where as it will match 4 times when Unicode mode
-        /// is disabled. (Since the UTF-8 encoding of `đŸ’©` is 4 bytes long.)
-        /// * Case insensitive matching uses Unicode simple case folding rules.
-        /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are
-        /// available.
-        /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and
-        /// `\d`.
-        /// * The word boundary assertions, `\b` and `\B`, use the Unicode
-        /// definition of a word character.
-        ///
-        /// Note that if Unicode mode is disabled, then the regex will fail to
-        /// compile if it could match invalid UTF-8. For example, when Unicode
-        /// mode is disabled, then since `.` matches any byte (except for
-        /// `\n`), then it can match invalid UTF-8 and thus building a regex
-        /// from it will fail. Another example is `\w` and `\W`. Since `\w` can
-        /// only match ASCII bytes when Unicode mode is disabled, it's allowed.
-        /// But `\W` can match more than ASCII bytes, including invalid UTF-8,
-        /// and so it is not allowed. This restriction can be lifted only by
-        /// using a [`bytes::Regex`](crate::bytes::Regex).
-        ///
-        /// For more details on the Unicode support in this crate, see the
-        /// [Unicode section](crate#unicode) in this crate's top-level
-        /// documentation.
-        ///
-        /// The default for this is `true`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"\w")
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally greek letters would be included in \w, but since
-        /// // Unicode mode is disabled, it only matches ASCII letters.
-        /// assert!(!re.is_match("δ"));
-        ///
-        /// let re = RegexBuilder::new(r"s")
-        ///     .case_insensitive(true)
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally 'Ćż' is included when searching for 's' case
-        /// // insensitively due to Unicode's simple case folding rules. But
-        /// // when Unicode mode is disabled, only ASCII case insensitive rules
-        /// // are used.
-        /// assert!(!re.is_match("Ćż"));
-        /// ```
-        pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.unicode(yes);
-            self
-        }
-
-        /// This configures whether to enable case insensitive matching for the
-        /// entire pattern.
-        ///
-        /// This setting can also be configured using the inline flag `i`
-        /// in the pattern. For example, `(?i:foo)` matches `foo` case
-        /// insensitively while `(?-i:foo)` matches `foo` case sensitively.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"foo(?-i:bar)quux")
-        ///     .case_insensitive(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("FoObarQuUx"));
-        /// // Even though case insensitive matching is enabled in the builder,
-        /// // it can be locally disabled within the pattern. In this case,
-        /// // `bar` is matched case sensitively.
-        /// assert!(!re.is_match("fooBARquux"));
-        /// ```
-        pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.case_insensitive(yes);
-            self
-        }
-
-        /// This configures multi-line mode for the entire pattern.
-        ///
-        /// Enabling multi-line mode changes the behavior of the `^` and `$`
-        /// anchor assertions. Instead of only matching at the beginning and
-        /// end of a haystack, respectively, multi-line mode causes them to
-        /// match at the beginning and end of a line *in addition* to the
-        /// beginning and end of a haystack. More precisely, `^` will match at
-        /// the position immediately following a `\n` and `$` will match at the
-        /// position immediately preceding a `\n`.
-        ///
-        /// The behavior of this option can be impacted by other settings too:
-        ///
-        /// * The [`RegexBuilder::line_terminator`] option changes `\n` above
-        /// to any ASCII byte.
-        /// * The [`RegexBuilder::crlf`] option changes the line terminator to
-        /// be either `\r` or `\n`, but never at the position between a `\r`
-        /// and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `m` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^foo$")
-        ///     .multi_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert_eq!(Some(1..4), re.find("\nfoo\n").map(|m| m.range()));
-        /// ```
-        pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.multi_line(yes);
-            self
-        }
-
-        /// This configures dot-matches-new-line mode for the entire pattern.
-        ///
-        /// Perhaps surprisingly, the default behavior for `.` is not to match
-        /// any character, but rather, to match any character except for the
-        /// line terminator (which is `\n` by default). When this mode is
-        /// enabled, the behavior changes such that `.` truly matches any
-        /// character.
-        ///
-        /// This setting can also be configured using the inline flag `s` in
-        /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent
-        /// regexes.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"foo.bar")
-        ///     .dot_matches_new_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = "foo\nbar";
-        /// assert_eq!(Some("foo\nbar"), re.find(hay).map(|m| m.as_str()));
-        /// ```
-        pub fn dot_matches_new_line(
-            &mut self,
-            yes: bool,
-        ) -> &mut RegexBuilder {
-            self.builder.dot_matches_new_line(yes);
-            self
-        }
-
-        /// This configures CRLF mode for the entire pattern.
-        ///
-        /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for
-        /// short) and `\n` ("line feed" or LF for short) are treated as line
-        /// terminators. This results in the following:
-        ///
-        /// * Unless dot-matches-new-line mode is enabled, `.` will now match
-        /// any character except for `\n` and `\r`.
-        /// * When multi-line mode is enabled, `^` will match immediately
-        /// following a `\n` or a `\r`. Similarly, `$` will match immediately
-        /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match
-        /// between `\r` and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `R` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^foo$")
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = "\r\nfoo\r\n";
-        /// // If CRLF mode weren't enabled here, then '$' wouldn't match
-        /// // immediately after 'foo', and thus no match would be found.
-        /// assert_eq!(Some("foo"), re.find(hay).map(|m| m.as_str()));
-        /// ```
-        ///
-        /// This example demonstrates that `^` will never match at a position
-        /// between `\r` and `\n`. (`$` will similarly not match between a `\r`
-        /// and a `\n`.)
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^")
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = "\r\n\r\n";
-        /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect();
-        /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]);
-        /// ```
-        pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.crlf(yes);
-            self
-        }
-
-        /// Configures the line terminator to be used by the regex.
-        ///
-        /// The line terminator is relevant in two ways for a particular regex:
-        ///
-        /// * When dot-matches-new-line mode is *not* enabled (the default),
-        /// then `.` will match any character except for the configured line
-        /// terminator.
-        /// * When multi-line mode is enabled (not the default), then `^` and
-        /// `$` will match immediately after and before, respectively, a line
-        /// terminator.
-        ///
-        /// In both cases, if CRLF mode is enabled in a particular context,
-        /// then it takes precedence over any configured line terminator.
-        ///
-        /// This option cannot be configured from within the pattern.
-        ///
-        /// The default line terminator is `\n`.
-        ///
-        /// # Example
-        ///
-        /// This shows how to treat the NUL byte as a line terminator. This can
-        /// be a useful heuristic when searching binary data.
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^foo$")
-        ///     .multi_line(true)
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = "\x00foo\x00";
-        /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range()));
-        /// ```
-        ///
-        /// This example shows that the behavior of `.` is impacted by this
-        /// setting as well:
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r".")
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("\n"));
-        /// assert!(!re.is_match("\x00"));
-        /// ```
-        ///
-        /// This shows that building a regex will fail if the byte given
-        /// is not ASCII and the pattern could result in matching invalid
-        /// UTF-8. This is because any singular non-ASCII byte is not valid
-        /// UTF-8, and it is not permitted for a [`Regex`] to match invalid
-        /// UTF-8. (It is permissible to use a non-ASCII byte when building a
-        /// [`bytes::Regex`](crate::bytes::Regex).)
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// assert!(RegexBuilder::new(r".").line_terminator(0x80).build().is_err());
-        /// // Note that using a non-ASCII byte isn't enough on its own to
-        /// // cause regex compilation to fail. You actually have to make use
-        /// // of it in the regex in a way that leads to matching invalid
-        /// // UTF-8. If you don't, then regex compilation will succeed!
-        /// assert!(RegexBuilder::new(r"a").line_terminator(0x80).build().is_ok());
-        /// ```
-        pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder {
-            self.builder.line_terminator(byte);
-            self
-        }
-
-        /// This configures swap-greed mode for the entire pattern.
-        ///
-        /// When swap-greed mode is enabled, patterns like `a+` will become
-        /// non-greedy and patterns like `a+?` will become greedy. In other
-        /// words, the meanings of `a+` and `a+?` are switched.
-        ///
-        /// This setting can also be configured using the inline flag `U` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"a+")
-        ///     .swap_greed(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert_eq!(Some("a"), re.find("aaa").map(|m| m.as_str()));
-        /// ```
-        pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.swap_greed(yes);
-            self
-        }
-
-        /// This configures verbose mode for the entire pattern.
-        ///
-        /// When enabled, whitespace will treated as insignifcant in the
-        /// pattern and `#` can be used to start a comment until the next new
-        /// line.
-        ///
-        /// Normally, in most places in a pattern, whitespace is treated
-        /// literally. For example ` +` will match one or more ASCII whitespace
-        /// characters.
-        ///
-        /// When verbose mode is enabled, `\#` can be used to match a literal
-        /// `#` and `\ ` can be used to match a literal ASCII whitespace
-        /// character.
-        ///
-        /// Verbose mode is useful for permitting regexes to be formatted and
-        /// broken up more nicely. This may make them more easily readable.
-        ///
-        /// This setting can also be configured using the inline flag `x` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// let pat = r"
-        ///     \b
-        ///     (?<first>\p{Uppercase}\w*)  # always start with uppercase letter
-        ///     [\s--\n]+                   # whitespace should separate names
-        ///     (?: # middle name can be an initial!
-        ///         (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*))
-        ///         [\s--\n]+
-        ///     )?
-        ///     (?<last>\p{Uppercase}\w*)
-        ///     \b
-        /// ";
-        /// let re = RegexBuilder::new(pat)
-        ///     .ignore_whitespace(true)
-        ///     .build()
-        ///     .unwrap();
-        ///
-        /// let caps = re.captures("Harry Potter").unwrap();
-        /// assert_eq!("Harry", &caps["first"]);
-        /// assert_eq!("Potter", &caps["last"]);
-        ///
-        /// let caps = re.captures("Harry J. Potter").unwrap();
-        /// assert_eq!("Harry", &caps["first"]);
-        /// // Since a middle name/initial isn't required for an overall match,
-        /// // we can't assume that 'initial' or 'middle' will be populated!
-        /// assert_eq!(Some("J"), caps.name("initial").map(|m| m.as_str()));
-        /// assert_eq!(None, caps.name("middle").map(|m| m.as_str()));
-        /// assert_eq!("Potter", &caps["last"]);
-        ///
-        /// let caps = re.captures("Harry James Potter").unwrap();
-        /// assert_eq!("Harry", &caps["first"]);
-        /// // Since a middle name/initial isn't required for an overall match,
-        /// // we can't assume that 'initial' or 'middle' will be populated!
-        /// assert_eq!(None, caps.name("initial").map(|m| m.as_str()));
-        /// assert_eq!(Some("James"), caps.name("middle").map(|m| m.as_str()));
-        /// assert_eq!("Potter", &caps["last"]);
-        /// ```
-        pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.ignore_whitespace(yes);
-            self
-        }
-
-        /// This configures octal mode for the entire pattern.
-        ///
-        /// Octal syntax is a little-known way of uttering Unicode codepoints
-        /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
-        /// equivalent patterns, where the last example shows octal syntax.
-        ///
-        /// While supporting octal syntax isn't in and of itself a problem,
-        /// it does make good error messages harder. That is, in PCRE based
-        /// regex engines, syntax like `\1` invokes a backreference, which is
-        /// explicitly unsupported this library. However, many users expect
-        /// backreferences to be supported. Therefore, when octal support
-        /// is disabled, the error message will explicitly mention that
-        /// backreferences aren't supported.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// // Normally this pattern would not compile, with an error message
-        /// // about backreferences not being supported. But with octal mode
-        /// // enabled, octal escape sequences work.
-        /// let re = RegexBuilder::new(r"\141")
-        ///     .octal(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("a"));
-        /// ```
-        pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.octal(yes);
-            self
-        }
-
-        /// Sets the approximate size limit, in bytes, of the compiled regex.
-        ///
-        /// This roughly corresponds to the number of heap memory, in
-        /// bytes, occupied by a single regex. If the regex would otherwise
-        /// approximately exceed this limit, then compiling that regex will
-        /// fail.
-        ///
-        /// The main utility of a method like this is to avoid compiling
-        /// regexes that use an unexpected amount of resources, such as
-        /// time and memory. Even if the memory usage of a large regex is
-        /// acceptable, its search time may not be. Namely, worst case time
-        /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
-        /// `n ~ len(haystack)`. That is, search time depends, in part, on the
-        /// size of the compiled regex. This means that putting a limit on the
-        /// size of the regex limits how much a regex can impact search time.
-        ///
-        /// For more information about regex size limits, see the section on
-        /// [untrusted inputs](crate#untrusted-input) in the top-level crate
-        /// documentation.
-        ///
-        /// The default for this is some reasonable number that permits most
-        /// patterns to compile successfully.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041
-        /// use regex::RegexBuilder;
-        ///
-        /// // It may surprise you how big some seemingly small patterns can
-        /// // be! Since \w is Unicode aware, this generates a regex that can
-        /// // match approximately 140,000 distinct codepoints.
-        /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err());
-        /// ```
-        pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
-            self.builder.size_limit(bytes);
-            self
-        }
-
-        /// Set the approximate capacity, in bytes, of the cache of transitions
-        /// used by the lazy DFA.
-        ///
-        /// While the lazy DFA isn't always used, in tends to be the most
-        /// commonly use regex engine in default configurations. It tends to
-        /// adopt the performance profile of a fully build DFA, but without the
-        /// downside of taking worst case exponential time to build.
-        ///
-        /// The downside is that it needs to keep a cache of transitions and
-        /// states that are built while running a search, and this cache
-        /// can fill up. When it fills up, the cache will reset itself. Any
-        /// previously generated states and transitions will then need to be
-        /// re-generated. If this happens too many times, then this library
-        /// will bail out of using the lazy DFA and switch to a different regex
-        /// engine.
-        ///
-        /// If your regex provokes this particular downside of the lazy DFA,
-        /// then it may be beneficial to increase its cache capacity. This will
-        /// potentially reduce the frequency of cache resetting (ideally to
-        /// `0`). While it won't fix all potential performance problems with
-        /// the lazy DFA, increasing the cache capacity does fix some.
-        ///
-        /// There is no easy way to determine, a priori, whether increasing
-        /// this cache capacity will help. In general, the larger your regex,
-        /// the more cache it's likely to use. But that isn't an ironclad rule.
-        /// For example, a regex like `[01]*1[01]{N}` would normally produce a
-        /// fully build DFA that is exponential in size with respect to `N`.
-        /// The lazy DFA will prevent exponential space blow-up, but it cache
-        /// is likely to fill up, even when it's large and even for smallish
-        /// values of `N`.
-        ///
-        /// If you aren't sure whether this helps or not, it is sensible to
-        /// set this to some arbitrarily large number in testing, such as
-        /// `usize::MAX`. Namely, this represents the amount of capacity that
-        /// *may* be used. It's probably not a good idea to use `usize::MAX` in
-        /// production though, since it implies there are no controls on heap
-        /// memory used by this library during a search. In effect, set it to
-        /// whatever you're willing to allocate for a single regex search.
-        pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
-            self.builder.dfa_size_limit(bytes);
-            self
-        }
-
-        /// Set the nesting limit for this parser.
-        ///
-        /// The nesting limit controls how deep the abstract syntax tree is
-        /// allowed to be. If the AST exceeds the given limit (e.g., with too
-        /// many nested groups), then an error is returned by the parser.
-        ///
-        /// The purpose of this limit is to act as a heuristic to prevent stack
-        /// overflow for consumers that do structural induction on an AST using
-        /// explicit recursion. While this crate never does this (instead using
-        /// constant stack space and moving the call stack to the heap), other
-        /// crates may.
-        ///
-        /// This limit is not checked until the entire AST is parsed.
-        /// Therefore, if callers want to put a limit on the amount of heap
-        /// space used, then they should impose a limit on the length, in
-        /// bytes, of the concrete pattern string. In particular, this is
-        /// viable since this parser implementation will limit itself to heap
-        /// space proportional to the length of the pattern string. See also
-        /// the [untrusted inputs](crate#untrusted-input) section in the
-        /// top-level crate documentation for more information about this.
-        ///
-        /// Note that a nest limit of `0` will return a nest limit error for
-        /// most patterns but not all. For example, a nest limit of `0` permits
-        /// `a` but not `ab`, since `ab` requires an explicit concatenation,
-        /// which results in a nest depth of `1`. In general, a nest limit is
-        /// not something that manifests in an obvious way in the concrete
-        /// syntax, therefore, it should not be used in a granular way.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexBuilder;
-        ///
-        /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok());
-        /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err());
-        /// ```
-        pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder {
-            self.builder.nest_limit(limit);
-            self
-        }
-    }
-
-    /// A configurable builder for a [`RegexSet`].
-    ///
-    /// This builder can be used to programmatically set flags such as
-    /// `i` (case insensitive) and `x` (for verbose mode). This builder
-    /// can also be used to configure things like the line terminator
-    /// and a size limit on the compiled regular expression.
-    #[derive(Clone, Debug)]
-    pub struct RegexSetBuilder {
-        builder: Builder,
-    }
-
-    impl RegexSetBuilder {
-        /// Create a new builder with a default configuration for the given
-        /// patterns.
-        ///
-        /// If the patterns are invalid or exceed the configured size limits,
-        /// then an error will be returned when [`RegexSetBuilder::build`] is
-        /// called.
-        pub fn new<I, S>(patterns: I) -> RegexSetBuilder
-        where
-            I: IntoIterator<Item = S>,
-            S: AsRef<str>,
-        {
-            RegexSetBuilder { builder: Builder::new(patterns) }
-        }
-
-        /// Compiles the patterns given to `RegexSetBuilder::new` with the
-        /// configuration set on this builder.
-        ///
-        /// If the patterns aren't valid regexes or if a configured size limit
-        /// was exceeded, then an error is returned.
-        pub fn build(&self) -> Result<RegexSet, Error> {
-            self.builder.build_many_string()
-        }
-
-        /// This configures Unicode mode for the all of the patterns.
-        ///
-        /// Enabling Unicode mode does a number of things:
-        ///
-        /// * Most fundamentally, it causes the fundamental atom of matching
-        /// to be a single codepoint. When Unicode mode is disabled, it's a
-        /// single byte. For example, when Unicode mode is enabled, `.` will
-        /// match `đŸ’©` once, where as it will match 4 times when Unicode mode
-        /// is disabled. (Since the UTF-8 encoding of `đŸ’©` is 4 bytes long.)
-        /// * Case insensitive matching uses Unicode simple case folding rules.
-        /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are
-        /// available.
-        /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and
-        /// `\d`.
-        /// * The word boundary assertions, `\b` and `\B`, use the Unicode
-        /// definition of a word character.
-        ///
-        /// Note that if Unicode mode is disabled, then the regex will fail to
-        /// compile if it could match invalid UTF-8. For example, when Unicode
-        /// mode is disabled, then since `.` matches any byte (except for
-        /// `\n`), then it can match invalid UTF-8 and thus building a regex
-        /// from it will fail. Another example is `\w` and `\W`. Since `\w` can
-        /// only match ASCII bytes when Unicode mode is disabled, it's allowed.
-        /// But `\W` can match more than ASCII bytes, including invalid UTF-8,
-        /// and so it is not allowed. This restriction can be lifted only by
-        /// using a [`bytes::RegexSet`](crate::bytes::RegexSet).
-        ///
-        /// For more details on the Unicode support in this crate, see the
-        /// [Unicode section](crate#unicode) in this crate's top-level
-        /// documentation.
-        ///
-        /// The default for this is `true`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"\w"])
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally greek letters would be included in \w, but since
-        /// // Unicode mode is disabled, it only matches ASCII letters.
-        /// assert!(!re.is_match("δ"));
-        ///
-        /// let re = RegexSetBuilder::new([r"s"])
-        ///     .case_insensitive(true)
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally 'Ćż' is included when searching for 's' case
-        /// // insensitively due to Unicode's simple case folding rules. But
-        /// // when Unicode mode is disabled, only ASCII case insensitive rules
-        /// // are used.
-        /// assert!(!re.is_match("Ćż"));
-        /// ```
-        pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.unicode(yes);
-            self
-        }
-
-        /// This configures whether to enable case insensitive matching for all
-        /// of the patterns.
-        ///
-        /// This setting can also be configured using the inline flag `i`
-        /// in the pattern. For example, `(?i:foo)` matches `foo` case
-        /// insensitively while `(?-i:foo)` matches `foo` case sensitively.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"])
-        ///     .case_insensitive(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("FoObarQuUx"));
-        /// // Even though case insensitive matching is enabled in the builder,
-        /// // it can be locally disabled within the pattern. In this case,
-        /// // `bar` is matched case sensitively.
-        /// assert!(!re.is_match("fooBARquux"));
-        /// ```
-        pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.case_insensitive(yes);
-            self
-        }
-
-        /// This configures multi-line mode for all of the patterns.
-        ///
-        /// Enabling multi-line mode changes the behavior of the `^` and `$`
-        /// anchor assertions. Instead of only matching at the beginning and
-        /// end of a haystack, respectively, multi-line mode causes them to
-        /// match at the beginning and end of a line *in addition* to the
-        /// beginning and end of a haystack. More precisely, `^` will match at
-        /// the position immediately following a `\n` and `$` will match at the
-        /// position immediately preceding a `\n`.
-        ///
-        /// The behavior of this option can be impacted by other settings too:
-        ///
-        /// * The [`RegexSetBuilder::line_terminator`] option changes `\n`
-        /// above to any ASCII byte.
-        /// * The [`RegexSetBuilder::crlf`] option changes the line terminator
-        /// to be either `\r` or `\n`, but never at the position between a `\r`
-        /// and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `m` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^foo$"])
-        ///     .multi_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("\nfoo\n"));
-        /// ```
-        pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.multi_line(yes);
-            self
-        }
-
-        /// This configures dot-matches-new-line mode for the entire pattern.
-        ///
-        /// Perhaps surprisingly, the default behavior for `.` is not to match
-        /// any character, but rather, to match any character except for the
-        /// line terminator (which is `\n` by default). When this mode is
-        /// enabled, the behavior changes such that `.` truly matches any
-        /// character.
-        ///
-        /// This setting can also be configured using the inline flag `s` in
-        /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent
-        /// regexes.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"foo.bar"])
-        ///     .dot_matches_new_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = "foo\nbar";
-        /// assert!(re.is_match(hay));
-        /// ```
-        pub fn dot_matches_new_line(
-            &mut self,
-            yes: bool,
-        ) -> &mut RegexSetBuilder {
-            self.builder.dot_matches_new_line(yes);
-            self
-        }
-
-        /// This configures CRLF mode for all of the patterns.
-        ///
-        /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for
-        /// short) and `\n` ("line feed" or LF for short) are treated as line
-        /// terminators. This results in the following:
-        ///
-        /// * Unless dot-matches-new-line mode is enabled, `.` will now match
-        /// any character except for `\n` and `\r`.
-        /// * When multi-line mode is enabled, `^` will match immediately
-        /// following a `\n` or a `\r`. Similarly, `$` will match immediately
-        /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match
-        /// between `\r` and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `R` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^foo$"])
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = "\r\nfoo\r\n";
-        /// // If CRLF mode weren't enabled here, then '$' wouldn't match
-        /// // immediately after 'foo', and thus no match would be found.
-        /// assert!(re.is_match(hay));
-        /// ```
-        ///
-        /// This example demonstrates that `^` will never match at a position
-        /// between `\r` and `\n`. (`$` will similarly not match between a `\r`
-        /// and a `\n`.)
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^\n"])
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(!re.is_match("\r\n"));
-        /// ```
-        pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.crlf(yes);
-            self
-        }
-
-        /// Configures the line terminator to be used by the regex.
-        ///
-        /// The line terminator is relevant in two ways for a particular regex:
-        ///
-        /// * When dot-matches-new-line mode is *not* enabled (the default),
-        /// then `.` will match any character except for the configured line
-        /// terminator.
-        /// * When multi-line mode is enabled (not the default), then `^` and
-        /// `$` will match immediately after and before, respectively, a line
-        /// terminator.
-        ///
-        /// In both cases, if CRLF mode is enabled in a particular context,
-        /// then it takes precedence over any configured line terminator.
-        ///
-        /// This option cannot be configured from within the pattern.
-        ///
-        /// The default line terminator is `\n`.
-        ///
-        /// # Example
-        ///
-        /// This shows how to treat the NUL byte as a line terminator. This can
-        /// be a useful heuristic when searching binary data.
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^foo$"])
-        ///     .multi_line(true)
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = "\x00foo\x00";
-        /// assert!(re.is_match(hay));
-        /// ```
-        ///
-        /// This example shows that the behavior of `.` is impacted by this
-        /// setting as well:
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"."])
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("\n"));
-        /// assert!(!re.is_match("\x00"));
-        /// ```
-        ///
-        /// This shows that building a regex will fail if the byte given
-        /// is not ASCII and the pattern could result in matching invalid
-        /// UTF-8. This is because any singular non-ASCII byte is not valid
-        /// UTF-8, and it is not permitted for a [`RegexSet`] to match invalid
-        /// UTF-8. (It is permissible to use a non-ASCII byte when building a
-        /// [`bytes::RegexSet`](crate::bytes::RegexSet).)
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// assert!(
-        ///     RegexSetBuilder::new([r"."])
-        ///         .line_terminator(0x80)
-        ///         .build()
-        ///         .is_err()
-        /// );
-        /// // Note that using a non-ASCII byte isn't enough on its own to
-        /// // cause regex compilation to fail. You actually have to make use
-        /// // of it in the regex in a way that leads to matching invalid
-        /// // UTF-8. If you don't, then regex compilation will succeed!
-        /// assert!(
-        ///     RegexSetBuilder::new([r"a"])
-        ///         .line_terminator(0x80)
-        ///         .build()
-        ///         .is_ok()
-        /// );
-        /// ```
-        pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder {
-            self.builder.line_terminator(byte);
-            self
-        }
-
-        /// This configures swap-greed mode for all of the patterns.
-        ///
-        /// When swap-greed mode is enabled, patterns like `a+` will become
-        /// non-greedy and patterns like `a+?` will become greedy. In other
-        /// words, the meanings of `a+` and `a+?` are switched.
-        ///
-        /// This setting can also be configured using the inline flag `U` in
-        /// the pattern.
-        ///
-        /// Note that this is generally not useful for a `RegexSet` since a
-        /// `RegexSet` can only report whether a pattern matches or not. Since
-        /// greediness never impacts whether a match is found or not (only the
-        /// offsets of the match), it follows that whether parts of a pattern
-        /// are greedy or not doesn't matter for a `RegexSet`.
-        ///
-        /// The default for this is `false`.
-        pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.swap_greed(yes);
-            self
-        }
-
-        /// This configures verbose mode for all of the patterns.
-        ///
-        /// When enabled, whitespace will treated as insignifcant in the
-        /// pattern and `#` can be used to start a comment until the next new
-        /// line.
-        ///
-        /// Normally, in most places in a pattern, whitespace is treated
-        /// literally. For example ` +` will match one or more ASCII whitespace
-        /// characters.
-        ///
-        /// When verbose mode is enabled, `\#` can be used to match a literal
-        /// `#` and `\ ` can be used to match a literal ASCII whitespace
-        /// character.
-        ///
-        /// Verbose mode is useful for permitting regexes to be formatted and
-        /// broken up more nicely. This may make them more easily readable.
-        ///
-        /// This setting can also be configured using the inline flag `x` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// let pat = r"
-        ///     \b
-        ///     (?<first>\p{Uppercase}\w*)  # always start with uppercase letter
-        ///     [\s--\n]+                   # whitespace should separate names
-        ///     (?: # middle name can be an initial!
-        ///         (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*))
-        ///         [\s--\n]+
-        ///     )?
-        ///     (?<last>\p{Uppercase}\w*)
-        ///     \b
-        /// ";
-        /// let re = RegexSetBuilder::new([pat])
-        ///     .ignore_whitespace(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("Harry Potter"));
-        /// assert!(re.is_match("Harry J. Potter"));
-        /// assert!(re.is_match("Harry James Potter"));
-        /// assert!(!re.is_match("harry J. Potter"));
-        /// ```
-        pub fn ignore_whitespace(
-            &mut self,
-            yes: bool,
-        ) -> &mut RegexSetBuilder {
-            self.builder.ignore_whitespace(yes);
-            self
-        }
-
-        /// This configures octal mode for all of the patterns.
-        ///
-        /// Octal syntax is a little-known way of uttering Unicode codepoints
-        /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
-        /// equivalent patterns, where the last example shows octal syntax.
-        ///
-        /// While supporting octal syntax isn't in and of itself a problem,
-        /// it does make good error messages harder. That is, in PCRE based
-        /// regex engines, syntax like `\1` invokes a backreference, which is
-        /// explicitly unsupported this library. However, many users expect
-        /// backreferences to be supported. Therefore, when octal support
-        /// is disabled, the error message will explicitly mention that
-        /// backreferences aren't supported.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// // Normally this pattern would not compile, with an error message
-        /// // about backreferences not being supported. But with octal mode
-        /// // enabled, octal escape sequences work.
-        /// let re = RegexSetBuilder::new([r"\141"])
-        ///     .octal(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match("a"));
-        /// ```
-        pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.octal(yes);
-            self
-        }
-
-        /// Sets the approximate size limit, in bytes, of the compiled regex.
-        ///
-        /// This roughly corresponds to the number of heap memory, in
-        /// bytes, occupied by a single regex. If the regex would otherwise
-        /// approximately exceed this limit, then compiling that regex will
-        /// fail.
-        ///
-        /// The main utility of a method like this is to avoid compiling
-        /// regexes that use an unexpected amount of resources, such as
-        /// time and memory. Even if the memory usage of a large regex is
-        /// acceptable, its search time may not be. Namely, worst case time
-        /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
-        /// `n ~ len(haystack)`. That is, search time depends, in part, on the
-        /// size of the compiled regex. This means that putting a limit on the
-        /// size of the regex limits how much a regex can impact search time.
-        ///
-        /// For more information about regex size limits, see the section on
-        /// [untrusted inputs](crate#untrusted-input) in the top-level crate
-        /// documentation.
-        ///
-        /// The default for this is some reasonable number that permits most
-        /// patterns to compile successfully.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041
-        /// use regex::RegexSetBuilder;
-        ///
-        /// // It may surprise you how big some seemingly small patterns can
-        /// // be! Since \w is Unicode aware, this generates a regex that can
-        /// // match approximately 140,000 distinct codepoints.
-        /// assert!(
-        ///     RegexSetBuilder::new([r"\w"])
-        ///         .size_limit(45_000)
-        ///         .build()
-        ///         .is_err()
-        /// );
-        /// ```
-        pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder {
-            self.builder.size_limit(bytes);
-            self
-        }
-
-        /// Set the approximate capacity, in bytes, of the cache of transitions
-        /// used by the lazy DFA.
-        ///
-        /// While the lazy DFA isn't always used, in tends to be the most
-        /// commonly use regex engine in default configurations. It tends to
-        /// adopt the performance profile of a fully build DFA, but without the
-        /// downside of taking worst case exponential time to build.
-        ///
-        /// The downside is that it needs to keep a cache of transitions and
-        /// states that are built while running a search, and this cache
-        /// can fill up. When it fills up, the cache will reset itself. Any
-        /// previously generated states and transitions will then need to be
-        /// re-generated. If this happens too many times, then this library
-        /// will bail out of using the lazy DFA and switch to a different regex
-        /// engine.
-        ///
-        /// If your regex provokes this particular downside of the lazy DFA,
-        /// then it may be beneficial to increase its cache capacity. This will
-        /// potentially reduce the frequency of cache resetting (ideally to
-        /// `0`). While it won't fix all potential performance problems with
-        /// the lazy DFA, increasing the cache capacity does fix some.
-        ///
-        /// There is no easy way to determine, a priori, whether increasing
-        /// this cache capacity will help. In general, the larger your regex,
-        /// the more cache it's likely to use. But that isn't an ironclad rule.
-        /// For example, a regex like `[01]*1[01]{N}` would normally produce a
-        /// fully build DFA that is exponential in size with respect to `N`.
-        /// The lazy DFA will prevent exponential space blow-up, but it cache
-        /// is likely to fill up, even when it's large and even for smallish
-        /// values of `N`.
-        ///
-        /// If you aren't sure whether this helps or not, it is sensible to
-        /// set this to some arbitrarily large number in testing, such as
-        /// `usize::MAX`. Namely, this represents the amount of capacity that
-        /// *may* be used. It's probably not a good idea to use `usize::MAX` in
-        /// production though, since it implies there are no controls on heap
-        /// memory used by this library during a search. In effect, set it to
-        /// whatever you're willing to allocate for a single regex search.
-        pub fn dfa_size_limit(
-            &mut self,
-            bytes: usize,
-        ) -> &mut RegexSetBuilder {
-            self.builder.dfa_size_limit(bytes);
-            self
-        }
-
-        /// Set the nesting limit for this parser.
-        ///
-        /// The nesting limit controls how deep the abstract syntax tree is
-        /// allowed to be. If the AST exceeds the given limit (e.g., with too
-        /// many nested groups), then an error is returned by the parser.
-        ///
-        /// The purpose of this limit is to act as a heuristic to prevent stack
-        /// overflow for consumers that do structural induction on an AST using
-        /// explicit recursion. While this crate never does this (instead using
-        /// constant stack space and moving the call stack to the heap), other
-        /// crates may.
-        ///
-        /// This limit is not checked until the entire AST is parsed.
-        /// Therefore, if callers want to put a limit on the amount of heap
-        /// space used, then they should impose a limit on the length, in
-        /// bytes, of the concrete pattern string. In particular, this is
-        /// viable since this parser implementation will limit itself to heap
-        /// space proportional to the length of the pattern string. See also
-        /// the [untrusted inputs](crate#untrusted-input) section in the
-        /// top-level crate documentation for more information about this.
-        ///
-        /// Note that a nest limit of `0` will return a nest limit error for
-        /// most patterns but not all. For example, a nest limit of `0` permits
-        /// `a` but not `ab`, since `ab` requires an explicit concatenation,
-        /// which results in a nest depth of `1`. In general, a nest limit is
-        /// not something that manifests in an obvious way in the concrete
-        /// syntax, therefore, it should not be used in a granular way.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::RegexSetBuilder;
-        ///
-        /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok());
-        /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err());
-        /// ```
-        pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder {
-            self.builder.nest_limit(limit);
-            self
-        }
-    }
-}
-
-pub(crate) mod bytes {
-    use crate::{
-        bytes::{Regex, RegexSet},
-        error::Error,
-    };
-
-    use super::Builder;
-
-    /// A configurable builder for a [`Regex`].
-    ///
-    /// This builder can be used to programmatically set flags such as `i`
-    /// (case insensitive) and `x` (for verbose mode). This builder can also be
-    /// used to configure things like the line terminator and a size limit on
-    /// the compiled regular expression.
-    #[derive(Clone, Debug)]
-    pub struct RegexBuilder {
-        builder: Builder,
-    }
-
-    impl RegexBuilder {
-        /// Create a new builder with a default configuration for the given
-        /// pattern.
-        ///
-        /// If the pattern is invalid or exceeds the configured size limits,
-        /// then an error will be returned when [`RegexBuilder::build`] is
-        /// called.
-        pub fn new(pattern: &str) -> RegexBuilder {
-            RegexBuilder { builder: Builder::new([pattern]) }
-        }
-
-        /// Compiles the pattern given to `RegexBuilder::new` with the
-        /// configuration set on this builder.
-        ///
-        /// If the pattern isn't a valid regex or if a configured size limit
-        /// was exceeded, then an error is returned.
-        pub fn build(&self) -> Result<Regex, Error> {
-            self.builder.build_one_bytes()
-        }
-
-        /// This configures Unicode mode for the entire pattern.
-        ///
-        /// Enabling Unicode mode does a number of things:
-        ///
-        /// * Most fundamentally, it causes the fundamental atom of matching
-        /// to be a single codepoint. When Unicode mode is disabled, it's a
-        /// single byte. For example, when Unicode mode is enabled, `.` will
-        /// match `đŸ’©` once, where as it will match 4 times when Unicode mode
-        /// is disabled. (Since the UTF-8 encoding of `đŸ’©` is 4 bytes long.)
-        /// * Case insensitive matching uses Unicode simple case folding rules.
-        /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are
-        /// available.
-        /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and
-        /// `\d`.
-        /// * The word boundary assertions, `\b` and `\B`, use the Unicode
-        /// definition of a word character.
-        ///
-        /// Note that unlike the top-level `Regex` for searching `&str`, it
-        /// is permitted to disable Unicode mode even if the resulting pattern
-        /// could match invalid UTF-8. For example, `(?-u:.)` is not a valid
-        /// pattern for a top-level `Regex`, but is valid for a `bytes::Regex`.
-        ///
-        /// For more details on the Unicode support in this crate, see the
-        /// [Unicode section](crate#unicode) in this crate's top-level
-        /// documentation.
-        ///
-        /// The default for this is `true`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"\w")
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally greek letters would be included in \w, but since
-        /// // Unicode mode is disabled, it only matches ASCII letters.
-        /// assert!(!re.is_match("δ".as_bytes()));
-        ///
-        /// let re = RegexBuilder::new(r"s")
-        ///     .case_insensitive(true)
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally 'Ćż' is included when searching for 's' case
-        /// // insensitively due to Unicode's simple case folding rules. But
-        /// // when Unicode mode is disabled, only ASCII case insensitive rules
-        /// // are used.
-        /// assert!(!re.is_match("Ćż".as_bytes()));
-        /// ```
-        ///
-        /// Since this builder is for constructing a [`bytes::Regex`](Regex),
-        /// one can disable Unicode mode even if it would match invalid UTF-8:
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r".")
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally greek letters would be included in \w, but since
-        /// // Unicode mode is disabled, it only matches ASCII letters.
-        /// assert!(re.is_match(b"\xFF"));
-        /// ```
-        pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.unicode(yes);
-            self
-        }
-
-        /// This configures whether to enable case insensitive matching for the
-        /// entire pattern.
-        ///
-        /// This setting can also be configured using the inline flag `i`
-        /// in the pattern. For example, `(?i:foo)` matches `foo` case
-        /// insensitively while `(?-i:foo)` matches `foo` case sensitively.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"foo(?-i:bar)quux")
-        ///     .case_insensitive(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"FoObarQuUx"));
-        /// // Even though case insensitive matching is enabled in the builder,
-        /// // it can be locally disabled within the pattern. In this case,
-        /// // `bar` is matched case sensitively.
-        /// assert!(!re.is_match(b"fooBARquux"));
-        /// ```
-        pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.case_insensitive(yes);
-            self
-        }
-
-        /// This configures multi-line mode for the entire pattern.
-        ///
-        /// Enabling multi-line mode changes the behavior of the `^` and `$`
-        /// anchor assertions. Instead of only matching at the beginning and
-        /// end of a haystack, respectively, multi-line mode causes them to
-        /// match at the beginning and end of a line *in addition* to the
-        /// beginning and end of a haystack. More precisely, `^` will match at
-        /// the position immediately following a `\n` and `$` will match at the
-        /// position immediately preceding a `\n`.
-        ///
-        /// The behavior of this option can be impacted by other settings too:
-        ///
-        /// * The [`RegexBuilder::line_terminator`] option changes `\n` above
-        /// to any ASCII byte.
-        /// * The [`RegexBuilder::crlf`] option changes the line terminator to
-        /// be either `\r` or `\n`, but never at the position between a `\r`
-        /// and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `m` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^foo$")
-        ///     .multi_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert_eq!(Some(1..4), re.find(b"\nfoo\n").map(|m| m.range()));
-        /// ```
-        pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.multi_line(yes);
-            self
-        }
-
-        /// This configures dot-matches-new-line mode for the entire pattern.
-        ///
-        /// Perhaps surprisingly, the default behavior for `.` is not to match
-        /// any character, but rather, to match any character except for the
-        /// line terminator (which is `\n` by default). When this mode is
-        /// enabled, the behavior changes such that `.` truly matches any
-        /// character.
-        ///
-        /// This setting can also be configured using the inline flag `s` in
-        /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent
-        /// regexes.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"foo.bar")
-        ///     .dot_matches_new_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = b"foo\nbar";
-        /// assert_eq!(Some(&b"foo\nbar"[..]), re.find(hay).map(|m| m.as_bytes()));
-        /// ```
-        pub fn dot_matches_new_line(
-            &mut self,
-            yes: bool,
-        ) -> &mut RegexBuilder {
-            self.builder.dot_matches_new_line(yes);
-            self
-        }
-
-        /// This configures CRLF mode for the entire pattern.
-        ///
-        /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for
-        /// short) and `\n` ("line feed" or LF for short) are treated as line
-        /// terminators. This results in the following:
-        ///
-        /// * Unless dot-matches-new-line mode is enabled, `.` will now match
-        /// any character except for `\n` and `\r`.
-        /// * When multi-line mode is enabled, `^` will match immediately
-        /// following a `\n` or a `\r`. Similarly, `$` will match immediately
-        /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match
-        /// between `\r` and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `R` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^foo$")
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = b"\r\nfoo\r\n";
-        /// // If CRLF mode weren't enabled here, then '$' wouldn't match
-        /// // immediately after 'foo', and thus no match would be found.
-        /// assert_eq!(Some(&b"foo"[..]), re.find(hay).map(|m| m.as_bytes()));
-        /// ```
-        ///
-        /// This example demonstrates that `^` will never match at a position
-        /// between `\r` and `\n`. (`$` will similarly not match between a `\r`
-        /// and a `\n`.)
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^")
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = b"\r\n\r\n";
-        /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect();
-        /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]);
-        /// ```
-        pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.crlf(yes);
-            self
-        }
-
-        /// Configures the line terminator to be used by the regex.
-        ///
-        /// The line terminator is relevant in two ways for a particular regex:
-        ///
-        /// * When dot-matches-new-line mode is *not* enabled (the default),
-        /// then `.` will match any character except for the configured line
-        /// terminator.
-        /// * When multi-line mode is enabled (not the default), then `^` and
-        /// `$` will match immediately after and before, respectively, a line
-        /// terminator.
-        ///
-        /// In both cases, if CRLF mode is enabled in a particular context,
-        /// then it takes precedence over any configured line terminator.
-        ///
-        /// This option cannot be configured from within the pattern.
-        ///
-        /// The default line terminator is `\n`.
-        ///
-        /// # Example
-        ///
-        /// This shows how to treat the NUL byte as a line terminator. This can
-        /// be a useful heuristic when searching binary data.
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"^foo$")
-        ///     .multi_line(true)
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = b"\x00foo\x00";
-        /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range()));
-        /// ```
-        ///
-        /// This example shows that the behavior of `.` is impacted by this
-        /// setting as well:
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r".")
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"\n"));
-        /// assert!(!re.is_match(b"\x00"));
-        /// ```
-        ///
-        /// This shows that building a regex will work even when the byte
-        /// given is not ASCII. This is unlike the top-level `Regex` API where
-        /// matching invalid UTF-8 is not allowed.
-        ///
-        /// Note though that you must disable Unicode mode. This is required
-        /// because Unicode mode requires matching one codepoint at a time,
-        /// and there is no way to match a non-ASCII byte as if it were a
-        /// codepoint.
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// assert!(
-        ///     RegexBuilder::new(r".")
-        ///         .unicode(false)
-        ///         .line_terminator(0x80)
-        ///         .build()
-        ///         .is_ok(),
-        /// );
-        /// ```
-        pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder {
-            self.builder.line_terminator(byte);
-            self
-        }
-
-        /// This configures swap-greed mode for the entire pattern.
-        ///
-        /// When swap-greed mode is enabled, patterns like `a+` will become
-        /// non-greedy and patterns like `a+?` will become greedy. In other
-        /// words, the meanings of `a+` and `a+?` are switched.
-        ///
-        /// This setting can also be configured using the inline flag `U` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let re = RegexBuilder::new(r"a+")
-        ///     .swap_greed(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert_eq!(Some(&b"a"[..]), re.find(b"aaa").map(|m| m.as_bytes()));
-        /// ```
-        pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.swap_greed(yes);
-            self
-        }
-
-        /// This configures verbose mode for the entire pattern.
-        ///
-        /// When enabled, whitespace will treated as insignifcant in the
-        /// pattern and `#` can be used to start a comment until the next new
-        /// line.
-        ///
-        /// Normally, in most places in a pattern, whitespace is treated
-        /// literally. For example ` +` will match one or more ASCII whitespace
-        /// characters.
-        ///
-        /// When verbose mode is enabled, `\#` can be used to match a literal
-        /// `#` and `\ ` can be used to match a literal ASCII whitespace
-        /// character.
-        ///
-        /// Verbose mode is useful for permitting regexes to be formatted and
-        /// broken up more nicely. This may make them more easily readable.
-        ///
-        /// This setting can also be configured using the inline flag `x` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// let pat = r"
-        ///     \b
-        ///     (?<first>\p{Uppercase}\w*)  # always start with uppercase letter
-        ///     [\s--\n]+                   # whitespace should separate names
-        ///     (?: # middle name can be an initial!
-        ///         (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*))
-        ///         [\s--\n]+
-        ///     )?
-        ///     (?<last>\p{Uppercase}\w*)
-        ///     \b
-        /// ";
-        /// let re = RegexBuilder::new(pat)
-        ///     .ignore_whitespace(true)
-        ///     .build()
-        ///     .unwrap();
-        ///
-        /// let caps = re.captures(b"Harry Potter").unwrap();
-        /// assert_eq!(&b"Harry"[..], &caps["first"]);
-        /// assert_eq!(&b"Potter"[..], &caps["last"]);
-        ///
-        /// let caps = re.captures(b"Harry J. Potter").unwrap();
-        /// assert_eq!(&b"Harry"[..], &caps["first"]);
-        /// // Since a middle name/initial isn't required for an overall match,
-        /// // we can't assume that 'initial' or 'middle' will be populated!
-        /// assert_eq!(
-        ///     Some(&b"J"[..]),
-        ///     caps.name("initial").map(|m| m.as_bytes()),
-        /// );
-        /// assert_eq!(None, caps.name("middle").map(|m| m.as_bytes()));
-        /// assert_eq!(&b"Potter"[..], &caps["last"]);
-        ///
-        /// let caps = re.captures(b"Harry James Potter").unwrap();
-        /// assert_eq!(&b"Harry"[..], &caps["first"]);
-        /// // Since a middle name/initial isn't required for an overall match,
-        /// // we can't assume that 'initial' or 'middle' will be populated!
-        /// assert_eq!(None, caps.name("initial").map(|m| m.as_bytes()));
-        /// assert_eq!(
-        ///     Some(&b"James"[..]),
-        ///     caps.name("middle").map(|m| m.as_bytes()),
-        /// );
-        /// assert_eq!(&b"Potter"[..], &caps["last"]);
-        /// ```
-        pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.ignore_whitespace(yes);
-            self
-        }
-
-        /// This configures octal mode for the entire pattern.
-        ///
-        /// Octal syntax is a little-known way of uttering Unicode codepoints
-        /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
-        /// equivalent patterns, where the last example shows octal syntax.
-        ///
-        /// While supporting octal syntax isn't in and of itself a problem,
-        /// it does make good error messages harder. That is, in PCRE based
-        /// regex engines, syntax like `\1` invokes a backreference, which is
-        /// explicitly unsupported this library. However, many users expect
-        /// backreferences to be supported. Therefore, when octal support
-        /// is disabled, the error message will explicitly mention that
-        /// backreferences aren't supported.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// // Normally this pattern would not compile, with an error message
-        /// // about backreferences not being supported. But with octal mode
-        /// // enabled, octal escape sequences work.
-        /// let re = RegexBuilder::new(r"\141")
-        ///     .octal(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"a"));
-        /// ```
-        pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder {
-            self.builder.octal(yes);
-            self
-        }
-
-        /// Sets the approximate size limit, in bytes, of the compiled regex.
-        ///
-        /// This roughly corresponds to the number of heap memory, in
-        /// bytes, occupied by a single regex. If the regex would otherwise
-        /// approximately exceed this limit, then compiling that regex will
-        /// fail.
-        ///
-        /// The main utility of a method like this is to avoid compiling
-        /// regexes that use an unexpected amount of resources, such as
-        /// time and memory. Even if the memory usage of a large regex is
-        /// acceptable, its search time may not be. Namely, worst case time
-        /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
-        /// `n ~ len(haystack)`. That is, search time depends, in part, on the
-        /// size of the compiled regex. This means that putting a limit on the
-        /// size of the regex limits how much a regex can impact search time.
-        ///
-        /// For more information about regex size limits, see the section on
-        /// [untrusted inputs](crate#untrusted-input) in the top-level crate
-        /// documentation.
-        ///
-        /// The default for this is some reasonable number that permits most
-        /// patterns to compile successfully.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// // It may surprise you how big some seemingly small patterns can
-        /// // be! Since \w is Unicode aware, this generates a regex that can
-        /// // match approximately 140,000 distinct codepoints.
-        /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err());
-        /// ```
-        pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
-            self.builder.size_limit(bytes);
-            self
-        }
-
-        /// Set the approximate capacity, in bytes, of the cache of transitions
-        /// used by the lazy DFA.
-        ///
-        /// While the lazy DFA isn't always used, in tends to be the most
-        /// commonly use regex engine in default configurations. It tends to
-        /// adopt the performance profile of a fully build DFA, but without the
-        /// downside of taking worst case exponential time to build.
-        ///
-        /// The downside is that it needs to keep a cache of transitions and
-        /// states that are built while running a search, and this cache
-        /// can fill up. When it fills up, the cache will reset itself. Any
-        /// previously generated states and transitions will then need to be
-        /// re-generated. If this happens too many times, then this library
-        /// will bail out of using the lazy DFA and switch to a different regex
-        /// engine.
-        ///
-        /// If your regex provokes this particular downside of the lazy DFA,
-        /// then it may be beneficial to increase its cache capacity. This will
-        /// potentially reduce the frequency of cache resetting (ideally to
-        /// `0`). While it won't fix all potential performance problems with
-        /// the lazy DFA, increasing the cache capacity does fix some.
-        ///
-        /// There is no easy way to determine, a priori, whether increasing
-        /// this cache capacity will help. In general, the larger your regex,
-        /// the more cache it's likely to use. But that isn't an ironclad rule.
-        /// For example, a regex like `[01]*1[01]{N}` would normally produce a
-        /// fully build DFA that is exponential in size with respect to `N`.
-        /// The lazy DFA will prevent exponential space blow-up, but it cache
-        /// is likely to fill up, even when it's large and even for smallish
-        /// values of `N`.
-        ///
-        /// If you aren't sure whether this helps or not, it is sensible to
-        /// set this to some arbitrarily large number in testing, such as
-        /// `usize::MAX`. Namely, this represents the amount of capacity that
-        /// *may* be used. It's probably not a good idea to use `usize::MAX` in
-        /// production though, since it implies there are no controls on heap
-        /// memory used by this library during a search. In effect, set it to
-        /// whatever you're willing to allocate for a single regex search.
-        pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder {
-            self.builder.dfa_size_limit(bytes);
-            self
-        }
-
-        /// Set the nesting limit for this parser.
-        ///
-        /// The nesting limit controls how deep the abstract syntax tree is
-        /// allowed to be. If the AST exceeds the given limit (e.g., with too
-        /// many nested groups), then an error is returned by the parser.
-        ///
-        /// The purpose of this limit is to act as a heuristic to prevent stack
-        /// overflow for consumers that do structural induction on an AST using
-        /// explicit recursion. While this crate never does this (instead using
-        /// constant stack space and moving the call stack to the heap), other
-        /// crates may.
-        ///
-        /// This limit is not checked until the entire AST is parsed.
-        /// Therefore, if callers want to put a limit on the amount of heap
-        /// space used, then they should impose a limit on the length, in
-        /// bytes, of the concrete pattern string. In particular, this is
-        /// viable since this parser implementation will limit itself to heap
-        /// space proportional to the length of the pattern string. See also
-        /// the [untrusted inputs](crate#untrusted-input) section in the
-        /// top-level crate documentation for more information about this.
-        ///
-        /// Note that a nest limit of `0` will return a nest limit error for
-        /// most patterns but not all. For example, a nest limit of `0` permits
-        /// `a` but not `ab`, since `ab` requires an explicit concatenation,
-        /// which results in a nest depth of `1`. In general, a nest limit is
-        /// not something that manifests in an obvious way in the concrete
-        /// syntax, therefore, it should not be used in a granular way.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexBuilder;
-        ///
-        /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok());
-        /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err());
-        /// ```
-        pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder {
-            self.builder.nest_limit(limit);
-            self
-        }
-    }
-
-    /// A configurable builder for a [`RegexSet`].
-    ///
-    /// This builder can be used to programmatically set flags such as `i`
-    /// (case insensitive) and `x` (for verbose mode). This builder can also be
-    /// used to configure things like the line terminator and a size limit on
-    /// the compiled regular expression.
-    #[derive(Clone, Debug)]
-    pub struct RegexSetBuilder {
-        builder: Builder,
-    }
-
-    impl RegexSetBuilder {
-        /// Create a new builder with a default configuration for the given
-        /// patterns.
-        ///
-        /// If the patterns are invalid or exceed the configured size limits,
-        /// then an error will be returned when [`RegexSetBuilder::build`] is
-        /// called.
-        pub fn new<I, S>(patterns: I) -> RegexSetBuilder
-        where
-            I: IntoIterator<Item = S>,
-            S: AsRef<str>,
-        {
-            RegexSetBuilder { builder: Builder::new(patterns) }
-        }
-
-        /// Compiles the patterns given to `RegexSetBuilder::new` with the
-        /// configuration set on this builder.
-        ///
-        /// If the patterns aren't valid regexes or if a configured size limit
-        /// was exceeded, then an error is returned.
-        pub fn build(&self) -> Result<RegexSet, Error> {
-            self.builder.build_many_bytes()
-        }
-
-        /// This configures Unicode mode for the all of the patterns.
-        ///
-        /// Enabling Unicode mode does a number of things:
-        ///
-        /// * Most fundamentally, it causes the fundamental atom of matching
-        /// to be a single codepoint. When Unicode mode is disabled, it's a
-        /// single byte. For example, when Unicode mode is enabled, `.` will
-        /// match `đŸ’©` once, where as it will match 4 times when Unicode mode
-        /// is disabled. (Since the UTF-8 encoding of `đŸ’©` is 4 bytes long.)
-        /// * Case insensitive matching uses Unicode simple case folding rules.
-        /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are
-        /// available.
-        /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and
-        /// `\d`.
-        /// * The word boundary assertions, `\b` and `\B`, use the Unicode
-        /// definition of a word character.
-        ///
-        /// Note that unlike the top-level `RegexSet` for searching `&str`,
-        /// it is permitted to disable Unicode mode even if the resulting
-        /// pattern could match invalid UTF-8. For example, `(?-u:.)` is not
-        /// a valid pattern for a top-level `RegexSet`, but is valid for a
-        /// `bytes::RegexSet`.
-        ///
-        /// For more details on the Unicode support in this crate, see the
-        /// [Unicode section](crate#unicode) in this crate's top-level
-        /// documentation.
-        ///
-        /// The default for this is `true`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"\w"])
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally greek letters would be included in \w, but since
-        /// // Unicode mode is disabled, it only matches ASCII letters.
-        /// assert!(!re.is_match("δ".as_bytes()));
-        ///
-        /// let re = RegexSetBuilder::new([r"s"])
-        ///     .case_insensitive(true)
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally 'Ćż' is included when searching for 's' case
-        /// // insensitively due to Unicode's simple case folding rules. But
-        /// // when Unicode mode is disabled, only ASCII case insensitive rules
-        /// // are used.
-        /// assert!(!re.is_match("Ćż".as_bytes()));
-        /// ```
-        ///
-        /// Since this builder is for constructing a
-        /// [`bytes::RegexSet`](RegexSet), one can disable Unicode mode even if
-        /// it would match invalid UTF-8:
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"."])
-        ///     .unicode(false)
-        ///     .build()
-        ///     .unwrap();
-        /// // Normally greek letters would be included in \w, but since
-        /// // Unicode mode is disabled, it only matches ASCII letters.
-        /// assert!(re.is_match(b"\xFF"));
-        /// ```
-        pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.unicode(yes);
-            self
-        }
-
-        /// This configures whether to enable case insensitive matching for all
-        /// of the patterns.
-        ///
-        /// This setting can also be configured using the inline flag `i`
-        /// in the pattern. For example, `(?i:foo)` matches `foo` case
-        /// insensitively while `(?-i:foo)` matches `foo` case sensitively.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"])
-        ///     .case_insensitive(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"FoObarQuUx"));
-        /// // Even though case insensitive matching is enabled in the builder,
-        /// // it can be locally disabled within the pattern. In this case,
-        /// // `bar` is matched case sensitively.
-        /// assert!(!re.is_match(b"fooBARquux"));
-        /// ```
-        pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.case_insensitive(yes);
-            self
-        }
-
-        /// This configures multi-line mode for all of the patterns.
-        ///
-        /// Enabling multi-line mode changes the behavior of the `^` and `$`
-        /// anchor assertions. Instead of only matching at the beginning and
-        /// end of a haystack, respectively, multi-line mode causes them to
-        /// match at the beginning and end of a line *in addition* to the
-        /// beginning and end of a haystack. More precisely, `^` will match at
-        /// the position immediately following a `\n` and `$` will match at the
-        /// position immediately preceding a `\n`.
-        ///
-        /// The behavior of this option can be impacted by other settings too:
-        ///
-        /// * The [`RegexSetBuilder::line_terminator`] option changes `\n`
-        /// above to any ASCII byte.
-        /// * The [`RegexSetBuilder::crlf`] option changes the line terminator
-        /// to be either `\r` or `\n`, but never at the position between a `\r`
-        /// and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `m` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^foo$"])
-        ///     .multi_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"\nfoo\n"));
-        /// ```
-        pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.multi_line(yes);
-            self
-        }
-
-        /// This configures dot-matches-new-line mode for the entire pattern.
-        ///
-        /// Perhaps surprisingly, the default behavior for `.` is not to match
-        /// any character, but rather, to match any character except for the
-        /// line terminator (which is `\n` by default). When this mode is
-        /// enabled, the behavior changes such that `.` truly matches any
-        /// character.
-        ///
-        /// This setting can also be configured using the inline flag `s` in
-        /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent
-        /// regexes.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"foo.bar"])
-        ///     .dot_matches_new_line(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = b"foo\nbar";
-        /// assert!(re.is_match(hay));
-        /// ```
-        pub fn dot_matches_new_line(
-            &mut self,
-            yes: bool,
-        ) -> &mut RegexSetBuilder {
-            self.builder.dot_matches_new_line(yes);
-            self
-        }
-
-        /// This configures CRLF mode for all of the patterns.
-        ///
-        /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for
-        /// short) and `\n` ("line feed" or LF for short) are treated as line
-        /// terminators. This results in the following:
-        ///
-        /// * Unless dot-matches-new-line mode is enabled, `.` will now match
-        /// any character except for `\n` and `\r`.
-        /// * When multi-line mode is enabled, `^` will match immediately
-        /// following a `\n` or a `\r`. Similarly, `$` will match immediately
-        /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match
-        /// between `\r` and `\n`.
-        ///
-        /// This setting can also be configured using the inline flag `R` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^foo$"])
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = b"\r\nfoo\r\n";
-        /// // If CRLF mode weren't enabled here, then '$' wouldn't match
-        /// // immediately after 'foo', and thus no match would be found.
-        /// assert!(re.is_match(hay));
-        /// ```
-        ///
-        /// This example demonstrates that `^` will never match at a position
-        /// between `\r` and `\n`. (`$` will similarly not match between a `\r`
-        /// and a `\n`.)
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^\n"])
-        ///     .multi_line(true)
-        ///     .crlf(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(!re.is_match(b"\r\n"));
-        /// ```
-        pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.crlf(yes);
-            self
-        }
-
-        /// Configures the line terminator to be used by the regex.
-        ///
-        /// The line terminator is relevant in two ways for a particular regex:
-        ///
-        /// * When dot-matches-new-line mode is *not* enabled (the default),
-        /// then `.` will match any character except for the configured line
-        /// terminator.
-        /// * When multi-line mode is enabled (not the default), then `^` and
-        /// `$` will match immediately after and before, respectively, a line
-        /// terminator.
-        ///
-        /// In both cases, if CRLF mode is enabled in a particular context,
-        /// then it takes precedence over any configured line terminator.
-        ///
-        /// This option cannot be configured from within the pattern.
-        ///
-        /// The default line terminator is `\n`.
-        ///
-        /// # Example
-        ///
-        /// This shows how to treat the NUL byte as a line terminator. This can
-        /// be a useful heuristic when searching binary data.
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"^foo$"])
-        ///     .multi_line(true)
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// let hay = b"\x00foo\x00";
-        /// assert!(re.is_match(hay));
-        /// ```
-        ///
-        /// This example shows that the behavior of `.` is impacted by this
-        /// setting as well:
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let re = RegexSetBuilder::new([r"."])
-        ///     .line_terminator(b'\x00')
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"\n"));
-        /// assert!(!re.is_match(b"\x00"));
-        /// ```
-        ///
-        /// This shows that building a regex will work even when the byte given
-        /// is not ASCII. This is unlike the top-level `RegexSet` API where
-        /// matching invalid UTF-8 is not allowed.
-        ///
-        /// Note though that you must disable Unicode mode. This is required
-        /// because Unicode mode requires matching one codepoint at a time,
-        /// and there is no way to match a non-ASCII byte as if it were a
-        /// codepoint.
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// assert!(
-        ///     RegexSetBuilder::new([r"."])
-        ///         .unicode(false)
-        ///         .line_terminator(0x80)
-        ///         .build()
-        ///         .is_ok(),
-        /// );
-        /// ```
-        pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder {
-            self.builder.line_terminator(byte);
-            self
-        }
-
-        /// This configures swap-greed mode for all of the patterns.
-        ///
-        /// When swap-greed mode is enabled, patterns like `a+` will become
-        /// non-greedy and patterns like `a+?` will become greedy. In other
-        /// words, the meanings of `a+` and `a+?` are switched.
-        ///
-        /// This setting can also be configured using the inline flag `U` in
-        /// the pattern.
-        ///
-        /// Note that this is generally not useful for a `RegexSet` since a
-        /// `RegexSet` can only report whether a pattern matches or not. Since
-        /// greediness never impacts whether a match is found or not (only the
-        /// offsets of the match), it follows that whether parts of a pattern
-        /// are greedy or not doesn't matter for a `RegexSet`.
-        ///
-        /// The default for this is `false`.
-        pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.swap_greed(yes);
-            self
-        }
-
-        /// This configures verbose mode for all of the patterns.
-        ///
-        /// When enabled, whitespace will treated as insignifcant in the
-        /// pattern and `#` can be used to start a comment until the next new
-        /// line.
-        ///
-        /// Normally, in most places in a pattern, whitespace is treated
-        /// literally. For example ` +` will match one or more ASCII whitespace
-        /// characters.
-        ///
-        /// When verbose mode is enabled, `\#` can be used to match a literal
-        /// `#` and `\ ` can be used to match a literal ASCII whitespace
-        /// character.
-        ///
-        /// Verbose mode is useful for permitting regexes to be formatted and
-        /// broken up more nicely. This may make them more easily readable.
-        ///
-        /// This setting can also be configured using the inline flag `x` in
-        /// the pattern.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// let pat = r"
-        ///     \b
-        ///     (?<first>\p{Uppercase}\w*)  # always start with uppercase letter
-        ///     [\s--\n]+                   # whitespace should separate names
-        ///     (?: # middle name can be an initial!
-        ///         (?:(?<initial>\p{Uppercase})\.|(?<middle>\p{Uppercase}\w*))
-        ///         [\s--\n]+
-        ///     )?
-        ///     (?<last>\p{Uppercase}\w*)
-        ///     \b
-        /// ";
-        /// let re = RegexSetBuilder::new([pat])
-        ///     .ignore_whitespace(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"Harry Potter"));
-        /// assert!(re.is_match(b"Harry J. Potter"));
-        /// assert!(re.is_match(b"Harry James Potter"));
-        /// assert!(!re.is_match(b"harry J. Potter"));
-        /// ```
-        pub fn ignore_whitespace(
-            &mut self,
-            yes: bool,
-        ) -> &mut RegexSetBuilder {
-            self.builder.ignore_whitespace(yes);
-            self
-        }
-
-        /// This configures octal mode for all of the patterns.
-        ///
-        /// Octal syntax is a little-known way of uttering Unicode codepoints
-        /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all
-        /// equivalent patterns, where the last example shows octal syntax.
-        ///
-        /// While supporting octal syntax isn't in and of itself a problem,
-        /// it does make good error messages harder. That is, in PCRE based
-        /// regex engines, syntax like `\1` invokes a backreference, which is
-        /// explicitly unsupported this library. However, many users expect
-        /// backreferences to be supported. Therefore, when octal support
-        /// is disabled, the error message will explicitly mention that
-        /// backreferences aren't supported.
-        ///
-        /// The default for this is `false`.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// // Normally this pattern would not compile, with an error message
-        /// // about backreferences not being supported. But with octal mode
-        /// // enabled, octal escape sequences work.
-        /// let re = RegexSetBuilder::new([r"\141"])
-        ///     .octal(true)
-        ///     .build()
-        ///     .unwrap();
-        /// assert!(re.is_match(b"a"));
-        /// ```
-        pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder {
-            self.builder.octal(yes);
-            self
-        }
-
-        /// Sets the approximate size limit, in bytes, of the compiled regex.
-        ///
-        /// This roughly corresponds to the number of heap memory, in
-        /// bytes, occupied by a single regex. If the regex would otherwise
-        /// approximately exceed this limit, then compiling that regex will
-        /// fail.
-        ///
-        /// The main utility of a method like this is to avoid compiling
-        /// regexes that use an unexpected amount of resources, such as
-        /// time and memory. Even if the memory usage of a large regex is
-        /// acceptable, its search time may not be. Namely, worst case time
-        /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and
-        /// `n ~ len(haystack)`. That is, search time depends, in part, on the
-        /// size of the compiled regex. This means that putting a limit on the
-        /// size of the regex limits how much a regex can impact search time.
-        ///
-        /// For more information about regex size limits, see the section on
-        /// [untrusted inputs](crate#untrusted-input) in the top-level crate
-        /// documentation.
-        ///
-        /// The default for this is some reasonable number that permits most
-        /// patterns to compile successfully.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// // It may surprise you how big some seemingly small patterns can
-        /// // be! Since \w is Unicode aware, this generates a regex that can
-        /// // match approximately 140,000 distinct codepoints.
-        /// assert!(
-        ///     RegexSetBuilder::new([r"\w"])
-        ///         .size_limit(45_000)
-        ///         .build()
-        ///         .is_err()
-        /// );
-        /// ```
-        pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder {
-            self.builder.size_limit(bytes);
-            self
-        }
-
-        /// Set the approximate capacity, in bytes, of the cache of transitions
-        /// used by the lazy DFA.
-        ///
-        /// While the lazy DFA isn't always used, in tends to be the most
-        /// commonly use regex engine in default configurations. It tends to
-        /// adopt the performance profile of a fully build DFA, but without the
-        /// downside of taking worst case exponential time to build.
-        ///
-        /// The downside is that it needs to keep a cache of transitions and
-        /// states that are built while running a search, and this cache
-        /// can fill up. When it fills up, the cache will reset itself. Any
-        /// previously generated states and transitions will then need to be
-        /// re-generated. If this happens too many times, then this library
-        /// will bail out of using the lazy DFA and switch to a different regex
-        /// engine.
-        ///
-        /// If your regex provokes this particular downside of the lazy DFA,
-        /// then it may be beneficial to increase its cache capacity. This will
-        /// potentially reduce the frequency of cache resetting (ideally to
-        /// `0`). While it won't fix all potential performance problems with
-        /// the lazy DFA, increasing the cache capacity does fix some.
-        ///
-        /// There is no easy way to determine, a priori, whether increasing
-        /// this cache capacity will help. In general, the larger your regex,
-        /// the more cache it's likely to use. But that isn't an ironclad rule.
-        /// For example, a regex like `[01]*1[01]{N}` would normally produce a
-        /// fully build DFA that is exponential in size with respect to `N`.
-        /// The lazy DFA will prevent exponential space blow-up, but it cache
-        /// is likely to fill up, even when it's large and even for smallish
-        /// values of `N`.
-        ///
-        /// If you aren't sure whether this helps or not, it is sensible to
-        /// set this to some arbitrarily large number in testing, such as
-        /// `usize::MAX`. Namely, this represents the amount of capacity that
-        /// *may* be used. It's probably not a good idea to use `usize::MAX` in
-        /// production though, since it implies there are no controls on heap
-        /// memory used by this library during a search. In effect, set it to
-        /// whatever you're willing to allocate for a single regex search.
-        pub fn dfa_size_limit(
-            &mut self,
-            bytes: usize,
-        ) -> &mut RegexSetBuilder {
-            self.builder.dfa_size_limit(bytes);
-            self
-        }
-
-        /// Set the nesting limit for this parser.
-        ///
-        /// The nesting limit controls how deep the abstract syntax tree is
-        /// allowed to be. If the AST exceeds the given limit (e.g., with too
-        /// many nested groups), then an error is returned by the parser.
-        ///
-        /// The purpose of this limit is to act as a heuristic to prevent stack
-        /// overflow for consumers that do structural induction on an AST using
-        /// explicit recursion. While this crate never does this (instead using
-        /// constant stack space and moving the call stack to the heap), other
-        /// crates may.
-        ///
-        /// This limit is not checked until the entire AST is parsed.
-        /// Therefore, if callers want to put a limit on the amount of heap
-        /// space used, then they should impose a limit on the length, in
-        /// bytes, of the concrete pattern string. In particular, this is
-        /// viable since this parser implementation will limit itself to heap
-        /// space proportional to the length of the pattern string. See also
-        /// the [untrusted inputs](crate#untrusted-input) section in the
-        /// top-level crate documentation for more information about this.
-        ///
-        /// Note that a nest limit of `0` will return a nest limit error for
-        /// most patterns but not all. For example, a nest limit of `0` permits
-        /// `a` but not `ab`, since `ab` requires an explicit concatenation,
-        /// which results in a nest depth of `1`. In general, a nest limit is
-        /// not something that manifests in an obvious way in the concrete
-        /// syntax, therefore, it should not be used in a granular way.
-        ///
-        /// # Example
-        ///
-        /// ```
-        /// use regex::bytes::RegexSetBuilder;
-        ///
-        /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok());
-        /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err());
-        /// ```
-        pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder {
-            self.builder.nest_limit(limit);
-            self
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/bytes.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/bytes.rs
deleted file mode 100644
index 383ac4a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/bytes.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-/*!
-Search for regex matches in `&[u8]` haystacks.
-
-This module provides a nearly identical API via [`Regex`] to the one found in
-the top-level of this crate. There are two important differences:
-
-1. Matching is done on `&[u8]` instead of `&str`. Additionally, `Vec<u8>`
-is used where `String` would have been used in the top-level API.
-2. Unicode support can be disabled even when disabling it would result in
-matching invalid UTF-8 bytes.
-
-# Example: match null terminated string
-
-This shows how to find all null-terminated strings in a slice of bytes. This
-works even if a C string contains invalid UTF-8.
-
-```rust
-use regex::bytes::Regex;
-
-let re = Regex::new(r"(?-u)(?<cstr>[^\x00]+)\x00").unwrap();
-let hay = b"foo\x00qu\xFFux\x00baz\x00";
-
-// Extract all of the strings without the NUL terminator from each match.
-// The unwrap is OK here since a match requires the `cstr` capture to match.
-let cstrs: Vec<&[u8]> =
-    re.captures_iter(hay)
-      .map(|c| c.name("cstr").unwrap().as_bytes())
-      .collect();
-assert_eq!(cstrs, vec![&b"foo"[..], &b"qu\xFFux"[..], &b"baz"[..]]);
-```
-
-# Example: selectively enable Unicode support
-
-This shows how to match an arbitrary byte pattern followed by a UTF-8 encoded
-string (e.g., to extract a title from a Matroska file):
-
-```rust
-use regex::bytes::Regex;
-
-let re = Regex::new(
-    r"(?-u)\x7b\xa9(?:[\x80-\xfe]|[\x40-\xff].)(?u:(.*))"
-).unwrap();
-let hay = b"\x12\xd0\x3b\x5f\x7b\xa9\x85\xe2\x98\x83\x80\x98\x54\x76\x68\x65";
-
-// Notice that despite the `.*` at the end, it will only match valid UTF-8
-// because Unicode mode was enabled with the `u` flag. Without the `u` flag,
-// the `.*` would match the rest of the bytes regardless of whether they were
-// valid UTF-8.
-let (_, [title]) = re.captures(hay).unwrap().extract();
-assert_eq!(title, b"\xE2\x98\x83");
-// We can UTF-8 decode the title now. And the unwrap here
-// is correct because the existence of a match guarantees
-// that `title` is valid UTF-8.
-let title = std::str::from_utf8(title).unwrap();
-assert_eq!(title, "☃");
-```
-
-In general, if the Unicode flag is enabled in a capture group and that capture
-is part of the overall match, then the capture is *guaranteed* to be valid
-UTF-8.
-
-# Syntax
-
-The supported syntax is pretty much the same as the syntax for Unicode
-regular expressions with a few changes that make sense for matching arbitrary
-bytes:
-
-1. The `u` flag can be disabled even when disabling it might cause the regex to
-match invalid UTF-8. When the `u` flag is disabled, the regex is said to be in
-"ASCII compatible" mode.
-2. In ASCII compatible mode, Unicode character classes are not allowed. Literal
-Unicode scalar values outside of character classes are allowed.
-3. In ASCII compatible mode, Perl character classes (`\w`, `\d` and `\s`)
-revert to their typical ASCII definition. `\w` maps to `[[:word:]]`, `\d` maps
-to `[[:digit:]]` and `\s` maps to `[[:space:]]`.
-4. In ASCII compatible mode, word boundaries use the ASCII compatible `\w` to
-determine whether a byte is a word byte or not.
-5. Hexadecimal notation can be used to specify arbitrary bytes instead of
-Unicode codepoints. For example, in ASCII compatible mode, `\xFF` matches the
-literal byte `\xFF`, while in Unicode mode, `\xFF` is the Unicode codepoint
-`U+00FF` that matches its UTF-8 encoding of `\xC3\xBF`. Similarly for octal
-notation when enabled.
-6. In ASCII compatible mode, `.` matches any *byte* except for `\n`. When the
-`s` flag is additionally enabled, `.` matches any byte.
-
-# Performance
-
-In general, one should expect performance on `&[u8]` to be roughly similar to
-performance on `&str`.
-*/
-pub use crate::{builders::bytes::*, regex::bytes::*, regexset::bytes::*};
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/error.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/error.rs
deleted file mode 100644
index 6026b38..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/error.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use alloc::string::{String, ToString};
-
-use regex_automata::meta;
-
-/// An error that occurred during parsing or compiling a regular expression.
-#[non_exhaustive]
-#[derive(Clone, PartialEq)]
-pub enum Error {
-    /// A syntax error.
-    Syntax(String),
-    /// The compiled program exceeded the set size
-    /// limit. The argument is the size limit imposed by
-    /// [`RegexBuilder::size_limit`](crate::RegexBuilder::size_limit). Even
-    /// when not configured explicitly, it defaults to a reasonable limit.
-    ///
-    /// If you're getting this error, it occurred because your regex has been
-    /// compiled to an intermediate state that is too big. It is important to
-    /// note that exceeding this limit does _not_ mean the regex is too big to
-    /// _work_, but rather, the regex is big enough that it may wind up being
-    /// surprisingly slow when used in a search. In other words, this error is
-    /// meant to be a practical heuristic for avoiding a performance footgun,
-    /// and especially so for the case where the regex pattern is coming from
-    /// an untrusted source.
-    ///
-    /// There are generally two ways to move forward if you hit this error.
-    /// The first is to find some way to use a smaller regex. The second is to
-    /// increase the size limit via `RegexBuilder::size_limit`. However, if
-    /// your regex pattern is not from a trusted source, then neither of these
-    /// approaches may be appropriate. Instead, you'll have to determine just
-    /// how big of a regex you want to allow.
-    CompiledTooBig(usize),
-}
-
-impl Error {
-    pub(crate) fn from_meta_build_error(err: meta::BuildError) -> Error {
-        if let Some(size_limit) = err.size_limit() {
-            Error::CompiledTooBig(size_limit)
-        } else if let Some(ref err) = err.syntax_error() {
-            Error::Syntax(err.to_string())
-        } else {
-            // This is a little suspect. Technically there are more ways for
-            // a meta regex to fail to build other than "exceeded size limit"
-            // and "syntax error." For example, if there are too many states
-            // or even too many patterns. But in practice this is probably
-            // good enough. The worst thing that happens is that Error::Syntax
-            // represents an error that isn't technically a syntax error, but
-            // the actual message will still be shown. So... it's not too bad.
-            //
-            // We really should have made the Error type in the regex crate
-            // completely opaque. Rookie mistake.
-            Error::Syntax(err.to_string())
-        }
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for Error {
-    // TODO: Remove this method entirely on the next breaking semver release.
-    #[allow(deprecated)]
-    fn description(&self) -> &str {
-        match *self {
-            Error::Syntax(ref err) => err,
-            Error::CompiledTooBig(_) => "compiled program too big",
-        }
-    }
-}
-
-impl core::fmt::Display for Error {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match *self {
-            Error::Syntax(ref err) => err.fmt(f),
-            Error::CompiledTooBig(limit) => write!(
-                f,
-                "Compiled regex exceeds size limit of {} bytes.",
-                limit
-            ),
-        }
-    }
-}
-
-// We implement our own Debug implementation so that we show nicer syntax
-// errors when people use `Regex::new(...).unwrap()`. It's a little weird,
-// but the `Syntax` variant is already storing a `String` anyway, so we might
-// as well format it nicely.
-impl core::fmt::Debug for Error {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match *self {
-            Error::Syntax(ref err) => {
-                let hr: String = core::iter::repeat('~').take(79).collect();
-                writeln!(f, "Syntax(")?;
-                writeln!(f, "{}", hr)?;
-                writeln!(f, "{}", err)?;
-                writeln!(f, "{}", hr)?;
-                write!(f, ")")?;
-                Ok(())
-            }
-            Error::CompiledTooBig(limit) => {
-                f.debug_tuple("CompiledTooBig").field(&limit).finish()
-            }
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/find_byte.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/find_byte.rs
deleted file mode 100644
index 9c6915d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/find_byte.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-/// Searches for the given needle in the given haystack.
-///
-/// If the perf-literal feature is enabled, then this uses the super optimized
-/// memchr crate. Otherwise, it uses the naive byte-at-a-time implementation.
-pub(crate) fn find_byte(needle: u8, haystack: &[u8]) -> Option<usize> {
-    #[cfg(not(feature = "perf-literal"))]
-    fn imp(needle: u8, haystack: &[u8]) -> Option<usize> {
-        haystack.iter().position(|&b| b == needle)
-    }
-
-    #[cfg(feature = "perf-literal")]
-    fn imp(needle: u8, haystack: &[u8]) -> Option<usize> {
-        memchr::memchr(needle, haystack)
-    }
-
-    imp(needle, haystack)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/lib.rs
deleted file mode 100644
index 6dbd3c20..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/lib.rs
+++ /dev/null
@@ -1,1346 +0,0 @@
-/*!
-This crate provides routines for searching strings for matches of a [regular
-expression] (aka "regex"). The regex syntax supported by this crate is similar
-to other regex engines, but it lacks several features that are not known how to
-implement efficiently. This includes, but is not limited to, look-around and
-backreferences. In exchange, all regex searches in this crate have worst case
-`O(m * n)` time complexity, where `m` is proportional to the size of the regex
-and `n` is proportional to the size of the string being searched.
-
-[regular expression]: https://en.wikipedia.org/wiki/Regular_expression
-
-If you just want API documentation, then skip to the [`Regex`] type. Otherwise,
-here's a quick example showing one way of parsing the output of a grep-like
-program:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"(?m)^([^:]+):([0-9]+):(.+)$").unwrap();
-let hay = "\
-path/to/foo:54:Blue Harvest
-path/to/bar:90:Something, Something, Something, Dark Side
-path/to/baz:3:It's a Trap!
-";
-
-let mut results = vec![];
-for (_, [path, lineno, line]) in re.captures_iter(hay).map(|c| c.extract()) {
-    results.push((path, lineno.parse::<u64>()?, line));
-}
-assert_eq!(results, vec![
-    ("path/to/foo", 54, "Blue Harvest"),
-    ("path/to/bar", 90, "Something, Something, Something, Dark Side"),
-    ("path/to/baz", 3, "It's a Trap!"),
-]);
-# Ok::<(), Box<dyn std::error::Error>>(())
-```
-
-# Overview
-
-The primary type in this crate is a [`Regex`]. Its most important methods are
-as follows:
-
-* [`Regex::new`] compiles a regex using the default configuration. A
-[`RegexBuilder`] permits setting a non-default configuration. (For example,
-case insensitive matching, verbose mode and others.)
-* [`Regex::is_match`] reports whether a match exists in a particular haystack.
-* [`Regex::find`] reports the byte offsets of a match in a haystack, if one
-exists. [`Regex::find_iter`] returns an iterator over all such matches.
-* [`Regex::captures`] returns a [`Captures`], which reports both the byte
-offsets of a match in a haystack and the byte offsets of each matching capture
-group from the regex in the haystack.
-[`Regex::captures_iter`] returns an iterator over all such matches.
-
-There is also a [`RegexSet`], which permits searching for multiple regex
-patterns simultaneously in a single search. However, it currently only reports
-which patterns match and *not* the byte offsets of a match.
-
-Otherwise, this top-level crate documentation is organized as follows:
-
-* [Usage](#usage) shows how to add the `regex` crate to your Rust project.
-* [Examples](#examples) provides a limited selection of regex search examples.
-* [Performance](#performance) provides a brief summary of how to optimize regex
-searching speed.
-* [Unicode](#unicode) discusses support for non-ASCII patterns.
-* [Syntax](#syntax) enumerates the specific regex syntax supported by this
-crate.
-* [Untrusted input](#untrusted-input) discusses how this crate deals with regex
-patterns or haystacks that are untrusted.
-* [Crate features](#crate-features) documents the Cargo features that can be
-enabled or disabled for this crate.
-* [Other crates](#other-crates) links to other crates in the `regex` family.
-
-# Usage
-
-The `regex` crate is [on crates.io](https://crates.io/crates/regex) and can be
-used by adding `regex` to your dependencies in your project's `Cargo.toml`.
-Or more simply, just run `cargo add regex`.
-
-Here is a complete example that creates a new Rust project, adds a dependency
-on `regex`, creates the source code for a regex search and then runs the
-program.
-
-First, create the project in a new directory:
-
-```text
-$ mkdir regex-example
-$ cd regex-example
-$ cargo init
-```
-
-Second, add a dependency on `regex`:
-
-```text
-$ cargo add regex
-```
-
-Third, edit `src/main.rs`. Delete what's there and replace it with this:
-
-```
-use regex::Regex;
-
-fn main() {
-    let re = Regex::new(r"Hello (?<name>\w+)!").unwrap();
-    let Some(caps) = re.captures("Hello Murphy!") else {
-        println!("no match!");
-        return;
-    };
-    println!("The name is: {}", &caps["name"]);
-}
-```
-
-Fourth, run it with `cargo run`:
-
-```text
-$ cargo run
-   Compiling memchr v2.5.0
-   Compiling regex-syntax v0.7.1
-   Compiling aho-corasick v1.0.1
-   Compiling regex v1.8.1
-   Compiling regex-example v0.1.0 (/tmp/regex-example)
-    Finished dev [unoptimized + debuginfo] target(s) in 4.22s
-     Running `target/debug/regex-example`
-The name is: Murphy
-```
-
-The first time you run the program will show more output like above. But
-subsequent runs shouldn't have to re-compile the dependencies.
-
-# Examples
-
-This section provides a few examples, in tutorial style, showing how to
-search a haystack with a regex. There are more examples throughout the API
-documentation.
-
-Before starting though, it's worth defining a few terms:
-
-* A **regex** is a Rust value whose type is `Regex`. We use `re` as a
-variable name for a regex.
-* A **pattern** is the string that is used to build a regex. We use `pat` as
-a variable name for a pattern.
-* A **haystack** is the string that is searched by a regex. We use `hay` as a
-variable name for a haystack.
-
-Sometimes the words "regex" and "pattern" are used interchangeably.
-
-General use of regular expressions in this crate proceeds by compiling a
-**pattern** into a **regex**, and then using that regex to search, split or
-replace parts of a **haystack**.
-
-### Example: find a middle initial
-
-We'll start off with a very simple example: a regex that looks for a specific
-name but uses a wildcard to match a middle initial. Our pattern serves as
-something like a template that will match a particular name with *any* middle
-initial.
-
-```rust
-use regex::Regex;
-
-// We use 'unwrap()' here because it would be a bug in our program if the
-// pattern failed to compile to a regex. Panicking in the presence of a bug
-// is okay.
-let re = Regex::new(r"Homer (.)\. Simpson").unwrap();
-let hay = "Homer J. Simpson";
-let Some(caps) = re.captures(hay) else { return };
-assert_eq!("J", &caps[1]);
-```
-
-There are a few things worth noticing here in our first example:
-
-* The `.` is a special pattern meta character that means "match any single
-character except for new lines." (More precisely, in this crate, it means
-"match any UTF-8 encoding of any Unicode scalar value other than `\n`.")
-* We can match an actual `.` literally by escaping it, i.e., `\.`.
-* We use Rust's [raw strings] to avoid needing to deal with escape sequences in
-both the regex pattern syntax and in Rust's string literal syntax. If we didn't
-use raw strings here, we would have had to use `\\.` to match a literal `.`
-character. That is, `r"\."` and `"\\."` are equivalent patterns.
-* We put our wildcard `.` instruction in parentheses. These parentheses have a
-special meaning that says, "make whatever part of the haystack matches within
-these parentheses available as a capturing group." After finding a match, we
-access this capture group with `&caps[1]`.
-
-[raw strings]: https://doc.rust-lang.org/stable/reference/tokens.html#raw-string-literals
-
-Otherwise, we execute a search using `re.captures(hay)` and return from our
-function if no match occurred. We then reference the middle initial by asking
-for the part of the haystack that matched the capture group indexed at `1`.
-(The capture group at index 0 is implicit and always corresponds to the entire
-match. In this case, that's `Homer J. Simpson`.)
-
-### Example: named capture groups
-
-Continuing from our middle initial example above, we can tweak the pattern
-slightly to give a name to the group that matches the middle initial:
-
-```rust
-use regex::Regex;
-
-// Note that (?P<middle>.) is a different way to spell the same thing.
-let re = Regex::new(r"Homer (?<middle>.)\. Simpson").unwrap();
-let hay = "Homer J. Simpson";
-let Some(caps) = re.captures(hay) else { return };
-assert_eq!("J", &caps["middle"]);
-```
-
-Giving a name to a group can be useful when there are multiple groups in
-a pattern. It makes the code referring to those groups a bit easier to
-understand.
-
-### Example: validating a particular date format
-
-This examples shows how to confirm whether a haystack, in its entirety, matches
-a particular date format:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
-assert!(re.is_match("2010-03-14"));
-```
-
-Notice the use of the `^` and `$` anchors. In this crate, every regex search is
-run with an implicit `(?s:.)*?` at the beginning of its pattern, which allows
-the regex to match anywhere in a haystack. Anchors, as above, can be used to
-ensure that the full haystack matches a pattern.
-
-This crate is also Unicode aware by default, which means that `\d` might match
-more than you might expect it to. For example:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
-assert!(re.is_match("𝟚𝟘𝟙𝟘-𝟘𝟛-𝟙𝟜"));
-```
-
-To only match an ASCII decimal digit, all of the following are equivalent:
-
-* `[0-9]`
-* `(?-u:\d)`
-* `[[:digit:]]`
-* `[\d&&\p{ascii}]`
-
-### Example: finding dates in a haystack
-
-In the previous example, we showed how one might validate that a haystack,
-in its entirety, corresponded to a particular date format. But what if we wanted
-to extract all things that look like dates in a specific format from a haystack?
-To do this, we can use an iterator API to find all matches (notice that we've
-removed the anchors and switched to looking for ASCII-only digits):
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap();
-let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?";
-// 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack.
-let dates: Vec<&str> = re.find_iter(hay).map(|m| m.as_str()).collect();
-assert_eq!(dates, vec![
-    "1865-04-14",
-    "1881-07-02",
-    "1901-09-06",
-    "1963-11-22",
-]);
-```
-
-We can also iterate over [`Captures`] values instead of [`Match`] values, and
-that in turn permits accessing each component of the date via capturing groups:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"(?<y>[0-9]{4})-(?<m>[0-9]{2})-(?<d>[0-9]{2})").unwrap();
-let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?";
-// 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack.
-let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| {
-    // The unwraps are okay because every capture group must match if the whole
-    // regex matches, and in this context, we know we have a match.
-    //
-    // Note that we use `caps.name("y").unwrap().as_str()` instead of
-    // `&caps["y"]` because the lifetime of the former is the same as the
-    // lifetime of `hay` above, but the lifetime of the latter is tied to the
-    // lifetime of `caps` due to how the `Index` trait is defined.
-    let year = caps.name("y").unwrap().as_str();
-    let month = caps.name("m").unwrap().as_str();
-    let day = caps.name("d").unwrap().as_str();
-    (year, month, day)
-}).collect();
-assert_eq!(dates, vec![
-    ("1865", "04", "14"),
-    ("1881", "07", "02"),
-    ("1901", "09", "06"),
-    ("1963", "11", "22"),
-]);
-```
-
-### Example: simpler capture group extraction
-
-One can use [`Captures::extract`] to make the code from the previous example a
-bit simpler in this case:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap();
-let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?";
-let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| {
-    let (_, [year, month, day]) = caps.extract();
-    (year, month, day)
-}).collect();
-assert_eq!(dates, vec![
-    ("1865", "04", "14"),
-    ("1881", "07", "02"),
-    ("1901", "09", "06"),
-    ("1963", "11", "22"),
-]);
-```
-
-`Captures::extract` works by ensuring that the number of matching groups match
-the number of groups requested via the `[year, month, day]` syntax. If they do,
-then the substrings for each corresponding capture group are automatically
-returned in an appropriately sized array. Rust's syntax for pattern matching
-arrays does the rest.
-
-### Example: replacement with named capture groups
-
-Building on the previous example, perhaps we'd like to rearrange the date
-formats. This can be done by finding each match and replacing it with
-something different. The [`Regex::replace_all`] routine provides a convenient
-way to do this, including by supporting references to named groups in the
-replacement string:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"(?<y>\d{4})-(?<m>\d{2})-(?<d>\d{2})").unwrap();
-let before = "1973-01-05, 1975-08-25 and 1980-10-18";
-let after = re.replace_all(before, "$m/$d/$y");
-assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980");
-```
-
-The replace methods are actually polymorphic in the replacement, which
-provides more flexibility than is seen here. (See the documentation for
-[`Regex::replace`] for more details.)
-
-### Example: verbose mode
-
-When your regex gets complicated, you might consider using something other
-than regex. But if you stick with regex, you can use the `x` flag to enable
-insignificant whitespace mode or "verbose mode." In this mode, whitespace
-is treated as insignificant and one may write comments. This may make your
-patterns easier to comprehend.
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"(?x)
-  (?P<y>\d{4}) # the year, including all Unicode digits
-  -
-  (?P<m>\d{2}) # the month, including all Unicode digits
-  -
-  (?P<d>\d{2}) # the day, including all Unicode digits
-").unwrap();
-
-let before = "1973-01-05, 1975-08-25 and 1980-10-18";
-let after = re.replace_all(before, "$m/$d/$y");
-assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980");
-```
-
-If you wish to match against whitespace in this mode, you can still use `\s`,
-`\n`, `\t`, etc. For escaping a single space character, you can escape it
-directly with `\ `, use its hex character code `\x20` or temporarily disable
-the `x` flag, e.g., `(?-x: )`.
-
-### Example: match multiple regular expressions simultaneously
-
-This demonstrates how to use a [`RegexSet`] to match multiple (possibly
-overlapping) regexes in a single scan of a haystack:
-
-```rust
-use regex::RegexSet;
-
-let set = RegexSet::new(&[
-    r"\w+",
-    r"\d+",
-    r"\pL+",
-    r"foo",
-    r"bar",
-    r"barfoo",
-    r"foobar",
-]).unwrap();
-
-// Iterate over and collect all of the matches. Each match corresponds to the
-// ID of the matching pattern.
-let matches: Vec<_> = set.matches("foobar").into_iter().collect();
-assert_eq!(matches, vec![0, 2, 3, 4, 6]);
-
-// You can also test whether a particular regex matched:
-let matches = set.matches("foobar");
-assert!(!matches.matched(5));
-assert!(matches.matched(6));
-```
-
-# Performance
-
-This section briefly discusses a few concerns regarding the speed and resource
-usage of regexes.
-
-### Only ask for what you need
-
-When running a search with a regex, there are generally three different types
-of information one can ask for:
-
-1. Does a regex match in a haystack?
-2. Where does a regex match in a haystack?
-3. Where do each of the capturing groups match in a haystack?
-
-Generally speaking, this crate could provide a function to answer only #3,
-which would subsume #1 and #2 automatically. However, it can be significantly
-more expensive to compute the location of capturing group matches, so it's best
-not to do it if you don't need to.
-
-Therefore, only ask for what you need. For example, don't use [`Regex::find`]
-if you only need to test if a regex matches a haystack. Use [`Regex::is_match`]
-instead.
-
-### Unicode can impact memory usage and search speed
-
-This crate has first class support for Unicode and it is **enabled by default**.
-In many cases, the extra memory required to support it will be negligible and
-it typically won't impact search speed. But it can in some cases.
-
-With respect to memory usage, the impact of Unicode principally manifests
-through the use of Unicode character classes. Unicode character classes
-tend to be quite large. For example, `\w` by default matches around 140,000
-distinct codepoints. This requires additional memory, and tends to slow down
-regex compilation. While a `\w` here and there is unlikely to be noticed,
-writing `\w{100}` will for example result in quite a large regex by default.
-Indeed, `\w` is considerably larger than its ASCII-only version, so if your
-requirements are satisfied by ASCII, it's probably a good idea to stick to
-ASCII classes. The ASCII-only version of `\w` can be spelled in a number of
-ways. All of the following are equivalent:
-
-* `[0-9A-Za-z_]`
-* `(?-u:\w)`
-* `[[:word:]]`
-* `[\w&&\p{ascii}]`
-
-With respect to search speed, Unicode tends to be handled pretty well, even when
-using large Unicode character classes. However, some of the faster internal
-regex engines cannot handle a Unicode aware word boundary assertion. So if you
-don't need Unicode-aware word boundary assertions, you might consider using
-`(?-u:\b)` instead of `\b`, where the former uses an ASCII-only definition of
-a word character.
-
-### Literals might accelerate searches
-
-This crate tends to be quite good at recognizing literals in a regex pattern
-and using them to accelerate a search. If it is at all possible to include
-some kind of literal in your pattern, then it might make search substantially
-faster. For example, in the regex `\w+@\w+`, the engine will look for
-occurrences of `@` and then try a reverse match for `\w+` to find the start
-position.
-
-### Avoid re-compiling regexes, especially in a loop
-
-It is an anti-pattern to compile the same pattern in a loop since regex
-compilation is typically expensive. (It takes anywhere from a few microseconds
-to a few **milliseconds** depending on the size of the pattern.) Not only is
-compilation itself expensive, but this also prevents optimizations that reuse
-allocations internally to the regex engine.
-
-In Rust, it can sometimes be a pain to pass regexes around if they're used from
-inside a helper function. Instead, we recommend using crates like [`once_cell`]
-and [`lazy_static`] to ensure that patterns are compiled exactly once.
-
-[`once_cell`]: https://crates.io/crates/once_cell
-[`lazy_static`]: https://crates.io/crates/lazy_static
-
-This example shows how to use `once_cell`:
-
-```rust
-use {
-    once_cell::sync::Lazy,
-    regex::Regex,
-};
-
-fn some_helper_function(haystack: &str) -> bool {
-    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"...").unwrap());
-    RE.is_match(haystack)
-}
-
-fn main() {
-    assert!(some_helper_function("abc"));
-    assert!(!some_helper_function("ac"));
-}
-```
-
-Specifically, in this example, the regex will be compiled when it is used for
-the first time. On subsequent uses, it will reuse the previously built `Regex`.
-Notice how one can define the `Regex` locally to a specific function.
-
-### Sharing a regex across threads can result in contention
-
-While a single `Regex` can be freely used from multiple threads simultaneously,
-there is a small synchronization cost that must be paid. Generally speaking,
-one shouldn't expect to observe this unless the principal task in each thread
-is searching with the regex *and* most searches are on short haystacks. In this
-case, internal contention on shared resources can spike and increase latency,
-which in turn may slow down each individual search.
-
-One can work around this by cloning each `Regex` before sending it to another
-thread. The cloned regexes will still share the same internal read-only portion
-of its compiled state (it's reference counted), but each thread will get
-optimized access to the mutable space that is used to run a search. In general,
-there is no additional cost in memory to doing this. The only cost is the added
-code complexity required to explicitly clone the regex. (If you share the same
-`Regex` across multiple threads, each thread still gets its own mutable space,
-but accessing that space is slower.)
-
-# Unicode
-
-This section discusses what kind of Unicode support this regex library has.
-Before showing some examples, we'll summarize the relevant points:
-
-* This crate almost fully implements "Basic Unicode Support" (Level 1) as
-specified by the [Unicode Technical Standard #18][UTS18]. The full details
-of what is supported are documented in [UNICODE.md] in the root of the regex
-crate repository. There is virtually no support for "Extended Unicode Support"
-(Level 2) from UTS#18.
-* The top-level [`Regex`] runs searches *as if* iterating over each of the
-codepoints in the haystack. That is, the fundamental atom of matching is a
-single codepoint.
-* [`bytes::Regex`], in contrast, permits disabling Unicode mode for part of all
-of your pattern in all cases. When Unicode mode is disabled, then a search is
-run *as if* iterating over each byte in the haystack. That is, the fundamental
-atom of matching is a single byte. (A top-level `Regex` also permits disabling
-Unicode and thus matching *as if* it were one byte at a time, but only when
-doing so wouldn't permit matching invalid UTF-8.)
-* When Unicode mode is enabled (the default), `.` will match an entire Unicode
-scalar value, even when it is encoded using multiple bytes. When Unicode mode
-is disabled (e.g., `(?-u:.)`), then `.` will match a single byte in all cases.
-* The character classes `\w`, `\d` and `\s` are all Unicode-aware by default.
-Use `(?-u:\w)`, `(?-u:\d)` and `(?-u:\s)` to get their ASCII-only definitions.
-* Similarly, `\b` and `\B` use a Unicode definition of a "word" character.
-To get ASCII-only word boundaries, use `(?-u:\b)` and `(?-u:\B)`. This also
-applies to the special word boundary assertions. (That is, `\b{start}`,
-`\b{end}`, `\b{start-half}`, `\b{end-half}`.)
-* `^` and `$` are **not** Unicode-aware in multi-line mode. Namely, they only
-recognize `\n` (assuming CRLF mode is not enabled) and not any of the other
-forms of line terminators defined by Unicode.
-* Case insensitive searching is Unicode-aware and uses simple case folding.
-* Unicode general categories, scripts and many boolean properties are available
-by default via the `\p{property name}` syntax.
-* In all cases, matches are reported using byte offsets. Or more precisely,
-UTF-8 code unit offsets. This permits constant time indexing and slicing of the
-haystack.
-
-[UTS18]: https://unicode.org/reports/tr18/
-[UNICODE.md]: https://github.com/rust-lang/regex/blob/master/UNICODE.md
-
-Patterns themselves are **only** interpreted as a sequence of Unicode scalar
-values. This means you can use Unicode characters directly in your pattern:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"(?i)Δ+").unwrap();
-let m = re.find("ΔδΔ").unwrap();
-assert_eq!((0, 6), (m.start(), m.end()));
-// alternatively:
-assert_eq!(0..6, m.range());
-```
-
-As noted above, Unicode general categories, scripts, script extensions, ages
-and a smattering of boolean properties are available as character classes. For
-example, you can match a sequence of numerals, Greek or Cherokee letters:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"[\pN\p{Greek}\p{Cherokee}]+").unwrap();
-let m = re.find("abcΔᎠβⅠᏮγδⅡxyz").unwrap();
-assert_eq!(3..23, m.range());
-```
-
-While not specific to Unicode, this library also supports character class set
-operations. Namely, one can nest character classes arbitrarily and perform set
-operations on them. Those set operations are union (the default), intersection,
-difference and symmetric difference. These set operations tend to be most
-useful with Unicode character classes. For example, to match any codepoint
-that is both in the `Greek` script and in the `Letter` general category:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"[\p{Greek}&&\pL]+").unwrap();
-let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect();
-assert_eq!(subs, vec!["ΔδΔ", "ΔδΔ"]);
-
-// If we just matches on Greek, then all codepoints would match!
-let re = Regex::new(r"\p{Greek}+").unwrap();
-let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect();
-assert_eq!(subs, vec!["ΔδΔ𐅌ΔδΔ"]);
-```
-
-### Opt out of Unicode support
-
-The [`bytes::Regex`] type that can be used to search `&[u8]` haystacks. By
-default, haystacks are conventionally treated as UTF-8 just like it is with the
-main `Regex` type. However, this behavior can be disabled by turning off the
-`u` flag, even if doing so could result in matching invalid UTF-8. For example,
-when the `u` flag is disabled, `.` will match any byte instead of any Unicode
-scalar value.
-
-Disabling the `u` flag is also possible with the standard `&str`-based `Regex`
-type, but it is only allowed where the UTF-8 invariant is maintained. For
-example, `(?-u:\w)` is an ASCII-only `\w` character class and is legal in an
-`&str`-based `Regex`, but `(?-u:\W)` will attempt to match *any byte* that
-isn't in `(?-u:\w)`, which in turn includes bytes that are invalid UTF-8.
-Similarly, `(?-u:\xFF)` will attempt to match the raw byte `\xFF` (instead of
-`U+00FF`), which is invalid UTF-8 and therefore is illegal in `&str`-based
-regexes.
-
-Finally, since Unicode support requires bundling large Unicode data
-tables, this crate exposes knobs to disable the compilation of those
-data tables, which can be useful for shrinking binary size and reducing
-compilation times. For details on how to do that, see the section on [crate
-features](#crate-features).
-
-# Syntax
-
-The syntax supported in this crate is documented below.
-
-Note that the regular expression parser and abstract syntax are exposed in
-a separate crate, [`regex-syntax`](https://docs.rs/regex-syntax).
-
-### Matching one character
-
-<pre class="rust">
-.             any character except new line (includes new line with s flag)
-[0-9]         any ASCII digit
-\d            digit (\p{Nd})
-\D            not digit
-\pX           Unicode character class identified by a one-letter name
-\p{Greek}     Unicode character class (general category or script)
-\PX           Negated Unicode character class identified by a one-letter name
-\P{Greek}     negated Unicode character class (general category or script)
-</pre>
-
-### Character classes
-
-<pre class="rust">
-[xyz]         A character class matching either x, y or z (union).
-[^xyz]        A character class matching any character except x, y and z.
-[a-z]         A character class matching any character in range a-z.
-[[:alpha:]]   ASCII character class ([A-Za-z])
-[[:^alpha:]]  Negated ASCII character class ([^A-Za-z])
-[x[^xyz]]     Nested/grouping character class (matching any character except y and z)
-[a-y&&xyz]    Intersection (matching x or y)
-[0-9&&[^4]]   Subtraction using intersection and negation (matching 0-9 except 4)
-[0-9--4]      Direct subtraction (matching 0-9 except 4)
-[a-g~~b-h]    Symmetric difference (matching `a` and `h` only)
-[\[\]]        Escaping in character classes (matching [ or ])
-[a&&b]        An empty character class matching nothing
-</pre>
-
-Any named character class may appear inside a bracketed `[...]` character
-class. For example, `[\p{Greek}[:digit:]]` matches any ASCII digit or any
-codepoint in the `Greek` script. `[\p{Greek}&&\pL]` matches Greek letters.
-
-Precedence in character classes, from most binding to least:
-
-1. Ranges: `[a-cd]` == `[[a-c]d]`
-2. Union: `[ab&&bc]` == `[[ab]&&[bc]]`
-3. Intersection, difference, symmetric difference. All three have equivalent
-precedence, and are evaluated in left-to-right order. For example,
-`[\pL--\p{Greek}&&\p{Uppercase}]` == `[[\pL--\p{Greek}]&&\p{Uppercase}]`.
-4. Negation: `[^a-z&&b]` == `[^[a-z&&b]]`.
-
-### Composites
-
-<pre class="rust">
-xy    concatenation (x followed by y)
-x|y   alternation (x or y, prefer x)
-</pre>
-
-This example shows how an alternation works, and what it means to prefer a
-branch in the alternation over subsequent branches.
-
-```
-use regex::Regex;
-
-let haystack = "samwise";
-// If 'samwise' comes first in our alternation, then it is
-// preferred as a match, even if the regex engine could
-// technically detect that 'sam' led to a match earlier.
-let re = Regex::new(r"samwise|sam").unwrap();
-assert_eq!("samwise", re.find(haystack).unwrap().as_str());
-// But if 'sam' comes first, then it will match instead.
-// In this case, it is impossible for 'samwise' to match
-// because 'sam' is a prefix of it.
-let re = Regex::new(r"sam|samwise").unwrap();
-assert_eq!("sam", re.find(haystack).unwrap().as_str());
-```
-
-### Repetitions
-
-<pre class="rust">
-x*        zero or more of x (greedy)
-x+        one or more of x (greedy)
-x?        zero or one of x (greedy)
-x*?       zero or more of x (ungreedy/lazy)
-x+?       one or more of x (ungreedy/lazy)
-x??       zero or one of x (ungreedy/lazy)
-x{n,m}    at least n x and at most m x (greedy)
-x{n,}     at least n x (greedy)
-x{n}      exactly n x
-x{n,m}?   at least n x and at most m x (ungreedy/lazy)
-x{n,}?    at least n x (ungreedy/lazy)
-x{n}?     exactly n x
-</pre>
-
-### Empty matches
-
-<pre class="rust">
-^               the beginning of a haystack (or start-of-line with multi-line mode)
-$               the end of a haystack (or end-of-line with multi-line mode)
-\A              only the beginning of a haystack (even with multi-line mode enabled)
-\z              only the end of a haystack (even with multi-line mode enabled)
-\b              a Unicode word boundary (\w on one side and \W, \A, or \z on other)
-\B              not a Unicode word boundary
-\b{start}, \<   a Unicode start-of-word boundary (\W|\A on the left, \w on the right)
-\b{end}, \>     a Unicode end-of-word boundary (\w on the left, \W|\z on the right))
-\b{start-half}  half of a Unicode start-of-word boundary (\W|\A on the left)
-\b{end-half}    half of a Unicode end-of-word boundary (\W|\z on the right)
-</pre>
-
-The empty regex is valid and matches the empty string. For example, the
-empty regex matches `abc` at positions `0`, `1`, `2` and `3`. When using the
-top-level [`Regex`] on `&str` haystacks, an empty match that splits a codepoint
-is guaranteed to never be returned. However, such matches are permitted when
-using a [`bytes::Regex`]. For example:
-
-```rust
-let re = regex::Regex::new(r"").unwrap();
-let ranges: Vec<_> = re.find_iter("đŸ’©").map(|m| m.range()).collect();
-assert_eq!(ranges, vec![0..0, 4..4]);
-
-let re = regex::bytes::Regex::new(r"").unwrap();
-let ranges: Vec<_> = re.find_iter("đŸ’©".as_bytes()).map(|m| m.range()).collect();
-assert_eq!(ranges, vec![0..0, 1..1, 2..2, 3..3, 4..4]);
-```
-
-Note that an empty regex is distinct from a regex that can never match.
-For example, the regex `[a&&b]` is a character class that represents the
-intersection of `a` and `b`. That intersection is empty, which means the
-character class is empty. Since nothing is in the empty set, `[a&&b]` matches
-nothing, not even the empty string.
-
-### Grouping and flags
-
-<pre class="rust">
-(exp)          numbered capture group (indexed by opening parenthesis)
-(?P&lt;name&gt;exp)  named (also numbered) capture group (names must be alpha-numeric)
-(?&lt;name&gt;exp)   named (also numbered) capture group (names must be alpha-numeric)
-(?:exp)        non-capturing group
-(?flags)       set flags within current group
-(?flags:exp)   set flags for exp (non-capturing)
-</pre>
-
-Capture group names must be any sequence of alpha-numeric Unicode codepoints,
-in addition to `.`, `_`, `[` and `]`. Names must start with either an `_` or
-an alphabetic codepoint. Alphabetic codepoints correspond to the `Alphabetic`
-Unicode property, while numeric codepoints correspond to the union of the
-`Decimal_Number`, `Letter_Number` and `Other_Number` general categories.
-
-Flags are each a single character. For example, `(?x)` sets the flag `x`
-and `(?-x)` clears the flag `x`. Multiple flags can be set or cleared at
-the same time: `(?xy)` sets both the `x` and `y` flags and `(?x-y)` sets
-the `x` flag and clears the `y` flag.
-
-All flags are by default disabled unless stated otherwise. They are:
-
-<pre class="rust">
-i     case-insensitive: letters match both upper and lower case
-m     multi-line mode: ^ and $ match begin/end of line
-s     allow . to match \n
-R     enables CRLF mode: when multi-line mode is enabled, \r\n is used
-U     swap the meaning of x* and x*?
-u     Unicode support (enabled by default)
-x     verbose mode, ignores whitespace and allow line comments (starting with `#`)
-</pre>
-
-Note that in verbose mode, whitespace is ignored everywhere, including within
-character classes. To insert whitespace, use its escaped form or a hex literal.
-For example, `\ ` or `\x20` for an ASCII space.
-
-Flags can be toggled within a pattern. Here's an example that matches
-case-insensitively for the first part but case-sensitively for the second part:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"(?i)a+(?-i)b+").unwrap();
-let m = re.find("AaAaAbbBBBb").unwrap();
-assert_eq!(m.as_str(), "AaAaAbb");
-```
-
-Notice that the `a+` matches either `a` or `A`, but the `b+` only matches
-`b`.
-
-Multi-line mode means `^` and `$` no longer match just at the beginning/end of
-the input, but also at the beginning/end of lines:
-
-```
-use regex::Regex;
-
-let re = Regex::new(r"(?m)^line \d+").unwrap();
-let m = re.find("line one\nline 2\n").unwrap();
-assert_eq!(m.as_str(), "line 2");
-```
-
-Note that `^` matches after new lines, even at the end of input:
-
-```
-use regex::Regex;
-
-let re = Regex::new(r"(?m)^").unwrap();
-let m = re.find_iter("test\n").last().unwrap();
-assert_eq!((m.start(), m.end()), (5, 5));
-```
-
-When both CRLF mode and multi-line mode are enabled, then `^` and `$` will
-match either `\r` and `\n`, but never in the middle of a `\r\n`:
-
-```
-use regex::Regex;
-
-let re = Regex::new(r"(?mR)^foo$").unwrap();
-let m = re.find("\r\nfoo\r\n").unwrap();
-assert_eq!(m.as_str(), "foo");
-```
-
-Unicode mode can also be selectively disabled, although only when the result
-*would not* match invalid UTF-8. One good example of this is using an ASCII
-word boundary instead of a Unicode word boundary, which might make some regex
-searches run faster:
-
-```rust
-use regex::Regex;
-
-let re = Regex::new(r"(?-u:\b).+(?-u:\b)").unwrap();
-let m = re.find("$$abc$$").unwrap();
-assert_eq!(m.as_str(), "abc");
-```
-
-### Escape sequences
-
-Note that this includes all possible escape sequences, even ones that are
-documented elsewhere.
-
-<pre class="rust">
-\*              literal *, applies to all ASCII except [0-9A-Za-z<>]
-\a              bell (\x07)
-\f              form feed (\x0C)
-\t              horizontal tab
-\n              new line
-\r              carriage return
-\v              vertical tab (\x0B)
-\A              matches at the beginning of a haystack
-\z              matches at the end of a haystack
-\b              word boundary assertion
-\B              negated word boundary assertion
-\b{start}, \<   start-of-word boundary assertion
-\b{end}, \>     end-of-word boundary assertion
-\b{start-half}  half of a start-of-word boundary assertion
-\b{end-half}    half of a end-of-word boundary assertion
-\123            octal character code, up to three digits (when enabled)
-\x7F            hex character code (exactly two digits)
-\x{10FFFF}      any hex character code corresponding to a Unicode code point
-\u007F          hex character code (exactly four digits)
-\u{7F}          any hex character code corresponding to a Unicode code point
-\U0000007F      hex character code (exactly eight digits)
-\U{7F}          any hex character code corresponding to a Unicode code point
-\p{Letter}      Unicode character class
-\P{Letter}      negated Unicode character class
-\d, \s, \w      Perl character class
-\D, \S, \W      negated Perl character class
-</pre>
-
-### Perl character classes (Unicode friendly)
-
-These classes are based on the definitions provided in
-[UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties):
-
-<pre class="rust">
-\d     digit (\p{Nd})
-\D     not digit
-\s     whitespace (\p{White_Space})
-\S     not whitespace
-\w     word character (\p{Alphabetic} + \p{M} + \d + \p{Pc} + \p{Join_Control})
-\W     not word character
-</pre>
-
-### ASCII character classes
-
-These classes are based on the definitions provided in
-[UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties):
-
-<pre class="rust">
-[[:alnum:]]    alphanumeric ([0-9A-Za-z])
-[[:alpha:]]    alphabetic ([A-Za-z])
-[[:ascii:]]    ASCII ([\x00-\x7F])
-[[:blank:]]    blank ([\t ])
-[[:cntrl:]]    control ([\x00-\x1F\x7F])
-[[:digit:]]    digits ([0-9])
-[[:graph:]]    graphical ([!-~])
-[[:lower:]]    lower case ([a-z])
-[[:print:]]    printable ([ -~])
-[[:punct:]]    punctuation ([!-/:-@\[-`{-~])
-[[:space:]]    whitespace ([\t\n\v\f\r ])
-[[:upper:]]    upper case ([A-Z])
-[[:word:]]     word characters ([0-9A-Za-z_])
-[[:xdigit:]]   hex digit ([0-9A-Fa-f])
-</pre>
-
-# Untrusted input
-
-This crate is meant to be able to run regex searches on untrusted haystacks
-without fear of [ReDoS]. This crate also, to a certain extent, supports
-untrusted patterns.
-
-[ReDoS]: https://en.wikipedia.org/wiki/ReDoS
-
-This crate differs from most (but not all) other regex engines in that it
-doesn't use unbounded backtracking to run a regex search. In those cases,
-one generally cannot use untrusted patterns *or* untrusted haystacks because
-it can be very difficult to know whether a particular pattern will result in
-catastrophic backtracking or not.
-
-We'll first discuss how this crate deals with untrusted inputs and then wrap
-it up with a realistic discussion about what practice really looks like.
-
-### Panics
-
-Outside of clearly documented cases, most APIs in this crate are intended to
-never panic regardless of the inputs given to them. For example, `Regex::new`,
-`Regex::is_match`, `Regex::find` and `Regex::captures` should never panic. That
-is, it is an API promise that those APIs will never panic no matter what inputs
-are given to them. With that said, regex engines are complicated beasts, and
-providing a rock solid guarantee that these APIs literally never panic is
-essentially equivalent to saying, "there are no bugs in this library." That is
-a bold claim, and not really one that can be feasibly made with a straight
-face.
-
-Don't get the wrong impression here. This crate is extensively tested, not just
-with unit and integration tests, but also via fuzz testing. For example, this
-crate is part of the [OSS-fuzz project]. Panics should be incredibly rare, but
-it is possible for bugs to exist, and thus possible for a panic to occur. If
-you need a rock solid guarantee against panics, then you should wrap calls into
-this library with [`std::panic::catch_unwind`].
-
-It's also worth pointing out that this library will *generally* panic when
-other regex engines would commit undefined behavior. When undefined behavior
-occurs, your program might continue as if nothing bad has happened, but it also
-might mean your program is open to the worst kinds of exploits. In contrast,
-the worst thing a panic can do is a denial of service.
-
-[OSS-fuzz project]: https://android.googlesource.com/platform/external/oss-fuzz/+/refs/tags/android-t-preview-1/projects/rust-regex/
-[`std::panic::catch_unwind`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html
-
-### Untrusted patterns
-
-The principal way this crate deals with them is by limiting their size by
-default. The size limit can be configured via [`RegexBuilder::size_limit`]. The
-idea of a size limit is that compiling a pattern into a `Regex` will fail if it
-becomes "too big." Namely, while *most* resources consumed by compiling a regex
-are approximately proportional (albeit with some high constant factors in some
-cases, such as with Unicode character classes) to the length of the pattern
-itself, there is one particular exception to this: counted repetitions. Namely,
-this pattern:
-
-```text
-a{5}{5}{5}{5}{5}{5}
-```
-
-Is equivalent to this pattern:
-
-```text
-a{15625}
-```
-
-In both of these cases, the actual pattern string is quite small, but the
-resulting `Regex` value is quite large. Indeed, as the first pattern shows,
-it isn't enough to locally limit the size of each repetition because they can
-be stacked in a way that results in exponential growth.
-
-To provide a bit more context, a simplified view of regex compilation looks
-like this:
-
-* The pattern string is parsed into a structured representation called an AST.
-Counted repetitions are not expanded and Unicode character classes are not
-looked up in this stage. That is, the size of the AST is proportional to the
-size of the pattern with "reasonable" constant factors. In other words, one
-can reasonably limit the memory used by an AST by limiting the length of the
-pattern string.
-* The AST is translated into an HIR. Counted repetitions are still *not*
-expanded at this stage, but Unicode character classes are embedded into the
-HIR. The memory usage of a HIR is still proportional to the length of the
-original pattern string, but the constant factors---mostly as a result of
-Unicode character classes---can be quite high. Still though, the memory used by
-an HIR can be reasonably limited by limiting the length of the pattern string.
-* The HIR is compiled into a [Thompson NFA]. This is the stage at which
-something like `\w{5}` is rewritten to `\w\w\w\w\w`. Thus, this is the stage
-at which [`RegexBuilder::size_limit`] is enforced. If the NFA exceeds the
-configured size, then this stage will fail.
-
-[Thompson NFA]: https://en.wikipedia.org/wiki/Thompson%27s_construction
-
-The size limit helps avoid two different kinds of exorbitant resource usage:
-
-* It avoids permitting exponential memory usage based on the size of the
-pattern string.
-* It avoids long search times. This will be discussed in more detail in the
-next section, but worst case search time *is* dependent on the size of the
-regex. So keeping regexes limited to a reasonable size is also a way of keeping
-search times reasonable.
-
-Finally, it's worth pointing out that regex compilation is guaranteed to take
-worst case `O(m)` time, where `m` is proportional to the size of regex. The
-size of the regex here is *after* the counted repetitions have been expanded.
-
-**Advice for those using untrusted regexes**: limit the pattern length to
-something small and expand it as needed. Configure [`RegexBuilder::size_limit`]
-to something small and then expand it as needed.
-
-### Untrusted haystacks
-
-The main way this crate guards against searches from taking a long time is by
-using algorithms that guarantee a `O(m * n)` worst case time and space bound.
-Namely:
-
-* `m` is proportional to the size of the regex, where the size of the regex
-includes the expansion of all counted repetitions. (See the previous section on
-untrusted patterns.)
-* `n` is proportional to the length, in bytes, of the haystack.
-
-In other words, if you consider `m` to be a constant (for example, the regex
-pattern is a literal in the source code), then the search can be said to run
-in "linear time." Or equivalently, "linear time with respect to the size of the
-haystack."
-
-But the `m` factor here is important not to ignore. If a regex is
-particularly big, the search times can get quite slow. This is why, in part,
-[`RegexBuilder::size_limit`] exists.
-
-**Advice for those searching untrusted haystacks**: As long as your regexes
-are not enormous, you should expect to be able to search untrusted haystacks
-without fear. If you aren't sure, you should benchmark it. Unlike backtracking
-engines, if your regex is so big that it's likely to result in slow searches,
-this is probably something you'll be able to observe regardless of what the
-haystack is made up of.
-
-### Iterating over matches
-
-One thing that is perhaps easy to miss is that the worst case time
-complexity bound of `O(m * n)` applies to methods like [`Regex::is_match`],
-[`Regex::find`] and [`Regex::captures`]. It does **not** apply to
-[`Regex::find_iter`] or [`Regex::captures_iter`]. Namely, since iterating over
-all matches can execute many searches, and each search can scan the entire
-haystack, the worst case time complexity for iterators is `O(m * n^2)`.
-
-One example of where this occurs is when a pattern consists of an alternation,
-where an earlier branch of the alternation requires scanning the entire
-haystack only to discover that there is no match. It also requires a later
-branch of the alternation to have matched at the beginning of the search. For
-example, consider the pattern `.*[^A-Z]|[A-Z]` and the haystack `AAAAA`. The
-first search will scan to the end looking for matches of `.*[^A-Z]` even though
-a finite automata engine (as in this crate) knows that `[A-Z]` has already
-matched the first character of the haystack. This is due to the greedy nature
-of regex searching. That first search will report a match at the first `A` only
-after scanning to the end to discover that no other match exists. The next
-search then begins at the second `A` and the behavior repeats.
-
-There is no way to avoid this. This means that if both patterns and haystacks
-are untrusted and you're iterating over all matches, you're susceptible to
-worst case quadratic time complexity. One possible way to mitigate this
-is to drop down to the lower level `regex-automata` crate and use its
-`meta::Regex` iterator APIs. There, you can configure the search to operate
-in "earliest" mode by passing a `Input::new(haystack).earliest(true)` to
-`meta::Regex::find_iter` (for example). By enabling this mode, you give up
-the normal greedy match semantics of regex searches and instead ask the regex
-engine to immediately stop as soon as a match has been found. Enabling this
-mode will thus restore the worst case `O(m * n)` time complexity bound, but at
-the cost of different semantics.
-
-### Untrusted inputs in practice
-
-While providing a `O(m * n)` worst case time bound on all searches goes a long
-way toward preventing [ReDoS], that doesn't mean every search you can possibly
-run will complete without burning CPU time. In general, there are a few ways
-for the `m * n` time bound to still bite you:
-
-* You are searching an exceptionally long haystack. No matter how you slice
-it, a longer haystack will take more time to search. This crate may often make
-very quick work of even long haystacks because of its literal optimizations,
-but those aren't available for all regexes.
-* Unicode character classes can cause searches to be quite slow in some cases.
-This is especially true when they are combined with counted repetitions. While
-the regex size limit above will protect you from the most egregious cases,
-the default size limit still permits pretty big regexes that can execute more
-slowly than one might expect.
-* While routines like [`Regex::find`] and [`Regex::captures`] guarantee
-worst case `O(m * n)` search time, routines like [`Regex::find_iter`] and
-[`Regex::captures_iter`] actually have worst case `O(m * n^2)` search time.
-This is because `find_iter` runs many searches, and each search takes worst
-case `O(m * n)` time. Thus, iteration of all matches in a haystack has
-worst case `O(m * n^2)`. A good example of a pattern that exhibits this is
-`(?:A+){1000}|` or even `.*[^A-Z]|[A-Z]`.
-
-In general, unstrusted haystacks are easier to stomach than untrusted patterns.
-Untrusted patterns give a lot more control to the caller to impact the
-performance of a search. In many cases, a regex search will actually execute in
-average case `O(n)` time (i.e., not dependent on the size of the regex), but
-this can't be guaranteed in general. Therefore, permitting untrusted patterns
-means that your only line of defense is to put a limit on how big `m` (and
-perhaps also `n`) can be in `O(m * n)`. `n` is limited by simply inspecting
-the length of the haystack while `m` is limited by *both* applying a limit to
-the length of the pattern *and* a limit on the compiled size of the regex via
-[`RegexBuilder::size_limit`].
-
-It bears repeating: if you're accepting untrusted patterns, it would be a good
-idea to start with conservative limits on `m` and `n`, and then carefully
-increase them as needed.
-
-# Crate features
-
-By default, this crate tries pretty hard to make regex matching both as fast
-as possible and as correct as it can be. This means that there is a lot of
-code dedicated to performance, the handling of Unicode data and the Unicode
-data itself. Overall, this leads to more dependencies, larger binaries and
-longer compile times. This trade off may not be appropriate in all cases, and
-indeed, even when all Unicode and performance features are disabled, one is
-still left with a perfectly serviceable regex engine that will work well in
-many cases. (Note that code is not arbitrarily reducible, and for this reason,
-the [`regex-lite`](https://docs.rs/regex-lite) crate exists to provide an even
-more minimal experience by cutting out Unicode and performance, but still
-maintaining the linear search time bound.)
-
-This crate exposes a number of features for controlling that trade off. Some
-of these features are strictly performance oriented, such that disabling them
-won't result in a loss of functionality, but may result in worse performance.
-Other features, such as the ones controlling the presence or absence of Unicode
-data, can result in a loss of functionality. For example, if one disables the
-`unicode-case` feature (described below), then compiling the regex `(?i)a`
-will fail since Unicode case insensitivity is enabled by default. Instead,
-callers must use `(?i-u)a` to disable Unicode case folding. Stated differently,
-enabling or disabling any of the features below can only add or subtract from
-the total set of valid regular expressions. Enabling or disabling a feature
-will never modify the match semantics of a regular expression.
-
-Most features below are enabled by default. Features that aren't enabled by
-default are noted.
-
-### Ecosystem features
-
-* **std** -
-  When enabled, this will cause `regex` to use the standard library. In terms
-  of APIs, `std` causes error types to implement the `std::error::Error`
-  trait. Enabling `std` will also result in performance optimizations,
-  including SIMD and faster synchronization primitives. Notably, **disabling
-  the `std` feature will result in the use of spin locks**. To use a regex
-  engine without `std` and without spin locks, you'll need to drop down to
-  the [`regex-automata`](https://docs.rs/regex-automata) crate.
-* **logging** -
-  When enabled, the `log` crate is used to emit messages about regex
-  compilation and search strategies. This is **disabled by default**. This is
-  typically only useful to someone working on this crate's internals, but might
-  be useful if you're doing some rabbit hole performance hacking. Or if you're
-  just interested in the kinds of decisions being made by the regex engine.
-
-### Performance features
-
-* **perf** -
-  Enables all performance related features except for `perf-dfa-full`. This
-  feature is enabled by default is intended to cover all reasonable features
-  that improve performance, even if more are added in the future.
-* **perf-dfa** -
-  Enables the use of a lazy DFA for matching. The lazy DFA is used to compile
-  portions of a regex to a very fast DFA on an as-needed basis. This can
-  result in substantial speedups, usually by an order of magnitude on large
-  haystacks. The lazy DFA does not bring in any new dependencies, but it can
-  make compile times longer.
-* **perf-dfa-full** -
-  Enables the use of a full DFA for matching. Full DFAs are problematic because
-  they have worst case `O(2^n)` construction time. For this reason, when this
-  feature is enabled, full DFAs are only used for very small regexes and a
-  very small space bound is used during determinization to avoid the DFA
-  from blowing up. This feature is not enabled by default, even as part of
-  `perf`, because it results in fairly sizeable increases in binary size and
-  compilation time. It can result in faster search times, but they tend to be
-  more modest and limited to non-Unicode regexes.
-* **perf-onepass** -
-  Enables the use of a one-pass DFA for extracting the positions of capture
-  groups. This optimization applies to a subset of certain types of NFAs and
-  represents the fastest engine in this crate for dealing with capture groups.
-* **perf-backtrack** -
-  Enables the use of a bounded backtracking algorithm for extracting the
-  positions of capture groups. This usually sits between the slowest engine
-  (the PikeVM) and the fastest engine (one-pass DFA) for extracting capture
-  groups. It's used whenever the regex is not one-pass and is small enough.
-* **perf-inline** -
-  Enables the use of aggressive inlining inside match routines. This reduces
-  the overhead of each match. The aggressive inlining, however, increases
-  compile times and binary size.
-* **perf-literal** -
-  Enables the use of literal optimizations for speeding up matches. In some
-  cases, literal optimizations can result in speedups of _several_ orders of
-  magnitude. Disabling this drops the `aho-corasick` and `memchr` dependencies.
-* **perf-cache** -
-  This feature used to enable a faster internal cache at the cost of using
-  additional dependencies, but this is no longer an option. A fast internal
-  cache is now used unconditionally with no additional dependencies. This may
-  change in the future.
-
-### Unicode features
-
-* **unicode** -
-  Enables all Unicode features. This feature is enabled by default, and will
-  always cover all Unicode features, even if more are added in the future.
-* **unicode-age** -
-  Provide the data for the
-  [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
-  This makes it possible to use classes like `\p{Age:6.0}` to refer to all
-  codepoints first introduced in Unicode 6.0
-* **unicode-bool** -
-  Provide the data for numerous Unicode boolean properties. The full list
-  is not included here, but contains properties like `Alphabetic`, `Emoji`,
-  `Lowercase`, `Math`, `Uppercase` and `White_Space`.
-* **unicode-case** -
-  Provide the data for case insensitive matching using
-  [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
-* **unicode-gencat** -
-  Provide the data for
-  [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
-  This includes, but is not limited to, `Decimal_Number`, `Letter`,
-  `Math_Symbol`, `Number` and `Punctuation`.
-* **unicode-perl** -
-  Provide the data for supporting the Unicode-aware Perl character classes,
-  corresponding to `\w`, `\s` and `\d`. This is also necessary for using
-  Unicode-aware word boundary assertions. Note that if this feature is
-  disabled, the `\s` and `\d` character classes are still available if the
-  `unicode-bool` and `unicode-gencat` features are enabled, respectively.
-* **unicode-script** -
-  Provide the data for
-  [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
-  This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
-  `Latin` and `Thai`.
-* **unicode-segment** -
-  Provide the data necessary to provide the properties used to implement the
-  [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
-  This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
-  `\p{sb=ATerm}`.
-
-# Other crates
-
-This crate has two required dependencies and several optional dependencies.
-This section briefly describes them with the goal of raising awareness of how
-different components of this crate may be used independently.
-
-It is somewhat unusual for a regex engine to have dependencies, as most regex
-libraries are self contained units with no dependencies other than a particular
-environment's standard library. Indeed, for other similarly optimized regex
-engines, most or all of the code in the dependencies of this crate would
-normally just be unseparable or coupled parts of the crate itself. But since
-Rust and its tooling ecosystem make the use of dependencies so easy, it made
-sense to spend some effort de-coupling parts of this crate and making them
-independently useful.
-
-We only briefly describe each crate here.
-
-* [`regex-lite`](https://docs.rs/regex-lite) is not a dependency of `regex`,
-but rather, a standalone zero-dependency simpler version of `regex` that
-prioritizes compile times and binary size. In exchange, it eschews Unicode
-support and performance. Its match semantics are as identical as possible to
-the `regex` crate, and for the things it supports, its APIs are identical to
-the APIs in this crate. In other words, for a lot of use cases, it is a drop-in
-replacement.
-* [`regex-syntax`](https://docs.rs/regex-syntax) provides a regular expression
-parser via `Ast` and `Hir` types. It also provides routines for extracting
-literals from a pattern. Folks can use this crate to do analysis, or even to
-build their own regex engine without having to worry about writing a parser.
-* [`regex-automata`](https://docs.rs/regex-automata) provides the regex engines
-themselves. One of the downsides of finite automata based regex engines is that
-they often need multiple internal engines in order to have similar or better
-performance than an unbounded backtracking engine in practice. `regex-automata`
-in particular provides public APIs for a PikeVM, a bounded backtracker, a
-one-pass DFA, a lazy DFA, a fully compiled DFA and a meta regex engine that
-combines all them together. It also has native multi-pattern support and
-provides a way to compile and serialize full DFAs such that they can be loaded
-and searched in a no-std no-alloc environment. `regex-automata` itself doesn't
-even have a required dependency on `regex-syntax`!
-* [`memchr`](https://docs.rs/memchr) provides low level SIMD vectorized
-routines for quickly finding the location of single bytes or even substrings
-in a haystack. In other words, it provides fast `memchr` and `memmem` routines.
-These are used by this crate in literal optimizations.
-* [`aho-corasick`](https://docs.rs/aho-corasick) provides multi-substring
-search. It also provides SIMD vectorized routines in the case where the number
-of substrings to search for is relatively small. The `regex` crate also uses
-this for literal optimizations.
-*/
-
-#![no_std]
-#![deny(missing_docs)]
-#![cfg_attr(feature = "pattern", feature(pattern))]
-#![warn(missing_debug_implementations)]
-
-#[cfg(doctest)]
-doc_comment::doctest!("../README.md");
-
-extern crate alloc;
-#[cfg(any(test, feature = "std"))]
-extern crate std;
-
-pub use crate::error::Error;
-
-pub use crate::{builders::string::*, regex::string::*, regexset::string::*};
-
-mod builders;
-pub mod bytes;
-mod error;
-mod find_byte;
-#[cfg(feature = "pattern")]
-mod pattern;
-mod regex;
-mod regexset;
-
-/// Escapes all regular expression meta characters in `pattern`.
-///
-/// The string returned may be safely used as a literal in a regular
-/// expression.
-pub fn escape(pattern: &str) -> alloc::string::String {
-    regex_syntax::escape(pattern)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/pattern.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/pattern.rs
deleted file mode 100644
index d7bf148..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/pattern.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-use core::str::pattern::{Pattern, SearchStep, Searcher, Utf8Pattern};
-
-use crate::{Matches, Regex};
-
-#[derive(Debug)]
-pub struct RegexSearcher<'r, 't> {
-    haystack: &'t str,
-    it: Matches<'r, 't>,
-    last_step_end: usize,
-    next_match: Option<(usize, usize)>,
-}
-
-impl<'r> Pattern for &'r Regex {
-    type Searcher<'t> = RegexSearcher<'r, 't>;
-
-    fn into_searcher<'t>(self, haystack: &'t str) -> RegexSearcher<'r, 't> {
-        RegexSearcher {
-            haystack,
-            it: self.find_iter(haystack),
-            last_step_end: 0,
-            next_match: None,
-        }
-    }
-
-    fn as_utf8_pattern<'p>(&'p self) -> Option<Utf8Pattern<'p>> {
-        None
-    }
-}
-
-unsafe impl<'r, 't> Searcher<'t> for RegexSearcher<'r, 't> {
-    #[inline]
-    fn haystack(&self) -> &'t str {
-        self.haystack
-    }
-
-    #[inline]
-    fn next(&mut self) -> SearchStep {
-        if let Some((s, e)) = self.next_match {
-            self.next_match = None;
-            self.last_step_end = e;
-            return SearchStep::Match(s, e);
-        }
-        match self.it.next() {
-            None => {
-                if self.last_step_end < self.haystack().len() {
-                    let last = self.last_step_end;
-                    self.last_step_end = self.haystack().len();
-                    SearchStep::Reject(last, self.haystack().len())
-                } else {
-                    SearchStep::Done
-                }
-            }
-            Some(m) => {
-                let (s, e) = (m.start(), m.end());
-                if s == self.last_step_end {
-                    self.last_step_end = e;
-                    SearchStep::Match(s, e)
-                } else {
-                    self.next_match = Some((s, e));
-                    let last = self.last_step_end;
-                    self.last_step_end = s;
-                    SearchStep::Reject(last, s)
-                }
-            }
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/bytes.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/bytes.rs
deleted file mode 100644
index 39af6e7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/bytes.rs
+++ /dev/null
@@ -1,2702 +0,0 @@
-use alloc::{borrow::Cow, string::String, sync::Arc, vec::Vec};
-
-use regex_automata::{meta, util::captures, Input, PatternID};
-
-use crate::{bytes::RegexBuilder, error::Error};
-
-/// A compiled regular expression for searching Unicode haystacks.
-///
-/// A `Regex` can be used to search haystacks, split haystacks into substrings
-/// or replace substrings in a haystack with a different substring. All
-/// searching is done with an implicit `(?s:.)*?` at the beginning and end of
-/// an pattern. To force an expression to match the whole string (or a prefix
-/// or a suffix), you must use an anchor like `^` or `$` (or `\A` and `\z`).
-///
-/// Like the `Regex` type in the parent module, matches with this regex return
-/// byte offsets into the haystack. **Unlike** the parent `Regex` type, these
-/// byte offsets may not correspond to UTF-8 sequence boundaries since the
-/// regexes in this module can match arbitrary bytes.
-///
-/// The only methods that allocate new byte strings are the string replacement
-/// methods. All other methods (searching and splitting) return borrowed
-/// references into the haystack given.
-///
-/// # Example
-///
-/// Find the offsets of a US phone number:
-///
-/// ```
-/// use regex::bytes::Regex;
-///
-/// let re = Regex::new("[0-9]{3}-[0-9]{3}-[0-9]{4}").unwrap();
-/// let m = re.find(b"phone: 111-222-3333").unwrap();
-/// assert_eq!(7..19, m.range());
-/// ```
-///
-/// # Example: extracting capture groups
-///
-/// A common way to use regexes is with capture groups. That is, instead of
-/// just looking for matches of an entire regex, parentheses are used to create
-/// groups that represent part of the match.
-///
-/// For example, consider a haystack with multiple lines, and each line has
-/// three whitespace delimited fields where the second field is expected to be
-/// a number and the third field a boolean. To make this convenient, we use
-/// the [`Captures::extract`] API to put the strings that match each group
-/// into a fixed size array:
-///
-/// ```
-/// use regex::bytes::Regex;
-///
-/// let hay = b"
-/// rabbit         54 true
-/// groundhog 2 true
-/// does not match
-/// fox   109    false
-/// ";
-/// let re = Regex::new(r"(?m)^\s*(\S+)\s+([0-9]+)\s+(true|false)\s*$").unwrap();
-/// let mut fields: Vec<(&[u8], i64, bool)> = vec![];
-/// for (_, [f1, f2, f3]) in re.captures_iter(hay).map(|caps| caps.extract()) {
-///     // These unwraps are OK because our pattern is written in a way where
-///     // all matches for f2 and f3 will be valid UTF-8.
-///     let f2 = std::str::from_utf8(f2).unwrap();
-///     let f3 = std::str::from_utf8(f3).unwrap();
-///     fields.push((f1, f2.parse()?, f3.parse()?));
-/// }
-/// assert_eq!(fields, vec![
-///     (&b"rabbit"[..], 54, true),
-///     (&b"groundhog"[..], 2, true),
-///     (&b"fox"[..], 109, false),
-/// ]);
-///
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-///
-/// # Example: matching invalid UTF-8
-///
-/// One of the reasons for searching `&[u8]` haystacks is that the `&[u8]`
-/// might not be valid UTF-8. Indeed, with a `bytes::Regex`, patterns that
-/// match invalid UTF-8 are explicitly allowed. Here's one example that looks
-/// for valid UTF-8 fields that might be separated by invalid UTF-8. In this
-/// case, we use `(?s-u:.)`, which matches any byte. Attempting to use it in a
-/// top-level `Regex` will result in the regex failing to compile. Notice also
-/// that we use `.` with Unicode mode enabled, in which case, only valid UTF-8
-/// is matched. In this way, we can build one pattern where some parts only
-/// match valid UTF-8 while other parts are more permissive.
-///
-/// ```
-/// use regex::bytes::Regex;
-///
-/// // F0 9F 92 A9 is the UTF-8 encoding for a Pile of Poo.
-/// let hay = b"\xFF\xFFfoo\xFF\xFF\xFF\xF0\x9F\x92\xA9\xFF";
-/// // An equivalent to '(?s-u:.)' is '(?-u:[\x00-\xFF])'.
-/// let re = Regex::new(r"(?s)(?-u:.)*?(?<f1>.+)(?-u:.)*?(?<f2>.+)").unwrap();
-/// let caps = re.captures(hay).unwrap();
-/// assert_eq!(&caps["f1"], &b"foo"[..]);
-/// assert_eq!(&caps["f2"], "đŸ’©".as_bytes());
-/// ```
-#[derive(Clone)]
-pub struct Regex {
-    pub(crate) meta: meta::Regex,
-    pub(crate) pattern: Arc<str>,
-}
-
-impl core::fmt::Display for Regex {
-    /// Shows the original regular expression.
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(f, "{}", self.as_str())
-    }
-}
-
-impl core::fmt::Debug for Regex {
-    /// Shows the original regular expression.
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        f.debug_tuple("Regex").field(&self.as_str()).finish()
-    }
-}
-
-impl core::str::FromStr for Regex {
-    type Err = Error;
-
-    /// Attempts to parse a string into a regular expression
-    fn from_str(s: &str) -> Result<Regex, Error> {
-        Regex::new(s)
-    }
-}
-
-impl TryFrom<&str> for Regex {
-    type Error = Error;
-
-    /// Attempts to parse a string into a regular expression
-    fn try_from(s: &str) -> Result<Regex, Error> {
-        Regex::new(s)
-    }
-}
-
-impl TryFrom<String> for Regex {
-    type Error = Error;
-
-    /// Attempts to parse a string into a regular expression
-    fn try_from(s: String) -> Result<Regex, Error> {
-        Regex::new(&s)
-    }
-}
-
-/// Core regular expression methods.
-impl Regex {
-    /// Compiles a regular expression. Once compiled, it can be used repeatedly
-    /// to search, split or replace substrings in a haystack.
-    ///
-    /// Note that regex compilation tends to be a somewhat expensive process,
-    /// and unlike higher level environments, compilation is not automatically
-    /// cached for you. One should endeavor to compile a regex once and then
-    /// reuse it. For example, it's a bad idea to compile the same regex
-    /// repeatedly in a loop.
-    ///
-    /// # Errors
-    ///
-    /// If an invalid pattern is given, then an error is returned.
-    /// An error is also returned if the pattern is valid, but would
-    /// produce a regex that is bigger than the configured size limit via
-    /// [`RegexBuilder::size_limit`]. (A reasonable size limit is enabled by
-    /// default.)
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// // An Invalid pattern because of an unclosed parenthesis
-    /// assert!(Regex::new(r"foo(bar").is_err());
-    /// // An invalid pattern because the regex would be too big
-    /// // because Unicode tends to inflate things.
-    /// assert!(Regex::new(r"\w{1000}").is_err());
-    /// // Disabling Unicode can make the regex much smaller,
-    /// // potentially by up to or more than an order of magnitude.
-    /// assert!(Regex::new(r"(?-u:\w){1000}").is_ok());
-    /// ```
-    pub fn new(re: &str) -> Result<Regex, Error> {
-        RegexBuilder::new(re).build()
-    }
-
-    /// Returns true if and only if there is a match for the regex anywhere
-    /// in the haystack given.
-    ///
-    /// It is recommended to use this method if all you need to do is test
-    /// whether a match exists, since the underlying matching engine may be
-    /// able to do less work.
-    ///
-    /// # Example
-    ///
-    /// Test if some haystack contains at least one word with exactly 13
-    /// Unicode word characters:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\b\w{13}\b").unwrap();
-    /// let hay = b"I categorically deny having triskaidekaphobia.";
-    /// assert!(re.is_match(hay));
-    /// ```
-    #[inline]
-    pub fn is_match(&self, haystack: &[u8]) -> bool {
-        self.is_match_at(haystack, 0)
-    }
-
-    /// This routine searches for the first match of this regex in the
-    /// haystack given, and if found, returns a [`Match`]. The `Match`
-    /// provides access to both the byte offsets of the match and the actual
-    /// substring that matched.
-    ///
-    /// Note that this should only be used if you want to find the entire
-    /// match. If instead you just want to test the existence of a match,
-    /// it's potentially faster to use `Regex::is_match(hay)` instead of
-    /// `Regex::find(hay).is_some()`.
-    ///
-    /// # Example
-    ///
-    /// Find the first word with exactly 13 Unicode word characters:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\b\w{13}\b").unwrap();
-    /// let hay = b"I categorically deny having triskaidekaphobia.";
-    /// let mat = re.find(hay).unwrap();
-    /// assert_eq!(2..15, mat.range());
-    /// assert_eq!(b"categorically", mat.as_bytes());
-    /// ```
-    #[inline]
-    pub fn find<'h>(&self, haystack: &'h [u8]) -> Option<Match<'h>> {
-        self.find_at(haystack, 0)
-    }
-
-    /// Returns an iterator that yields successive non-overlapping matches in
-    /// the given haystack. The iterator yields values of type [`Match`].
-    ///
-    /// # Time complexity
-    ///
-    /// Note that since `find_iter` runs potentially many searches on the
-    /// haystack and since each search has worst case `O(m * n)` time
-    /// complexity, the overall worst case time complexity for iteration is
-    /// `O(m * n^2)`.
-    ///
-    /// # Example
-    ///
-    /// Find every word with exactly 13 Unicode word characters:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\b\w{13}\b").unwrap();
-    /// let hay = b"Retroactively relinquishing remunerations is reprehensible.";
-    /// let matches: Vec<_> = re.find_iter(hay).map(|m| m.as_bytes()).collect();
-    /// assert_eq!(matches, vec![
-    ///     &b"Retroactively"[..],
-    ///     &b"relinquishing"[..],
-    ///     &b"remunerations"[..],
-    ///     &b"reprehensible"[..],
-    /// ]);
-    /// ```
-    #[inline]
-    pub fn find_iter<'r, 'h>(&'r self, haystack: &'h [u8]) -> Matches<'r, 'h> {
-        Matches { haystack, it: self.meta.find_iter(haystack) }
-    }
-
-    /// This routine searches for the first match of this regex in the haystack
-    /// given, and if found, returns not only the overall match but also the
-    /// matches of each capture group in the regex. If no match is found, then
-    /// `None` is returned.
-    ///
-    /// Capture group `0` always corresponds to an implicit unnamed group that
-    /// includes the entire match. If a match is found, this group is always
-    /// present. Subsequent groups may be named and are numbered, starting
-    /// at 1, by the order in which the opening parenthesis appears in the
-    /// pattern. For example, in the pattern `(?<a>.(?<b>.))(?<c>.)`, `a`,
-    /// `b` and `c` correspond to capture group indices `1`, `2` and `3`,
-    /// respectively.
-    ///
-    /// You should only use `captures` if you need access to the capture group
-    /// matches. Otherwise, [`Regex::find`] is generally faster for discovering
-    /// just the overall match.
-    ///
-    /// # Example
-    ///
-    /// Say you have some haystack with movie names and their release years,
-    /// like "'Citizen Kane' (1941)". It'd be nice if we could search for
-    /// strings looking like that, while also extracting the movie name and its
-    /// release year separately. The example below shows how to do that.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap();
-    /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941).";
-    /// let caps = re.captures(hay).unwrap();
-    /// assert_eq!(caps.get(0).unwrap().as_bytes(), b"'Citizen Kane' (1941)");
-    /// assert_eq!(caps.get(1).unwrap().as_bytes(), b"Citizen Kane");
-    /// assert_eq!(caps.get(2).unwrap().as_bytes(), b"1941");
-    /// // You can also access the groups by index using the Index notation.
-    /// // Note that this will panic on an invalid index. In this case, these
-    /// // accesses are always correct because the overall regex will only
-    /// // match when these capture groups match.
-    /// assert_eq!(&caps[0], b"'Citizen Kane' (1941)");
-    /// assert_eq!(&caps[1], b"Citizen Kane");
-    /// assert_eq!(&caps[2], b"1941");
-    /// ```
-    ///
-    /// Note that the full match is at capture group `0`. Each subsequent
-    /// capture group is indexed by the order of its opening `(`.
-    ///
-    /// We can make this example a bit clearer by using *named* capture groups:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>\d{4})\)").unwrap();
-    /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941).";
-    /// let caps = re.captures(hay).unwrap();
-    /// assert_eq!(caps.get(0).unwrap().as_bytes(), b"'Citizen Kane' (1941)");
-    /// assert_eq!(caps.name("title").unwrap().as_bytes(), b"Citizen Kane");
-    /// assert_eq!(caps.name("year").unwrap().as_bytes(), b"1941");
-    /// // You can also access the groups by name using the Index notation.
-    /// // Note that this will panic on an invalid group name. In this case,
-    /// // these accesses are always correct because the overall regex will
-    /// // only match when these capture groups match.
-    /// assert_eq!(&caps[0], b"'Citizen Kane' (1941)");
-    /// assert_eq!(&caps["title"], b"Citizen Kane");
-    /// assert_eq!(&caps["year"], b"1941");
-    /// ```
-    ///
-    /// Here we name the capture groups, which we can access with the `name`
-    /// method or the `Index` notation with a `&str`. Note that the named
-    /// capture groups are still accessible with `get` or the `Index` notation
-    /// with a `usize`.
-    ///
-    /// The `0`th capture group is always unnamed, so it must always be
-    /// accessed with `get(0)` or `[0]`.
-    ///
-    /// Finally, one other way to to get the matched substrings is with the
-    /// [`Captures::extract`] API:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap();
-    /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941).";
-    /// let (full, [title, year]) = re.captures(hay).unwrap().extract();
-    /// assert_eq!(full, b"'Citizen Kane' (1941)");
-    /// assert_eq!(title, b"Citizen Kane");
-    /// assert_eq!(year, b"1941");
-    /// ```
-    #[inline]
-    pub fn captures<'h>(&self, haystack: &'h [u8]) -> Option<Captures<'h>> {
-        self.captures_at(haystack, 0)
-    }
-
-    /// Returns an iterator that yields successive non-overlapping matches in
-    /// the given haystack. The iterator yields values of type [`Captures`].
-    ///
-    /// This is the same as [`Regex::find_iter`], but instead of only providing
-    /// access to the overall match, each value yield includes access to the
-    /// matches of all capture groups in the regex. Reporting this extra match
-    /// data is potentially costly, so callers should only use `captures_iter`
-    /// over `find_iter` when they actually need access to the capture group
-    /// matches.
-    ///
-    /// # Time complexity
-    ///
-    /// Note that since `captures_iter` runs potentially many searches on the
-    /// haystack and since each search has worst case `O(m * n)` time
-    /// complexity, the overall worst case time complexity for iteration is
-    /// `O(m * n^2)`.
-    ///
-    /// # Example
-    ///
-    /// We can use this to find all movie titles and their release years in
-    /// some haystack, where the movie is formatted like "'Title' (xxxx)":
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"'([^']+)'\s+\(([0-9]{4})\)").unwrap();
-    /// let hay = b"'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
-    /// let mut movies = vec![];
-    /// for (_, [title, year]) in re.captures_iter(hay).map(|c| c.extract()) {
-    ///     // OK because [0-9]{4} can only match valid UTF-8.
-    ///     let year = std::str::from_utf8(year).unwrap();
-    ///     movies.push((title, year.parse::<i64>()?));
-    /// }
-    /// assert_eq!(movies, vec![
-    ///     (&b"Citizen Kane"[..], 1941),
-    ///     (&b"The Wizard of Oz"[..], 1939),
-    ///     (&b"M"[..], 1931),
-    /// ]);
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// Or with named groups:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>[0-9]{4})\)").unwrap();
-    /// let hay = b"'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
-    /// let mut it = re.captures_iter(hay);
-    ///
-    /// let caps = it.next().unwrap();
-    /// assert_eq!(&caps["title"], b"Citizen Kane");
-    /// assert_eq!(&caps["year"], b"1941");
-    ///
-    /// let caps = it.next().unwrap();
-    /// assert_eq!(&caps["title"], b"The Wizard of Oz");
-    /// assert_eq!(&caps["year"], b"1939");
-    ///
-    /// let caps = it.next().unwrap();
-    /// assert_eq!(&caps["title"], b"M");
-    /// assert_eq!(&caps["year"], b"1931");
-    /// ```
-    #[inline]
-    pub fn captures_iter<'r, 'h>(
-        &'r self,
-        haystack: &'h [u8],
-    ) -> CaptureMatches<'r, 'h> {
-        CaptureMatches { haystack, it: self.meta.captures_iter(haystack) }
-    }
-
-    /// Returns an iterator of substrings of the haystack given, delimited by a
-    /// match of the regex. Namely, each element of the iterator corresponds to
-    /// a part of the haystack that *isn't* matched by the regular expression.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// # Example
-    ///
-    /// To split a string delimited by arbitrary amounts of spaces or tabs:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"[ \t]+").unwrap();
-    /// let hay = b"a b \t  c\td    e";
-    /// let fields: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(fields, vec![
-    ///     &b"a"[..], &b"b"[..], &b"c"[..], &b"d"[..], &b"e"[..],
-    /// ]);
-    /// ```
-    ///
-    /// # Example: more cases
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r" ").unwrap();
-    /// let hay = b"Mary had a little lamb";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![
-    ///     &b"Mary"[..], &b"had"[..], &b"a"[..], &b"little"[..], &b"lamb"[..],
-    /// ]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![&b""[..]]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"lionXXtigerXleopard";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![
-    ///     &b"lion"[..], &b""[..], &b"tiger"[..], &b"leopard"[..],
-    /// ]);
-    ///
-    /// let re = Regex::new(r"::").unwrap();
-    /// let hay = b"lion::tiger::leopard";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![&b"lion"[..], &b"tiger"[..], &b"leopard"[..]]);
-    /// ```
-    ///
-    /// If a haystack contains multiple contiguous matches, you will end up
-    /// with empty spans yielded by the iterator:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"XXXXaXXbXc";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![
-    ///     &b""[..], &b""[..], &b""[..], &b""[..],
-    ///     &b"a"[..], &b""[..], &b"b"[..], &b"c"[..],
-    /// ]);
-    ///
-    /// let re = Regex::new(r"/").unwrap();
-    /// let hay = b"(///)";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![&b"("[..], &b""[..], &b""[..], &b")"[..]]);
-    /// ```
-    ///
-    /// Separators at the start or end of a haystack are neighbored by empty
-    /// substring.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"0").unwrap();
-    /// let hay = b"010";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![&b""[..], &b"1"[..], &b""[..]]);
-    /// ```
-    ///
-    /// When the regex can match the empty string, it splits at every byte
-    /// position in the haystack. This includes between all UTF-8 code units.
-    /// (The top-level [`Regex::split`](crate::Regex::split) will only split
-    /// at valid UTF-8 boundaries.)
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"").unwrap();
-    /// let hay = "☃".as_bytes();
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![
-    ///     &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..],
-    /// ]);
-    /// ```
-    ///
-    /// Contiguous separators (commonly shows up with whitespace), can lead to
-    /// possibly surprising behavior. For example, this code is correct:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r" ").unwrap();
-    /// let hay = b"    a  b c";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// assert_eq!(got, vec![
-    ///     &b""[..], &b""[..], &b""[..], &b""[..],
-    ///     &b"a"[..], &b""[..], &b"b"[..], &b"c"[..],
-    /// ]);
-    /// ```
-    ///
-    /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want
-    /// to match contiguous space characters:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r" +").unwrap();
-    /// let hay = b"    a  b c";
-    /// let got: Vec<&[u8]> = re.split(hay).collect();
-    /// // N.B. This does still include a leading empty span because ' +'
-    /// // matches at the beginning of the haystack.
-    /// assert_eq!(got, vec![&b""[..], &b"a"[..], &b"b"[..], &b"c"[..]]);
-    /// ```
-    #[inline]
-    pub fn split<'r, 'h>(&'r self, haystack: &'h [u8]) -> Split<'r, 'h> {
-        Split { haystack, it: self.meta.split(haystack) }
-    }
-
-    /// Returns an iterator of at most `limit` substrings of the haystack
-    /// given, delimited by a match of the regex. (A `limit` of `0` will return
-    /// no substrings.) Namely, each element of the iterator corresponds to a
-    /// part of the haystack that *isn't* matched by the regular expression.
-    /// The remainder of the haystack that is not split will be the last
-    /// element in the iterator.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// Although note that the worst case time here has an upper bound given
-    /// by the `limit` parameter.
-    ///
-    /// # Example
-    ///
-    /// Get the first two words in some haystack:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\W+").unwrap();
-    /// let hay = b"Hey! How are you?";
-    /// let fields: Vec<&[u8]> = re.splitn(hay, 3).collect();
-    /// assert_eq!(fields, vec![&b"Hey"[..], &b"How"[..], &b"are you?"[..]]);
-    /// ```
-    ///
-    /// # Examples: more cases
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r" ").unwrap();
-    /// let hay = b"Mary had a little lamb";
-    /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect();
-    /// assert_eq!(got, vec![&b"Mary"[..], &b"had"[..], &b"a little lamb"[..]]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"";
-    /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect();
-    /// assert_eq!(got, vec![&b""[..]]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"lionXXtigerXleopard";
-    /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect();
-    /// assert_eq!(got, vec![&b"lion"[..], &b""[..], &b"tigerXleopard"[..]]);
-    ///
-    /// let re = Regex::new(r"::").unwrap();
-    /// let hay = b"lion::tiger::leopard";
-    /// let got: Vec<&[u8]> = re.splitn(hay, 2).collect();
-    /// assert_eq!(got, vec![&b"lion"[..], &b"tiger::leopard"[..]]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"abcXdef";
-    /// let got: Vec<&[u8]> = re.splitn(hay, 1).collect();
-    /// assert_eq!(got, vec![&b"abcXdef"[..]]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"abcdef";
-    /// let got: Vec<&[u8]> = re.splitn(hay, 2).collect();
-    /// assert_eq!(got, vec![&b"abcdef"[..]]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = b"abcXdef";
-    /// let got: Vec<&[u8]> = re.splitn(hay, 0).collect();
-    /// assert!(got.is_empty());
-    /// ```
-    #[inline]
-    pub fn splitn<'r, 'h>(
-        &'r self,
-        haystack: &'h [u8],
-        limit: usize,
-    ) -> SplitN<'r, 'h> {
-        SplitN { haystack, it: self.meta.splitn(haystack, limit) }
-    }
-
-    /// Replaces the leftmost-first match in the given haystack with the
-    /// replacement provided. The replacement can be a regular string (where
-    /// `$N` and `$name` are expanded to match capture groups) or a function
-    /// that takes a [`Captures`] and returns the replaced string.
-    ///
-    /// If no match is found, then the haystack is returned unchanged. In that
-    /// case, this implementation will likely return a `Cow::Borrowed` value
-    /// such that no allocation is performed.
-    ///
-    /// When a `Cow::Borrowed` is returned, the value returned is guaranteed
-    /// to be equivalent to the `haystack` given.
-    ///
-    /// # Replacement string syntax
-    ///
-    /// All instances of `$ref` in the replacement string are replaced with
-    /// the substring corresponding to the capture group identified by `ref`.
-    ///
-    /// `ref` may be an integer corresponding to the index of the capture group
-    /// (counted by order of opening parenthesis where `0` is the entire match)
-    /// or it can be a name (consisting of letters, digits or underscores)
-    /// corresponding to a named capture group.
-    ///
-    /// If `ref` isn't a valid capture group (whether the name doesn't exist or
-    /// isn't a valid index), then it is replaced with the empty string.
-    ///
-    /// The longest possible name is used. For example, `$1a` looks up the
-    /// capture group named `1a` and not the capture group at index `1`. To
-    /// exert more precise control over the name, use braces, e.g., `${1}a`.
-    ///
-    /// To write a literal `$` use `$$`.
-    ///
-    /// # Example
-    ///
-    /// Note that this function is polymorphic with respect to the replacement.
-    /// In typical usage, this can just be a normal string:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"[^01]+").unwrap();
-    /// assert_eq!(re.replace(b"1078910", b""), &b"1010"[..]);
-    /// ```
-    ///
-    /// But anything satisfying the [`Replacer`] trait will work. For example,
-    /// a closure of type `|&Captures| -> String` provides direct access to the
-    /// captures corresponding to a match. This allows one to access capturing
-    /// group matches easily:
-    ///
-    /// ```
-    /// use regex::bytes::{Captures, Regex};
-    ///
-    /// let re = Regex::new(r"([^,\s]+),\s+(\S+)").unwrap();
-    /// let result = re.replace(b"Springsteen, Bruce", |caps: &Captures| {
-    ///     let mut buf = vec![];
-    ///     buf.extend_from_slice(&caps[2]);
-    ///     buf.push(b' ');
-    ///     buf.extend_from_slice(&caps[1]);
-    ///     buf
-    /// });
-    /// assert_eq!(result, &b"Bruce Springsteen"[..]);
-    /// ```
-    ///
-    /// But this is a bit cumbersome to use all the time. Instead, a simple
-    /// syntax is supported (as described above) that expands `$name` into the
-    /// corresponding capture group. Here's the last example, but using this
-    /// expansion technique with named capture groups:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap();
-    /// let result = re.replace(b"Springsteen, Bruce", b"$first $last");
-    /// assert_eq!(result, &b"Bruce Springsteen"[..]);
-    /// ```
-    ///
-    /// Note that using `$2` instead of `$first` or `$1` instead of `$last`
-    /// would produce the same result. To write a literal `$` use `$$`.
-    ///
-    /// Sometimes the replacement string requires use of curly braces to
-    /// delineate a capture group replacement when it is adjacent to some other
-    /// literal text. For example, if we wanted to join two words together with
-    /// an underscore:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(?<first>\w+)\s+(?<second>\w+)").unwrap();
-    /// let result = re.replace(b"deep fried", b"${first}_$second");
-    /// assert_eq!(result, &b"deep_fried"[..]);
-    /// ```
-    ///
-    /// Without the curly braces, the capture group name `first_` would be
-    /// used, and since it doesn't exist, it would be replaced with the empty
-    /// string.
-    ///
-    /// Finally, sometimes you just want to replace a literal string with no
-    /// regard for capturing group expansion. This can be done by wrapping a
-    /// string with [`NoExpand`]:
-    ///
-    /// ```
-    /// use regex::bytes::{NoExpand, Regex};
-    ///
-    /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap();
-    /// let result = re.replace(b"Springsteen, Bruce", NoExpand(b"$2 $last"));
-    /// assert_eq!(result, &b"$2 $last"[..]);
-    /// ```
-    ///
-    /// Using `NoExpand` may also be faster, since the replacement string won't
-    /// need to be parsed for the `$` syntax.
-    #[inline]
-    pub fn replace<'h, R: Replacer>(
-        &self,
-        haystack: &'h [u8],
-        rep: R,
-    ) -> Cow<'h, [u8]> {
-        self.replacen(haystack, 1, rep)
-    }
-
-    /// Replaces all non-overlapping matches in the haystack with the
-    /// replacement provided. This is the same as calling `replacen` with
-    /// `limit` set to `0`.
-    ///
-    /// If no match is found, then the haystack is returned unchanged. In that
-    /// case, this implementation will likely return a `Cow::Borrowed` value
-    /// such that no allocation is performed.
-    ///
-    /// When a `Cow::Borrowed` is returned, the value returned is guaranteed
-    /// to be equivalent to the `haystack` given.
-    ///
-    /// The documentation for [`Regex::replace`] goes into more detail about
-    /// what kinds of replacement strings are supported.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// # Fallibility
-    ///
-    /// If you need to write a replacement routine where any individual
-    /// replacement might "fail," doing so with this API isn't really feasible
-    /// because there's no way to stop the search process if a replacement
-    /// fails. Instead, if you need this functionality, you should consider
-    /// implementing your own replacement routine:
-    ///
-    /// ```
-    /// use regex::bytes::{Captures, Regex};
-    ///
-    /// fn replace_all<E>(
-    ///     re: &Regex,
-    ///     haystack: &[u8],
-    ///     replacement: impl Fn(&Captures) -> Result<Vec<u8>, E>,
-    /// ) -> Result<Vec<u8>, E> {
-    ///     let mut new = Vec::with_capacity(haystack.len());
-    ///     let mut last_match = 0;
-    ///     for caps in re.captures_iter(haystack) {
-    ///         let m = caps.get(0).unwrap();
-    ///         new.extend_from_slice(&haystack[last_match..m.start()]);
-    ///         new.extend_from_slice(&replacement(&caps)?);
-    ///         last_match = m.end();
-    ///     }
-    ///     new.extend_from_slice(&haystack[last_match..]);
-    ///     Ok(new)
-    /// }
-    ///
-    /// // Let's replace each word with the number of bytes in that word.
-    /// // But if we see a word that is "too long," we'll give up.
-    /// let re = Regex::new(r"\w+").unwrap();
-    /// let replacement = |caps: &Captures| -> Result<Vec<u8>, &'static str> {
-    ///     if caps[0].len() >= 5 {
-    ///         return Err("word too long");
-    ///     }
-    ///     Ok(caps[0].len().to_string().into_bytes())
-    /// };
-    /// assert_eq!(
-    ///     Ok(b"2 3 3 3?".to_vec()),
-    ///     replace_all(&re, b"hi how are you?", &replacement),
-    /// );
-    /// assert!(replace_all(&re, b"hi there", &replacement).is_err());
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// This example shows how to flip the order of whitespace (excluding line
-    /// terminators) delimited fields, and normalizes the whitespace that
-    /// delimits the fields:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap();
-    /// let hay = b"
-    /// Greetings  1973
-    /// Wild\t1973
-    /// BornToRun\t\t\t\t1975
-    /// Darkness                    1978
-    /// TheRiver 1980
-    /// ";
-    /// let new = re.replace_all(hay, b"$2 $1");
-    /// assert_eq!(new, &b"
-    /// 1973 Greetings
-    /// 1973 Wild
-    /// 1975 BornToRun
-    /// 1978 Darkness
-    /// 1980 TheRiver
-    /// "[..]);
-    /// ```
-    #[inline]
-    pub fn replace_all<'h, R: Replacer>(
-        &self,
-        haystack: &'h [u8],
-        rep: R,
-    ) -> Cow<'h, [u8]> {
-        self.replacen(haystack, 0, rep)
-    }
-
-    /// Replaces at most `limit` non-overlapping matches in the haystack with
-    /// the replacement provided. If `limit` is `0`, then all non-overlapping
-    /// matches are replaced. That is, `Regex::replace_all(hay, rep)` is
-    /// equivalent to `Regex::replacen(hay, 0, rep)`.
-    ///
-    /// If no match is found, then the haystack is returned unchanged. In that
-    /// case, this implementation will likely return a `Cow::Borrowed` value
-    /// such that no allocation is performed.
-    ///
-    /// When a `Cow::Borrowed` is returned, the value returned is guaranteed
-    /// to be equivalent to the `haystack` given.
-    ///
-    /// The documentation for [`Regex::replace`] goes into more detail about
-    /// what kinds of replacement strings are supported.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// Although note that the worst case time here has an upper bound given
-    /// by the `limit` parameter.
-    ///
-    /// # Fallibility
-    ///
-    /// See the corresponding section in the docs for [`Regex::replace_all`]
-    /// for tips on how to deal with a replacement routine that can fail.
-    ///
-    /// # Example
-    ///
-    /// This example shows how to flip the order of whitespace (excluding line
-    /// terminators) delimited fields, and normalizes the whitespace that
-    /// delimits the fields. But we only do it for the first two matches.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap();
-    /// let hay = b"
-    /// Greetings  1973
-    /// Wild\t1973
-    /// BornToRun\t\t\t\t1975
-    /// Darkness                    1978
-    /// TheRiver 1980
-    /// ";
-    /// let new = re.replacen(hay, 2, b"$2 $1");
-    /// assert_eq!(new, &b"
-    /// 1973 Greetings
-    /// 1973 Wild
-    /// BornToRun\t\t\t\t1975
-    /// Darkness                    1978
-    /// TheRiver 1980
-    /// "[..]);
-    /// ```
-    #[inline]
-    pub fn replacen<'h, R: Replacer>(
-        &self,
-        haystack: &'h [u8],
-        limit: usize,
-        mut rep: R,
-    ) -> Cow<'h, [u8]> {
-        // If we know that the replacement doesn't have any capture expansions,
-        // then we can use the fast path. The fast path can make a tremendous
-        // difference:
-        //
-        //   1) We use `find_iter` instead of `captures_iter`. Not asking for
-        //      captures generally makes the regex engines faster.
-        //   2) We don't need to look up all of the capture groups and do
-        //      replacements inside the replacement string. We just push it
-        //      at each match and be done with it.
-        if let Some(rep) = rep.no_expansion() {
-            let mut it = self.find_iter(haystack).enumerate().peekable();
-            if it.peek().is_none() {
-                return Cow::Borrowed(haystack);
-            }
-            let mut new = Vec::with_capacity(haystack.len());
-            let mut last_match = 0;
-            for (i, m) in it {
-                new.extend_from_slice(&haystack[last_match..m.start()]);
-                new.extend_from_slice(&rep);
-                last_match = m.end();
-                if limit > 0 && i >= limit - 1 {
-                    break;
-                }
-            }
-            new.extend_from_slice(&haystack[last_match..]);
-            return Cow::Owned(new);
-        }
-
-        // The slower path, which we use if the replacement needs access to
-        // capture groups.
-        let mut it = self.captures_iter(haystack).enumerate().peekable();
-        if it.peek().is_none() {
-            return Cow::Borrowed(haystack);
-        }
-        let mut new = Vec::with_capacity(haystack.len());
-        let mut last_match = 0;
-        for (i, cap) in it {
-            // unwrap on 0 is OK because captures only reports matches
-            let m = cap.get(0).unwrap();
-            new.extend_from_slice(&haystack[last_match..m.start()]);
-            rep.replace_append(&cap, &mut new);
-            last_match = m.end();
-            if limit > 0 && i >= limit - 1 {
-                break;
-            }
-        }
-        new.extend_from_slice(&haystack[last_match..]);
-        Cow::Owned(new)
-    }
-}
-
-/// A group of advanced or "lower level" search methods. Some methods permit
-/// starting the search at a position greater than `0` in the haystack. Other
-/// methods permit reusing allocations, for example, when extracting the
-/// matches for capture groups.
-impl Regex {
-    /// Returns the end byte offset of the first match in the haystack given.
-    ///
-    /// This method may have the same performance characteristics as
-    /// `is_match`. Behaviorlly, it doesn't just report whether it match
-    /// occurs, but also the end offset for a match. In particular, the offset
-    /// returned *may be shorter* than the proper end of the leftmost-first
-    /// match that you would find via [`Regex::find`].
-    ///
-    /// Note that it is not guaranteed that this routine finds the shortest or
-    /// "earliest" possible match. Instead, the main idea of this API is that
-    /// it returns the offset at the point at which the internal regex engine
-    /// has determined that a match has occurred. This may vary depending on
-    /// which internal regex engine is used, and thus, the offset itself may
-    /// change based on internal heuristics.
-    ///
-    /// # Example
-    ///
-    /// Typically, `a+` would match the entire first sequence of `a` in some
-    /// haystack, but `shortest_match` *may* give up as soon as it sees the
-    /// first `a`.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"a+").unwrap();
-    /// let offset = re.shortest_match(b"aaaaa").unwrap();
-    /// assert_eq!(offset, 1);
-    /// ```
-    #[inline]
-    pub fn shortest_match(&self, haystack: &[u8]) -> Option<usize> {
-        self.shortest_match_at(haystack, 0)
-    }
-
-    /// Returns the same as `shortest_match`, but starts the search at the
-    /// given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only match
-    /// when `start == 0`.
-    ///
-    /// If a match is found, the offset returned is relative to the beginning
-    /// of the haystack, not the beginning of the search.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = b"eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert_eq!(re.shortest_match(&hay[2..]), Some(4));
-    /// // No match because the  assertions take the context into account.
-    /// assert_eq!(re.shortest_match_at(hay, 2), None);
-    /// ```
-    #[inline]
-    pub fn shortest_match_at(
-        &self,
-        haystack: &[u8],
-        start: usize,
-    ) -> Option<usize> {
-        let input =
-            Input::new(haystack).earliest(true).span(start..haystack.len());
-        self.meta.search_half(&input).map(|hm| hm.offset())
-    }
-
-    /// Returns the same as [`Regex::is_match`], but starts the search at the
-    /// given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = b"eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert!(re.is_match(&hay[2..]));
-    /// // No match because the  assertions take the context into account.
-    /// assert!(!re.is_match_at(hay, 2));
-    /// ```
-    #[inline]
-    pub fn is_match_at(&self, haystack: &[u8], start: usize) -> bool {
-        self.meta.is_match(Input::new(haystack).span(start..haystack.len()))
-    }
-
-    /// Returns the same as [`Regex::find`], but starts the search at the given
-    /// offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = b"eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert_eq!(re.find(&hay[2..]).map(|m| m.range()), Some(0..4));
-    /// // No match because the  assertions take the context into account.
-    /// assert_eq!(re.find_at(hay, 2), None);
-    /// ```
-    #[inline]
-    pub fn find_at<'h>(
-        &self,
-        haystack: &'h [u8],
-        start: usize,
-    ) -> Option<Match<'h>> {
-        let input = Input::new(haystack).span(start..haystack.len());
-        self.meta.find(input).map(|m| Match::new(haystack, m.start(), m.end()))
-    }
-
-    /// Returns the same as [`Regex::captures`], but starts the search at the
-    /// given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = b"eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert_eq!(&re.captures(&hay[2..]).unwrap()[0], b"chew");
-    /// // No match because the  assertions take the context into account.
-    /// assert!(re.captures_at(hay, 2).is_none());
-    /// ```
-    #[inline]
-    pub fn captures_at<'h>(
-        &self,
-        haystack: &'h [u8],
-        start: usize,
-    ) -> Option<Captures<'h>> {
-        let input = Input::new(haystack).span(start..haystack.len());
-        let mut caps = self.meta.create_captures();
-        self.meta.captures(input, &mut caps);
-        if caps.is_match() {
-            let static_captures_len = self.static_captures_len();
-            Some(Captures { haystack, caps, static_captures_len })
-        } else {
-            None
-        }
-    }
-
-    /// This is like [`Regex::captures`], but writes the byte offsets of each
-    /// capture group match into the locations given.
-    ///
-    /// A [`CaptureLocations`] stores the same byte offsets as a [`Captures`],
-    /// but does *not* store a reference to the haystack. This makes its API
-    /// a bit lower level and less convenient. But in exchange, callers
-    /// may allocate their own `CaptureLocations` and reuse it for multiple
-    /// searches. This may be helpful if allocating a `Captures` shows up in a
-    /// profile as too costly.
-    ///
-    /// To create a `CaptureLocations` value, use the
-    /// [`Regex::capture_locations`] method.
-    ///
-    /// This also returns the overall match if one was found. When a match is
-    /// found, its offsets are also always stored in `locs` at index `0`.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"^([a-z]+)=(\S*)$").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// assert!(re.captures_read(&mut locs, b"id=foo123").is_some());
-    /// assert_eq!(Some((0, 9)), locs.get(0));
-    /// assert_eq!(Some((0, 2)), locs.get(1));
-    /// assert_eq!(Some((3, 9)), locs.get(2));
-    /// ```
-    #[inline]
-    pub fn captures_read<'h>(
-        &self,
-        locs: &mut CaptureLocations,
-        haystack: &'h [u8],
-    ) -> Option<Match<'h>> {
-        self.captures_read_at(locs, haystack, 0)
-    }
-
-    /// Returns the same as [`Regex::captures_read`], but starts the search at
-    /// the given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = b"eschew";
-    /// let mut locs = re.capture_locations();
-    /// // We get a match here, but it's probably not intended.
-    /// assert!(re.captures_read(&mut locs, &hay[2..]).is_some());
-    /// // No match because the  assertions take the context into account.
-    /// assert!(re.captures_read_at(&mut locs, hay, 2).is_none());
-    /// ```
-    #[inline]
-    pub fn captures_read_at<'h>(
-        &self,
-        locs: &mut CaptureLocations,
-        haystack: &'h [u8],
-        start: usize,
-    ) -> Option<Match<'h>> {
-        let input = Input::new(haystack).span(start..haystack.len());
-        self.meta.search_captures(&input, &mut locs.0);
-        locs.0.get_match().map(|m| Match::new(haystack, m.start(), m.end()))
-    }
-
-    /// An undocumented alias for `captures_read_at`.
-    ///
-    /// The `regex-capi` crate previously used this routine, so to avoid
-    /// breaking that crate, we continue to provide the name as an undocumented
-    /// alias.
-    #[doc(hidden)]
-    #[inline]
-    pub fn read_captures_at<'h>(
-        &self,
-        locs: &mut CaptureLocations,
-        haystack: &'h [u8],
-        start: usize,
-    ) -> Option<Match<'h>> {
-        self.captures_read_at(locs, haystack, start)
-    }
-}
-
-/// Auxiliary methods.
-impl Regex {
-    /// Returns the original string of this regex.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"foo\w+bar").unwrap();
-    /// assert_eq!(re.as_str(), r"foo\w+bar");
-    /// ```
-    #[inline]
-    pub fn as_str(&self) -> &str {
-        &self.pattern
-    }
-
-    /// Returns an iterator over the capture names in this regex.
-    ///
-    /// The iterator returned yields elements of type `Option<&str>`. That is,
-    /// the iterator yields values for all capture groups, even ones that are
-    /// unnamed. The order of the groups corresponds to the order of the group's
-    /// corresponding opening parenthesis.
-    ///
-    /// The first element of the iterator always yields the group corresponding
-    /// to the overall match, and this group is always unnamed. Therefore, the
-    /// iterator always yields at least one group.
-    ///
-    /// # Example
-    ///
-    /// This shows basic usage with a mix of named and unnamed capture groups:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap();
-    /// let mut names = re.capture_names();
-    /// assert_eq!(names.next(), Some(None));
-    /// assert_eq!(names.next(), Some(Some("a")));
-    /// assert_eq!(names.next(), Some(Some("b")));
-    /// assert_eq!(names.next(), Some(None));
-    /// // the '(?:.)' group is non-capturing and so doesn't appear here!
-    /// assert_eq!(names.next(), Some(Some("c")));
-    /// assert_eq!(names.next(), None);
-    /// ```
-    ///
-    /// The iterator always yields at least one element, even for regexes with
-    /// no capture groups and even for regexes that can never match:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"").unwrap();
-    /// let mut names = re.capture_names();
-    /// assert_eq!(names.next(), Some(None));
-    /// assert_eq!(names.next(), None);
-    ///
-    /// let re = Regex::new(r"[a&&b]").unwrap();
-    /// let mut names = re.capture_names();
-    /// assert_eq!(names.next(), Some(None));
-    /// assert_eq!(names.next(), None);
-    /// ```
-    #[inline]
-    pub fn capture_names(&self) -> CaptureNames<'_> {
-        CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO))
-    }
-
-    /// Returns the number of captures groups in this regex.
-    ///
-    /// This includes all named and unnamed groups, including the implicit
-    /// unnamed group that is always present and corresponds to the entire
-    /// match.
-    ///
-    /// Since the implicit unnamed group is always included in this length, the
-    /// length returned is guaranteed to be greater than zero.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"foo").unwrap();
-    /// assert_eq!(1, re.captures_len());
-    ///
-    /// let re = Regex::new(r"(foo)").unwrap();
-    /// assert_eq!(2, re.captures_len());
-    ///
-    /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap();
-    /// assert_eq!(5, re.captures_len());
-    ///
-    /// let re = Regex::new(r"[a&&b]").unwrap();
-    /// assert_eq!(1, re.captures_len());
-    /// ```
-    #[inline]
-    pub fn captures_len(&self) -> usize {
-        self.meta.group_info().group_len(PatternID::ZERO)
-    }
-
-    /// Returns the total number of capturing groups that appear in every
-    /// possible match.
-    ///
-    /// If the number of capture groups can vary depending on the match, then
-    /// this returns `None`. That is, a value is only returned when the number
-    /// of matching groups is invariant or "static."
-    ///
-    /// Note that like [`Regex::captures_len`], this **does** include the
-    /// implicit capturing group corresponding to the entire match. Therefore,
-    /// when a non-None value is returned, it is guaranteed to be at least `1`.
-    /// Stated differently, a return value of `Some(0)` is impossible.
-    ///
-    /// # Example
-    ///
-    /// This shows a few cases where a static number of capture groups is
-    /// available and a few cases where it is not.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let len = |pattern| {
-    ///     Regex::new(pattern).map(|re| re.static_captures_len())
-    /// };
-    ///
-    /// assert_eq!(Some(1), len("a")?);
-    /// assert_eq!(Some(2), len("(a)")?);
-    /// assert_eq!(Some(2), len("(a)|(b)")?);
-    /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?);
-    /// assert_eq!(None, len("(a)|b")?);
-    /// assert_eq!(None, len("a|(b)")?);
-    /// assert_eq!(None, len("(b)*")?);
-    /// assert_eq!(Some(2), len("(b)+")?);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn static_captures_len(&self) -> Option<usize> {
-        self.meta.static_captures_len()
-    }
-
-    /// Returns a fresh allocated set of capture locations that can
-    /// be reused in multiple calls to [`Regex::captures_read`] or
-    /// [`Regex::captures_read_at`].
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(.)(.)(\w+)").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// assert!(re.captures_read(&mut locs, b"Padron").is_some());
-    /// assert_eq!(locs.get(0), Some((0, 6)));
-    /// assert_eq!(locs.get(1), Some((0, 1)));
-    /// assert_eq!(locs.get(2), Some((1, 2)));
-    /// assert_eq!(locs.get(3), Some((2, 6)));
-    /// ```
-    #[inline]
-    pub fn capture_locations(&self) -> CaptureLocations {
-        CaptureLocations(self.meta.create_captures())
-    }
-
-    /// An alias for `capture_locations` to preserve backward compatibility.
-    ///
-    /// The `regex-capi` crate uses this method, so to avoid breaking that
-    /// crate, we continue to export it as an undocumented API.
-    #[doc(hidden)]
-    #[inline]
-    pub fn locations(&self) -> CaptureLocations {
-        self.capture_locations()
-    }
-}
-
-/// Represents a single match of a regex in a haystack.
-///
-/// A `Match` contains both the start and end byte offsets of the match and the
-/// actual substring corresponding to the range of those byte offsets. It is
-/// guaranteed that `start <= end`. When `start == end`, the match is empty.
-///
-/// Unlike the top-level `Match` type, this `Match` type is produced by APIs
-/// that search `&[u8]` haystacks. This means that the offsets in a `Match` can
-/// point to anywhere in the haystack, including in a place that splits the
-/// UTF-8 encoding of a Unicode scalar value.
-///
-/// The lifetime parameter `'h` refers to the lifetime of the matched of the
-/// haystack that this match was produced from.
-///
-/// # Numbering
-///
-/// The byte offsets in a `Match` form a half-open interval. That is, the
-/// start of the range is inclusive and the end of the range is exclusive.
-/// For example, given a haystack `abcFOOxyz` and a match of `FOO`, its byte
-/// offset range starts at `3` and ends at `6`. `3` corresponds to `F` and
-/// `6` corresponds to `x`, which is one past the end of the match. This
-/// corresponds to the same kind of slicing that Rust uses.
-///
-/// For more on why this was chosen over other schemes (aside from being
-/// consistent with how Rust the language works), see [this discussion] and
-/// [Dijkstra's note on a related topic][note].
-///
-/// [this discussion]: https://github.com/rust-lang/regex/discussions/866
-/// [note]: https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
-///
-/// # Example
-///
-/// This example shows the value of each of the methods on `Match` for a
-/// particular search.
-///
-/// ```
-/// use regex::bytes::Regex;
-///
-/// let re = Regex::new(r"\p{Greek}+").unwrap();
-/// let hay = "Greek: αβγδ".as_bytes();
-/// let m = re.find(hay).unwrap();
-/// assert_eq!(7, m.start());
-/// assert_eq!(15, m.end());
-/// assert!(!m.is_empty());
-/// assert_eq!(8, m.len());
-/// assert_eq!(7..15, m.range());
-/// assert_eq!("αβγδ".as_bytes(), m.as_bytes());
-/// ```
-#[derive(Copy, Clone, Eq, PartialEq)]
-pub struct Match<'h> {
-    haystack: &'h [u8],
-    start: usize,
-    end: usize,
-}
-
-impl<'h> Match<'h> {
-    /// Returns the byte offset of the start of the match in the haystack. The
-    /// start of the match corresponds to the position where the match begins
-    /// and includes the first byte in the match.
-    ///
-    /// It is guaranteed that `Match::start() <= Match::end()`.
-    ///
-    /// Unlike the top-level `Match` type, the start offset may appear anywhere
-    /// in the haystack. This includes between the code units of a UTF-8
-    /// encoded Unicode scalar value.
-    #[inline]
-    pub fn start(&self) -> usize {
-        self.start
-    }
-
-    /// Returns the byte offset of the end of the match in the haystack. The
-    /// end of the match corresponds to the byte immediately following the last
-    /// byte in the match. This means that `&slice[start..end]` works as one
-    /// would expect.
-    ///
-    /// It is guaranteed that `Match::start() <= Match::end()`.
-    ///
-    /// Unlike the top-level `Match` type, the start offset may appear anywhere
-    /// in the haystack. This includes between the code units of a UTF-8
-    /// encoded Unicode scalar value.
-    #[inline]
-    pub fn end(&self) -> usize {
-        self.end
-    }
-
-    /// Returns true if and only if this match has a length of zero.
-    ///
-    /// Note that an empty match can only occur when the regex itself can
-    /// match the empty string. Here are some examples of regexes that can
-    /// all match the empty string: `^`, `^$`, `\b`, `a?`, `a*`, `a{0}`,
-    /// `(foo|\d+|quux)?`.
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.start == self.end
-    }
-
-    /// Returns the length, in bytes, of this match.
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.end - self.start
-    }
-
-    /// Returns the range over the starting and ending byte offsets of the
-    /// match in the haystack.
-    #[inline]
-    pub fn range(&self) -> core::ops::Range<usize> {
-        self.start..self.end
-    }
-
-    /// Returns the substring of the haystack that matched.
-    #[inline]
-    pub fn as_bytes(&self) -> &'h [u8] {
-        &self.haystack[self.range()]
-    }
-
-    /// Creates a new match from the given haystack and byte offsets.
-    #[inline]
-    fn new(haystack: &'h [u8], start: usize, end: usize) -> Match<'h> {
-        Match { haystack, start, end }
-    }
-}
-
-impl<'h> core::fmt::Debug for Match<'h> {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        use regex_automata::util::escape::DebugHaystack;
-
-        let mut fmt = f.debug_struct("Match");
-        fmt.field("start", &self.start)
-            .field("end", &self.end)
-            .field("bytes", &DebugHaystack(&self.as_bytes()));
-
-        fmt.finish()
-    }
-}
-
-impl<'h> From<Match<'h>> for &'h [u8] {
-    fn from(m: Match<'h>) -> &'h [u8] {
-        m.as_bytes()
-    }
-}
-
-impl<'h> From<Match<'h>> for core::ops::Range<usize> {
-    fn from(m: Match<'h>) -> core::ops::Range<usize> {
-        m.range()
-    }
-}
-
-/// Represents the capture groups for a single match.
-///
-/// Capture groups refer to parts of a regex enclosed in parentheses. They
-/// can be optionally named. The purpose of capture groups is to be able to
-/// reference different parts of a match based on the original pattern. In
-/// essence, a `Captures` is a container of [`Match`] values for each group
-/// that participated in a regex match. Each `Match` can be looked up by either
-/// its capture group index or name (if it has one).
-///
-/// For example, say you want to match the individual letters in a 5-letter
-/// word:
-///
-/// ```text
-/// (?<first>\w)(\w)(?:\w)\w(?<last>\w)
-/// ```
-///
-/// This regex has 4 capture groups:
-///
-/// * The group at index `0` corresponds to the overall match. It is always
-/// present in every match and never has a name.
-/// * The group at index `1` with name `first` corresponding to the first
-/// letter.
-/// * The group at index `2` with no name corresponding to the second letter.
-/// * The group at index `3` with name `last` corresponding to the fifth and
-/// last letter.
-///
-/// Notice that `(?:\w)` was not listed above as a capture group despite it
-/// being enclosed in parentheses. That's because `(?:pattern)` is a special
-/// syntax that permits grouping but *without* capturing. The reason for not
-/// treating it as a capture is that tracking and reporting capture groups
-/// requires additional state that may lead to slower searches. So using as few
-/// capture groups as possible can help performance. (Although the difference
-/// in performance of a couple of capture groups is likely immaterial.)
-///
-/// Values with this type are created by [`Regex::captures`] or
-/// [`Regex::captures_iter`].
-///
-/// `'h` is the lifetime of the haystack that these captures were matched from.
-///
-/// # Example
-///
-/// ```
-/// use regex::bytes::Regex;
-///
-/// let re = Regex::new(r"(?<first>\w)(\w)(?:\w)\w(?<last>\w)").unwrap();
-/// let caps = re.captures(b"toady").unwrap();
-/// assert_eq!(b"toady", &caps[0]);
-/// assert_eq!(b"t", &caps["first"]);
-/// assert_eq!(b"o", &caps[2]);
-/// assert_eq!(b"y", &caps["last"]);
-/// ```
-pub struct Captures<'h> {
-    haystack: &'h [u8],
-    caps: captures::Captures,
-    static_captures_len: Option<usize>,
-}
-
-impl<'h> Captures<'h> {
-    /// Returns the `Match` associated with the capture group at index `i`. If
-    /// `i` does not correspond to a capture group, or if the capture group did
-    /// not participate in the match, then `None` is returned.
-    ///
-    /// When `i == 0`, this is guaranteed to return a non-`None` value.
-    ///
-    /// # Examples
-    ///
-    /// Get the substring that matched with a default of an empty string if the
-    /// group didn't participate in the match:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap();
-    /// let caps = re.captures(b"abc123").unwrap();
-    ///
-    /// let substr1 = caps.get(1).map_or(&b""[..], |m| m.as_bytes());
-    /// let substr2 = caps.get(2).map_or(&b""[..], |m| m.as_bytes());
-    /// assert_eq!(substr1, b"123");
-    /// assert_eq!(substr2, b"");
-    /// ```
-    #[inline]
-    pub fn get(&self, i: usize) -> Option<Match<'h>> {
-        self.caps
-            .get_group(i)
-            .map(|sp| Match::new(self.haystack, sp.start, sp.end))
-    }
-
-    /// Returns the `Match` associated with the capture group named `name`. If
-    /// `name` isn't a valid capture group or it refers to a group that didn't
-    /// match, then `None` is returned.
-    ///
-    /// Note that unlike `caps["name"]`, this returns a `Match` whose lifetime
-    /// matches the lifetime of the haystack in this `Captures` value.
-    /// Conversely, the substring returned by `caps["name"]` has a lifetime
-    /// of the `Captures` value, which is likely shorter than the lifetime of
-    /// the haystack. In some cases, it may be necessary to use this method to
-    /// access the matching substring instead of the `caps["name"]` notation.
-    ///
-    /// # Examples
-    ///
-    /// Get the substring that matched with a default of an empty string if the
-    /// group didn't participate in the match:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(
-    ///     r"[a-z]+(?:(?<numbers>[0-9]+)|(?<letters>[A-Z]+))",
-    /// ).unwrap();
-    /// let caps = re.captures(b"abc123").unwrap();
-    ///
-    /// let numbers = caps.name("numbers").map_or(&b""[..], |m| m.as_bytes());
-    /// let letters = caps.name("letters").map_or(&b""[..], |m| m.as_bytes());
-    /// assert_eq!(numbers, b"123");
-    /// assert_eq!(letters, b"");
-    /// ```
-    #[inline]
-    pub fn name(&self, name: &str) -> Option<Match<'h>> {
-        self.caps
-            .get_group_by_name(name)
-            .map(|sp| Match::new(self.haystack, sp.start, sp.end))
-    }
-
-    /// This is a convenience routine for extracting the substrings
-    /// corresponding to matching capture groups.
-    ///
-    /// This returns a tuple where the first element corresponds to the full
-    /// substring of the haystack that matched the regex. The second element is
-    /// an array of substrings, with each corresponding to the substring that
-    /// matched for a particular capture group.
-    ///
-    /// # Panics
-    ///
-    /// This panics if the number of possible matching groups in this
-    /// `Captures` value is not fixed to `N` in all circumstances.
-    /// More precisely, this routine only works when `N` is equivalent to
-    /// [`Regex::static_captures_len`].
-    ///
-    /// Stated more plainly, if the number of matching capture groups in a
-    /// regex can vary from match to match, then this function always panics.
-    ///
-    /// For example, `(a)(b)|(c)` could produce two matching capture groups
-    /// or one matching capture group for any given match. Therefore, one
-    /// cannot use `extract` with such a pattern.
-    ///
-    /// But a pattern like `(a)(b)|(c)(d)` can be used with `extract` because
-    /// the number of capture groups in every match is always equivalent,
-    /// even if the capture _indices_ in each match are not.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap();
-    /// let hay = b"On 2010-03-14, I became a Tenneessee lamb.";
-    /// let Some((full, [year, month, day])) =
-    ///     re.captures(hay).map(|caps| caps.extract()) else { return };
-    /// assert_eq!(b"2010-03-14", full);
-    /// assert_eq!(b"2010", year);
-    /// assert_eq!(b"03", month);
-    /// assert_eq!(b"14", day);
-    /// ```
-    ///
-    /// # Example: iteration
-    ///
-    /// This example shows how to use this method when iterating over all
-    /// `Captures` matches in a haystack.
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap();
-    /// let hay = b"1973-01-05, 1975-08-25 and 1980-10-18";
-    ///
-    /// let mut dates: Vec<(&[u8], &[u8], &[u8])> = vec![];
-    /// for (_, [y, m, d]) in re.captures_iter(hay).map(|c| c.extract()) {
-    ///     dates.push((y, m, d));
-    /// }
-    /// assert_eq!(dates, vec![
-    ///     (&b"1973"[..], &b"01"[..], &b"05"[..]),
-    ///     (&b"1975"[..], &b"08"[..], &b"25"[..]),
-    ///     (&b"1980"[..], &b"10"[..], &b"18"[..]),
-    /// ]);
-    /// ```
-    ///
-    /// # Example: parsing different formats
-    ///
-    /// This API is particularly useful when you need to extract a particular
-    /// value that might occur in a different format. Consider, for example,
-    /// an identifier that might be in double quotes or single quotes:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r#"id:(?:"([^"]+)"|'([^']+)')"#).unwrap();
-    /// let hay = br#"The first is id:"foo" and the second is id:'bar'."#;
-    /// let mut ids = vec![];
-    /// for (_, [id]) in re.captures_iter(hay).map(|c| c.extract()) {
-    ///     ids.push(id);
-    /// }
-    /// assert_eq!(ids, vec![b"foo", b"bar"]);
-    /// ```
-    pub fn extract<const N: usize>(&self) -> (&'h [u8], [&'h [u8]; N]) {
-        let len = self
-            .static_captures_len
-            .expect("number of capture groups can vary in a match")
-            .checked_sub(1)
-            .expect("number of groups is always greater than zero");
-        assert_eq!(N, len, "asked for {} groups, but must ask for {}", N, len);
-        // The regex-automata variant of extract is a bit more permissive.
-        // It doesn't require the number of matching capturing groups to be
-        // static, and you can even request fewer groups than what's there. So
-        // this is guaranteed to never panic because we've asserted above that
-        // the user has requested precisely the number of groups that must be
-        // present in any match for this regex.
-        self.caps.extract_bytes(self.haystack)
-    }
-
-    /// Expands all instances of `$ref` in `replacement` to the corresponding
-    /// capture group, and writes them to the `dst` buffer given. A `ref` can
-    /// be a capture group index or a name. If `ref` doesn't refer to a capture
-    /// group that participated in the match, then it is replaced with the
-    /// empty string.
-    ///
-    /// # Format
-    ///
-    /// The format of the replacement string supports two different kinds of
-    /// capture references: unbraced and braced.
-    ///
-    /// For the unbraced format, the format supported is `$ref` where `name`
-    /// can be any character in the class `[0-9A-Za-z_]`. `ref` is always
-    /// the longest possible parse. So for example, `$1a` corresponds to the
-    /// capture group named `1a` and not the capture group at index `1`. If
-    /// `ref` matches `^[0-9]+$`, then it is treated as a capture group index
-    /// itself and not a name.
-    ///
-    /// For the braced format, the format supported is `${ref}` where `ref` can
-    /// be any sequence of bytes except for `}`. If no closing brace occurs,
-    /// then it is not considered a capture reference. As with the unbraced
-    /// format, if `ref` matches `^[0-9]+$`, then it is treated as a capture
-    /// group index and not a name.
-    ///
-    /// The braced format is useful for exerting precise control over the name
-    /// of the capture reference. For example, `${1}a` corresponds to the
-    /// capture group reference `1` followed by the letter `a`, where as `$1a`
-    /// (as mentioned above) corresponds to the capture group reference `1a`.
-    /// The braced format is also useful for expressing capture group names
-    /// that use characters not supported by the unbraced format. For example,
-    /// `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`.
-    ///
-    /// If a capture group reference is found and it does not refer to a valid
-    /// capture group, then it will be replaced with the empty string.
-    ///
-    /// To write a literal `$`, use `$$`.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(
-    ///     r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})",
-    /// ).unwrap();
-    /// let hay = b"On 14-03-2010, I became a Tenneessee lamb.";
-    /// let caps = re.captures(hay).unwrap();
-    ///
-    /// let mut dst = vec![];
-    /// caps.expand(b"year=$year, month=$month, day=$day", &mut dst);
-    /// assert_eq!(dst, b"year=2010, month=03, day=14");
-    /// ```
-    #[inline]
-    pub fn expand(&self, replacement: &[u8], dst: &mut Vec<u8>) {
-        self.caps.interpolate_bytes_into(self.haystack, replacement, dst);
-    }
-
-    /// Returns an iterator over all capture groups. This includes both
-    /// matching and non-matching groups.
-    ///
-    /// The iterator always yields at least one matching group: the first group
-    /// (at index `0`) with no name. Subsequent groups are returned in the order
-    /// of their opening parenthesis in the regex.
-    ///
-    /// The elements yielded have type `Option<Match<'h>>`, where a non-`None`
-    /// value is present if the capture group matches.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap();
-    /// let caps = re.captures(b"AZ").unwrap();
-    ///
-    /// let mut it = caps.iter();
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"AZ"[..]));
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"A"[..]));
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), None);
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"Z"[..]));
-    /// assert_eq!(it.next(), None);
-    /// ```
-    #[inline]
-    pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 'h> {
-        SubCaptureMatches { haystack: self.haystack, it: self.caps.iter() }
-    }
-
-    /// Returns the total number of capture groups. This includes both
-    /// matching and non-matching groups.
-    ///
-    /// The length returned is always equivalent to the number of elements
-    /// yielded by [`Captures::iter`]. Consequently, the length is always
-    /// greater than zero since every `Captures` value always includes the
-    /// match for the entire regex.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap();
-    /// let caps = re.captures(b"AZ").unwrap();
-    /// assert_eq!(caps.len(), 4);
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.caps.group_len()
-    }
-}
-
-impl<'h> core::fmt::Debug for Captures<'h> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        /// A little helper type to provide a nice map-like debug
-        /// representation for our capturing group spans.
-        ///
-        /// regex-automata has something similar, but it includes the pattern
-        /// ID in its debug output, which is confusing. It also doesn't include
-        /// that strings that match because a regex-automata `Captures` doesn't
-        /// borrow the haystack.
-        struct CapturesDebugMap<'a> {
-            caps: &'a Captures<'a>,
-        }
-
-        impl<'a> core::fmt::Debug for CapturesDebugMap<'a> {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                let mut map = f.debug_map();
-                let names =
-                    self.caps.caps.group_info().pattern_names(PatternID::ZERO);
-                for (group_index, maybe_name) in names.enumerate() {
-                    let key = Key(group_index, maybe_name);
-                    match self.caps.get(group_index) {
-                        None => map.entry(&key, &None::<()>),
-                        Some(mat) => map.entry(&key, &Value(mat)),
-                    };
-                }
-                map.finish()
-            }
-        }
-
-        struct Key<'a>(usize, Option<&'a str>);
-
-        impl<'a> core::fmt::Debug for Key<'a> {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                write!(f, "{}", self.0)?;
-                if let Some(name) = self.1 {
-                    write!(f, "/{:?}", name)?;
-                }
-                Ok(())
-            }
-        }
-
-        struct Value<'a>(Match<'a>);
-
-        impl<'a> core::fmt::Debug for Value<'a> {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                use regex_automata::util::escape::DebugHaystack;
-
-                write!(
-                    f,
-                    "{}..{}/{:?}",
-                    self.0.start(),
-                    self.0.end(),
-                    DebugHaystack(self.0.as_bytes())
-                )
-            }
-        }
-
-        f.debug_tuple("Captures")
-            .field(&CapturesDebugMap { caps: self })
-            .finish()
-    }
-}
-
-/// Get a matching capture group's haystack substring by index.
-///
-/// The haystack substring returned can't outlive the `Captures` object if this
-/// method is used, because of how `Index` is defined (normally `a[i]` is part
-/// of `a` and can't outlive it). To work around this limitation, do that, use
-/// [`Captures::get`] instead.
-///
-/// `'h` is the lifetime of the matched haystack, but the lifetime of the
-/// `&str` returned by this implementation is the lifetime of the `Captures`
-/// value itself.
-///
-/// # Panics
-///
-/// If there is no matching group at the given index.
-impl<'h> core::ops::Index<usize> for Captures<'h> {
-    type Output = [u8];
-
-    // The lifetime is written out to make it clear that the &str returned
-    // does NOT have a lifetime equivalent to 'h.
-    fn index<'a>(&'a self, i: usize) -> &'a [u8] {
-        self.get(i)
-            .map(|m| m.as_bytes())
-            .unwrap_or_else(|| panic!("no group at index '{}'", i))
-    }
-}
-
-/// Get a matching capture group's haystack substring by name.
-///
-/// The haystack substring returned can't outlive the `Captures` object if this
-/// method is used, because of how `Index` is defined (normally `a[i]` is part
-/// of `a` and can't outlive it). To work around this limitation, do that, use
-/// [`Captures::name`] instead.
-///
-/// `'h` is the lifetime of the matched haystack, but the lifetime of the
-/// `&str` returned by this implementation is the lifetime of the `Captures`
-/// value itself.
-///
-/// `'n` is the lifetime of the group name used to index the `Captures` value.
-///
-/// # Panics
-///
-/// If there is no matching group at the given name.
-impl<'h, 'n> core::ops::Index<&'n str> for Captures<'h> {
-    type Output = [u8];
-
-    fn index<'a>(&'a self, name: &'n str) -> &'a [u8] {
-        self.name(name)
-            .map(|m| m.as_bytes())
-            .unwrap_or_else(|| panic!("no group named '{}'", name))
-    }
-}
-
-/// A low level representation of the byte offsets of each capture group.
-///
-/// You can think of this as a lower level [`Captures`], where this type does
-/// not support named capturing groups directly and it does not borrow the
-/// haystack that these offsets were matched on.
-///
-/// Primarily, this type is useful when using the lower level `Regex` APIs such
-/// as [`Regex::captures_read`], which permits amortizing the allocation in
-/// which capture match offsets are stored.
-///
-/// In order to build a value of this type, you'll need to call the
-/// [`Regex::capture_locations`] method. The value returned can then be reused
-/// in subsequent searches for that regex. Using it for other regexes may
-/// result in a panic or otherwise incorrect results.
-///
-/// # Example
-///
-/// This example shows how to create and use `CaptureLocations` in a search.
-///
-/// ```
-/// use regex::bytes::Regex;
-///
-/// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
-/// let mut locs = re.capture_locations();
-/// let m = re.captures_read(&mut locs, b"Bruce Springsteen").unwrap();
-/// assert_eq!(0..17, m.range());
-/// assert_eq!(Some((0, 17)), locs.get(0));
-/// assert_eq!(Some((0, 5)), locs.get(1));
-/// assert_eq!(Some((6, 17)), locs.get(2));
-///
-/// // Asking for an invalid capture group always returns None.
-/// assert_eq!(None, locs.get(3));
-/// # // literals are too big for 32-bit usize: #1041
-/// # #[cfg(target_pointer_width = "64")]
-/// assert_eq!(None, locs.get(34973498648));
-/// # #[cfg(target_pointer_width = "64")]
-/// assert_eq!(None, locs.get(9944060567225171988));
-/// ```
-#[derive(Clone, Debug)]
-pub struct CaptureLocations(captures::Captures);
-
-/// A type alias for `CaptureLocations` for backwards compatibility.
-///
-/// Previously, we exported `CaptureLocations` as `Locations` in an
-/// undocumented API. To prevent breaking that code (e.g., in `regex-capi`),
-/// we continue re-exporting the same undocumented API.
-#[doc(hidden)]
-pub type Locations = CaptureLocations;
-
-impl CaptureLocations {
-    /// Returns the start and end byte offsets of the capture group at index
-    /// `i`. This returns `None` if `i` is not a valid capture group or if the
-    /// capture group did not match.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// re.captures_read(&mut locs, b"Bruce Springsteen").unwrap();
-    /// assert_eq!(Some((0, 17)), locs.get(0));
-    /// assert_eq!(Some((0, 5)), locs.get(1));
-    /// assert_eq!(Some((6, 17)), locs.get(2));
-    /// ```
-    #[inline]
-    pub fn get(&self, i: usize) -> Option<(usize, usize)> {
-        self.0.get_group(i).map(|sp| (sp.start, sp.end))
-    }
-
-    /// Returns the total number of capture groups (even if they didn't match).
-    /// That is, the length returned is unaffected by the result of a search.
-    ///
-    /// This is always at least `1` since every regex has at least `1`
-    /// capturing group that corresponds to the entire match.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// assert_eq!(3, locs.len());
-    /// re.captures_read(&mut locs, b"Bruce Springsteen").unwrap();
-    /// assert_eq!(3, locs.len());
-    /// ```
-    ///
-    /// Notice that the length is always at least `1`, regardless of the regex:
-    ///
-    /// ```
-    /// use regex::bytes::Regex;
-    ///
-    /// let re = Regex::new(r"").unwrap();
-    /// let locs = re.capture_locations();
-    /// assert_eq!(1, locs.len());
-    ///
-    /// // [a&&b] is a regex that never matches anything.
-    /// let re = Regex::new(r"[a&&b]").unwrap();
-    /// let locs = re.capture_locations();
-    /// assert_eq!(1, locs.len());
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        // self.0.group_len() returns 0 if the underlying captures doesn't
-        // represent a match, but the behavior guaranteed for this method is
-        // that the length doesn't change based on a match or not.
-        self.0.group_info().group_len(PatternID::ZERO)
-    }
-
-    /// An alias for the `get` method for backwards compatibility.
-    ///
-    /// Previously, we exported `get` as `pos` in an undocumented API. To
-    /// prevent breaking that code (e.g., in `regex-capi`), we continue
-    /// re-exporting the same undocumented API.
-    #[doc(hidden)]
-    #[inline]
-    pub fn pos(&self, i: usize) -> Option<(usize, usize)> {
-        self.get(i)
-    }
-}
-
-/// An iterator over all non-overlapping matches in a haystack.
-///
-/// This iterator yields [`Match`] values. The iterator stops when no more
-/// matches can be found.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the haystack.
-///
-/// This iterator is created by [`Regex::find_iter`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-#[derive(Debug)]
-pub struct Matches<'r, 'h> {
-    haystack: &'h [u8],
-    it: meta::FindMatches<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for Matches<'r, 'h> {
-    type Item = Match<'h>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Match<'h>> {
-        self.it
-            .next()
-            .map(|sp| Match::new(self.haystack, sp.start(), sp.end()))
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        // This can actually be up to 2x faster than calling `next()` until
-        // completion, because counting matches when using a DFA only requires
-        // finding the end of each match. But returning a `Match` via `next()`
-        // requires the start of each match which, with a DFA, requires a
-        // reverse forward scan to find it.
-        self.it.count()
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for Matches<'r, 'h> {}
-
-/// An iterator over all non-overlapping capture matches in a haystack.
-///
-/// This iterator yields [`Captures`] values. The iterator stops when no more
-/// matches can be found.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the matched string.
-///
-/// This iterator is created by [`Regex::captures_iter`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-#[derive(Debug)]
-pub struct CaptureMatches<'r, 'h> {
-    haystack: &'h [u8],
-    it: meta::CapturesMatches<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for CaptureMatches<'r, 'h> {
-    type Item = Captures<'h>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Captures<'h>> {
-        let static_captures_len = self.it.regex().static_captures_len();
-        self.it.next().map(|caps| Captures {
-            haystack: self.haystack,
-            caps,
-            static_captures_len,
-        })
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        // This can actually be up to 2x faster than calling `next()` until
-        // completion, because counting matches when using a DFA only requires
-        // finding the end of each match. But returning a `Match` via `next()`
-        // requires the start of each match which, with a DFA, requires a
-        // reverse forward scan to find it.
-        self.it.count()
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for CaptureMatches<'r, 'h> {}
-
-/// An iterator over all substrings delimited by a regex match.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the byte string being split.
-///
-/// This iterator is created by [`Regex::split`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-#[derive(Debug)]
-pub struct Split<'r, 'h> {
-    haystack: &'h [u8],
-    it: meta::Split<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for Split<'r, 'h> {
-    type Item = &'h [u8];
-
-    #[inline]
-    fn next(&mut self) -> Option<&'h [u8]> {
-        self.it.next().map(|span| &self.haystack[span])
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {}
-
-/// An iterator over at most `N` substrings delimited by a regex match.
-///
-/// The last substring yielded by this iterator will be whatever remains after
-/// `N-1` splits.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the byte string being split.
-///
-/// This iterator is created by [`Regex::splitn`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-///
-/// Although note that the worst case time here has an upper bound given
-/// by the `limit` parameter to [`Regex::splitn`].
-#[derive(Debug)]
-pub struct SplitN<'r, 'h> {
-    haystack: &'h [u8],
-    it: meta::SplitN<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for SplitN<'r, 'h> {
-    type Item = &'h [u8];
-
-    #[inline]
-    fn next(&mut self) -> Option<&'h [u8]> {
-        self.it.next().map(|span| &self.haystack[span])
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {}
-
-/// An iterator over the names of all capture groups in a regex.
-///
-/// This iterator yields values of type `Option<&str>` in order of the opening
-/// capture group parenthesis in the regex pattern. `None` is yielded for
-/// groups with no name. The first element always corresponds to the implicit
-/// and unnamed group for the overall match.
-///
-/// `'r` is the lifetime of the compiled regular expression.
-///
-/// This iterator is created by [`Regex::capture_names`].
-#[derive(Clone, Debug)]
-pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>);
-
-impl<'r> Iterator for CaptureNames<'r> {
-    type Item = Option<&'r str>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Option<&'r str>> {
-        self.0.next()
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.0.size_hint()
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        self.0.count()
-    }
-}
-
-impl<'r> ExactSizeIterator for CaptureNames<'r> {}
-
-impl<'r> core::iter::FusedIterator for CaptureNames<'r> {}
-
-/// An iterator over all group matches in a [`Captures`] value.
-///
-/// This iterator yields values of type `Option<Match<'h>>`, where `'h` is the
-/// lifetime of the haystack that the matches are for. The order of elements
-/// yielded corresponds to the order of the opening parenthesis for the group
-/// in the regex pattern. `None` is yielded for groups that did not participate
-/// in the match.
-///
-/// The first element always corresponds to the implicit group for the overall
-/// match. Since this iterator is created by a [`Captures`] value, and a
-/// `Captures` value is only created when a match occurs, it follows that the
-/// first element yielded by this iterator is guaranteed to be non-`None`.
-///
-/// The lifetime `'c` corresponds to the lifetime of the `Captures` value that
-/// created this iterator, and the lifetime `'h` corresponds to the originally
-/// matched haystack.
-#[derive(Clone, Debug)]
-pub struct SubCaptureMatches<'c, 'h> {
-    haystack: &'h [u8],
-    it: captures::CapturesPatternIter<'c>,
-}
-
-impl<'c, 'h> Iterator for SubCaptureMatches<'c, 'h> {
-    type Item = Option<Match<'h>>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Option<Match<'h>>> {
-        self.it.next().map(|group| {
-            group.map(|sp| Match::new(self.haystack, sp.start, sp.end))
-        })
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        self.it.count()
-    }
-}
-
-impl<'c, 'h> ExactSizeIterator for SubCaptureMatches<'c, 'h> {}
-
-impl<'c, 'h> core::iter::FusedIterator for SubCaptureMatches<'c, 'h> {}
-
-/// A trait for types that can be used to replace matches in a haystack.
-///
-/// In general, users of this crate shouldn't need to implement this trait,
-/// since implementations are already provided for `&[u8]` along with other
-/// variants of byte string types, as well as `FnMut(&Captures) -> Vec<u8>` (or
-/// any `FnMut(&Captures) -> T` where `T: AsRef<[u8]>`). Those cover most use
-/// cases, but callers can implement this trait directly if necessary.
-///
-/// # Example
-///
-/// This example shows a basic implementation of the `Replacer` trait. This can
-/// be done much more simply using the replacement byte string interpolation
-/// support (e.g., `$first $last`), but this approach avoids needing to parse
-/// the replacement byte string at all.
-///
-/// ```
-/// use regex::bytes::{Captures, Regex, Replacer};
-///
-/// struct NameSwapper;
-///
-/// impl Replacer for NameSwapper {
-///     fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-///         dst.extend_from_slice(&caps["first"]);
-///         dst.extend_from_slice(b" ");
-///         dst.extend_from_slice(&caps["last"]);
-///     }
-/// }
-///
-/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap();
-/// let result = re.replace(b"Springsteen, Bruce", NameSwapper);
-/// assert_eq!(result, &b"Bruce Springsteen"[..]);
-/// ```
-pub trait Replacer {
-    /// Appends possibly empty data to `dst` to replace the current match.
-    ///
-    /// The current match is represented by `caps`, which is guaranteed to have
-    /// a match at capture group `0`.
-    ///
-    /// For example, a no-op replacement would be
-    /// `dst.extend_from_slice(&caps[0])`.
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>);
-
-    /// Return a fixed unchanging replacement byte string.
-    ///
-    /// When doing replacements, if access to [`Captures`] is not needed (e.g.,
-    /// the replacement byte string does not need `$` expansion), then it can
-    /// be beneficial to avoid finding sub-captures.
-    ///
-    /// In general, this is called once for every call to a replacement routine
-    /// such as [`Regex::replace_all`].
-    fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, [u8]>> {
-        None
-    }
-
-    /// Returns a type that implements `Replacer`, but that borrows and wraps
-    /// this `Replacer`.
-    ///
-    /// This is useful when you want to take a generic `Replacer` (which might
-    /// not be cloneable) and use it without consuming it, so it can be used
-    /// more than once.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::{Regex, Replacer};
-    ///
-    /// fn replace_all_twice<R: Replacer>(
-    ///     re: Regex,
-    ///     src: &[u8],
-    ///     mut rep: R,
-    /// ) -> Vec<u8> {
-    ///     let dst = re.replace_all(src, rep.by_ref());
-    ///     let dst = re.replace_all(&dst, rep.by_ref());
-    ///     dst.into_owned()
-    /// }
-    /// ```
-    fn by_ref<'r>(&'r mut self) -> ReplacerRef<'r, Self> {
-        ReplacerRef(self)
-    }
-}
-
-impl<'a, const N: usize> Replacer for &'a [u8; N] {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        caps.expand(&**self, dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        no_expansion(self)
-    }
-}
-
-impl<const N: usize> Replacer for [u8; N] {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        caps.expand(&*self, dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        no_expansion(self)
-    }
-}
-
-impl<'a> Replacer for &'a [u8] {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        caps.expand(*self, dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        no_expansion(self)
-    }
-}
-
-impl<'a> Replacer for &'a Vec<u8> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        caps.expand(*self, dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        no_expansion(self)
-    }
-}
-
-impl Replacer for Vec<u8> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        caps.expand(self, dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        no_expansion(self)
-    }
-}
-
-impl<'a> Replacer for Cow<'a, [u8]> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        caps.expand(self.as_ref(), dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        no_expansion(self)
-    }
-}
-
-impl<'a> Replacer for &'a Cow<'a, [u8]> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        caps.expand(self.as_ref(), dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        no_expansion(self)
-    }
-}
-
-impl<F, T> Replacer for F
-where
-    F: FnMut(&Captures<'_>) -> T,
-    T: AsRef<[u8]>,
-{
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        dst.extend_from_slice((*self)(caps).as_ref());
-    }
-}
-
-/// A by-reference adaptor for a [`Replacer`].
-///
-/// This permits reusing the same `Replacer` value in multiple calls to a
-/// replacement routine like [`Regex::replace_all`].
-///
-/// This type is created by [`Replacer::by_ref`].
-#[derive(Debug)]
-pub struct ReplacerRef<'a, R: ?Sized>(&'a mut R);
-
-impl<'a, R: Replacer + ?Sized + 'a> Replacer for ReplacerRef<'a, R> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) {
-        self.0.replace_append(caps, dst)
-    }
-
-    fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, [u8]>> {
-        self.0.no_expansion()
-    }
-}
-
-/// A helper type for forcing literal string replacement.
-///
-/// It can be used with routines like [`Regex::replace`] and
-/// [`Regex::replace_all`] to do a literal string replacement without expanding
-/// `$name` to their corresponding capture groups. This can be both convenient
-/// (to avoid escaping `$`, for example) and faster (since capture groups
-/// don't need to be found).
-///
-/// `'s` is the lifetime of the literal string to use.
-///
-/// # Example
-///
-/// ```
-/// use regex::bytes::{NoExpand, Regex};
-///
-/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap();
-/// let result = re.replace(b"Springsteen, Bruce", NoExpand(b"$2 $last"));
-/// assert_eq!(result, &b"$2 $last"[..]);
-/// ```
-#[derive(Clone, Debug)]
-pub struct NoExpand<'s>(pub &'s [u8]);
-
-impl<'s> Replacer for NoExpand<'s> {
-    fn replace_append(&mut self, _: &Captures<'_>, dst: &mut Vec<u8>) {
-        dst.extend_from_slice(self.0);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> {
-        Some(Cow::Borrowed(self.0))
-    }
-}
-
-/// Quickly checks the given replacement string for whether interpolation
-/// should be done on it. It returns `None` if a `$` was found anywhere in the
-/// given string, which suggests interpolation needs to be done. But if there's
-/// no `$` anywhere, then interpolation definitely does not need to be done. In
-/// that case, the given string is returned as a borrowed `Cow`.
-///
-/// This is meant to be used to implement the `Replacer::no_expandsion` method
-/// in its various trait impls.
-fn no_expansion<T: AsRef<[u8]>>(replacement: &T) -> Option<Cow<'_, [u8]>> {
-    let replacement = replacement.as_ref();
-    match crate::find_byte::find_byte(b'$', replacement) {
-        Some(_) => None,
-        None => Some(Cow::Borrowed(replacement)),
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use alloc::format;
-
-    #[test]
-    fn test_match_properties() {
-        let haystack = b"Hello, world!";
-        let m = Match::new(haystack, 7, 12);
-
-        assert_eq!(m.start(), 7);
-        assert_eq!(m.end(), 12);
-        assert_eq!(m.is_empty(), false);
-        assert_eq!(m.len(), 5);
-        assert_eq!(m.as_bytes(), b"world");
-    }
-
-    #[test]
-    fn test_empty_match() {
-        let haystack = b"";
-        let m = Match::new(haystack, 0, 0);
-
-        assert_eq!(m.is_empty(), true);
-        assert_eq!(m.len(), 0);
-    }
-
-    #[test]
-    fn test_debug_output_valid_utf8() {
-        let haystack = b"Hello, world!";
-        let m = Match::new(haystack, 7, 12);
-        let debug_str = format!("{:?}", m);
-
-        assert_eq!(
-            debug_str,
-            r#"Match { start: 7, end: 12, bytes: "world" }"#
-        );
-    }
-
-    #[test]
-    fn test_debug_output_invalid_utf8() {
-        let haystack = b"Hello, \xFFworld!";
-        let m = Match::new(haystack, 7, 13);
-        let debug_str = format!("{:?}", m);
-
-        assert_eq!(
-            debug_str,
-            r#"Match { start: 7, end: 13, bytes: "\xffworld" }"#
-        );
-    }
-
-    #[test]
-    fn test_debug_output_various_unicode() {
-        let haystack =
-            "Hello, 😊 world! 안녕하섞요? Ù…Ű±Ű­ŰšŰ§ ŰšŰ§Ù„ŰčŰ§Ù„Ù…!".as_bytes();
-        let m = Match::new(haystack, 0, haystack.len());
-        let debug_str = format!("{:?}", m);
-
-        assert_eq!(
-            debug_str,
-            r#"Match { start: 0, end: 62, bytes: "Hello, 😊 world! 안녕하섞요? Ù…Ű±Ű­ŰšŰ§ ŰšŰ§Ù„ŰčŰ§Ù„Ù…!" }"#
-        );
-    }
-
-    #[test]
-    fn test_debug_output_ascii_escape() {
-        let haystack = b"Hello,\tworld!\nThis is a \x1b[31mtest\x1b[0m.";
-        let m = Match::new(haystack, 0, haystack.len());
-        let debug_str = format!("{:?}", m);
-
-        assert_eq!(
-            debug_str,
-            r#"Match { start: 0, end: 38, bytes: "Hello,\tworld!\nThis is a \u{1b}[31mtest\u{1b}[0m." }"#
-        );
-    }
-
-    #[test]
-    fn test_debug_output_match_in_middle() {
-        let haystack = b"The quick brown fox jumps over the lazy dog.";
-        let m = Match::new(haystack, 16, 19);
-        let debug_str = format!("{:?}", m);
-
-        assert_eq!(debug_str, r#"Match { start: 16, end: 19, bytes: "fox" }"#);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/mod.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/mod.rs
deleted file mode 100644
index 93fadec..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub(crate) mod bytes;
-pub(crate) mod string;
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/string.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/string.rs
deleted file mode 100644
index fab178a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/string.rs
+++ /dev/null
@@ -1,2604 +0,0 @@
-use alloc::{borrow::Cow, string::String, sync::Arc};
-
-use regex_automata::{meta, util::captures, Input, PatternID};
-
-use crate::{error::Error, RegexBuilder};
-
-/// A compiled regular expression for searching Unicode haystacks.
-///
-/// A `Regex` can be used to search haystacks, split haystacks into substrings
-/// or replace substrings in a haystack with a different substring. All
-/// searching is done with an implicit `(?s:.)*?` at the beginning and end of
-/// an pattern. To force an expression to match the whole string (or a prefix
-/// or a suffix), you must use an anchor like `^` or `$` (or `\A` and `\z`).
-///
-/// While this crate will handle Unicode strings (whether in the regular
-/// expression or in the haystack), all positions returned are **byte
-/// offsets**. Every byte offset is guaranteed to be at a Unicode code point
-/// boundary. That is, all offsets returned by the `Regex` API are guaranteed
-/// to be ranges that can slice a `&str` without panicking. If you want to
-/// relax this requirement, then you must search `&[u8]` haystacks with a
-/// [`bytes::Regex`](crate::bytes::Regex).
-///
-/// The only methods that allocate new strings are the string replacement
-/// methods. All other methods (searching and splitting) return borrowed
-/// references into the haystack given.
-///
-/// # Example
-///
-/// Find the offsets of a US phone number:
-///
-/// ```
-/// use regex::Regex;
-///
-/// let re = Regex::new("[0-9]{3}-[0-9]{3}-[0-9]{4}").unwrap();
-/// let m = re.find("phone: 111-222-3333").unwrap();
-/// assert_eq!(7..19, m.range());
-/// ```
-///
-/// # Example: extracting capture groups
-///
-/// A common way to use regexes is with capture groups. That is, instead of
-/// just looking for matches of an entire regex, parentheses are used to create
-/// groups that represent part of the match.
-///
-/// For example, consider a haystack with multiple lines, and each line has
-/// three whitespace delimited fields where the second field is expected to be
-/// a number and the third field a boolean. To make this convenient, we use
-/// the [`Captures::extract`] API to put the strings that match each group
-/// into a fixed size array:
-///
-/// ```
-/// use regex::Regex;
-///
-/// let hay = "
-/// rabbit         54 true
-/// groundhog 2 true
-/// does not match
-/// fox   109    false
-/// ";
-/// let re = Regex::new(r"(?m)^\s*(\S+)\s+([0-9]+)\s+(true|false)\s*$").unwrap();
-/// let mut fields: Vec<(&str, i64, bool)> = vec![];
-/// for (_, [f1, f2, f3]) in re.captures_iter(hay).map(|caps| caps.extract()) {
-///     fields.push((f1, f2.parse()?, f3.parse()?));
-/// }
-/// assert_eq!(fields, vec![
-///     ("rabbit", 54, true),
-///     ("groundhog", 2, true),
-///     ("fox", 109, false),
-/// ]);
-///
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-///
-/// # Example: searching with the `Pattern` trait
-///
-/// **Note**: This section requires that this crate is compiled with the
-/// `pattern` Cargo feature enabled, which **requires nightly Rust**.
-///
-/// Since `Regex` implements `Pattern` from the standard library, one can
-/// use regexes with methods defined on `&str`. For example, `is_match`,
-/// `find`, `find_iter` and `split` can, in some cases, be replaced with
-/// `str::contains`, `str::find`, `str::match_indices` and `str::split`.
-///
-/// Here are some examples:
-///
-/// ```ignore
-/// use regex::Regex;
-///
-/// let re = Regex::new(r"\d+").unwrap();
-/// let hay = "a111b222c";
-///
-/// assert!(hay.contains(&re));
-/// assert_eq!(hay.find(&re), Some(1));
-/// assert_eq!(hay.match_indices(&re).collect::<Vec<_>>(), vec![
-///     (1, "111"),
-///     (5, "222"),
-/// ]);
-/// assert_eq!(hay.split(&re).collect::<Vec<_>>(), vec!["a", "b", "c"]);
-/// ```
-#[derive(Clone)]
-pub struct Regex {
-    pub(crate) meta: meta::Regex,
-    pub(crate) pattern: Arc<str>,
-}
-
-impl core::fmt::Display for Regex {
-    /// Shows the original regular expression.
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(f, "{}", self.as_str())
-    }
-}
-
-impl core::fmt::Debug for Regex {
-    /// Shows the original regular expression.
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        f.debug_tuple("Regex").field(&self.as_str()).finish()
-    }
-}
-
-impl core::str::FromStr for Regex {
-    type Err = Error;
-
-    /// Attempts to parse a string into a regular expression
-    fn from_str(s: &str) -> Result<Regex, Error> {
-        Regex::new(s)
-    }
-}
-
-impl TryFrom<&str> for Regex {
-    type Error = Error;
-
-    /// Attempts to parse a string into a regular expression
-    fn try_from(s: &str) -> Result<Regex, Error> {
-        Regex::new(s)
-    }
-}
-
-impl TryFrom<String> for Regex {
-    type Error = Error;
-
-    /// Attempts to parse a string into a regular expression
-    fn try_from(s: String) -> Result<Regex, Error> {
-        Regex::new(&s)
-    }
-}
-
-/// Core regular expression methods.
-impl Regex {
-    /// Compiles a regular expression. Once compiled, it can be used repeatedly
-    /// to search, split or replace substrings in a haystack.
-    ///
-    /// Note that regex compilation tends to be a somewhat expensive process,
-    /// and unlike higher level environments, compilation is not automatically
-    /// cached for you. One should endeavor to compile a regex once and then
-    /// reuse it. For example, it's a bad idea to compile the same regex
-    /// repeatedly in a loop.
-    ///
-    /// # Errors
-    ///
-    /// If an invalid pattern is given, then an error is returned.
-    /// An error is also returned if the pattern is valid, but would
-    /// produce a regex that is bigger than the configured size limit via
-    /// [`RegexBuilder::size_limit`]. (A reasonable size limit is enabled by
-    /// default.)
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// // An Invalid pattern because of an unclosed parenthesis
-    /// assert!(Regex::new(r"foo(bar").is_err());
-    /// // An invalid pattern because the regex would be too big
-    /// // because Unicode tends to inflate things.
-    /// assert!(Regex::new(r"\w{1000}").is_err());
-    /// // Disabling Unicode can make the regex much smaller,
-    /// // potentially by up to or more than an order of magnitude.
-    /// assert!(Regex::new(r"(?-u:\w){1000}").is_ok());
-    /// ```
-    pub fn new(re: &str) -> Result<Regex, Error> {
-        RegexBuilder::new(re).build()
-    }
-
-    /// Returns true if and only if there is a match for the regex anywhere
-    /// in the haystack given.
-    ///
-    /// It is recommended to use this method if all you need to do is test
-    /// whether a match exists, since the underlying matching engine may be
-    /// able to do less work.
-    ///
-    /// # Example
-    ///
-    /// Test if some haystack contains at least one word with exactly 13
-    /// Unicode word characters:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\b\w{13}\b").unwrap();
-    /// let hay = "I categorically deny having triskaidekaphobia.";
-    /// assert!(re.is_match(hay));
-    /// ```
-    #[inline]
-    pub fn is_match(&self, haystack: &str) -> bool {
-        self.is_match_at(haystack, 0)
-    }
-
-    /// This routine searches for the first match of this regex in the
-    /// haystack given, and if found, returns a [`Match`]. The `Match`
-    /// provides access to both the byte offsets of the match and the actual
-    /// substring that matched.
-    ///
-    /// Note that this should only be used if you want to find the entire
-    /// match. If instead you just want to test the existence of a match,
-    /// it's potentially faster to use `Regex::is_match(hay)` instead of
-    /// `Regex::find(hay).is_some()`.
-    ///
-    /// # Example
-    ///
-    /// Find the first word with exactly 13 Unicode word characters:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\b\w{13}\b").unwrap();
-    /// let hay = "I categorically deny having triskaidekaphobia.";
-    /// let mat = re.find(hay).unwrap();
-    /// assert_eq!(2..15, mat.range());
-    /// assert_eq!("categorically", mat.as_str());
-    /// ```
-    #[inline]
-    pub fn find<'h>(&self, haystack: &'h str) -> Option<Match<'h>> {
-        self.find_at(haystack, 0)
-    }
-
-    /// Returns an iterator that yields successive non-overlapping matches in
-    /// the given haystack. The iterator yields values of type [`Match`].
-    ///
-    /// # Time complexity
-    ///
-    /// Note that since `find_iter` runs potentially many searches on the
-    /// haystack and since each search has worst case `O(m * n)` time
-    /// complexity, the overall worst case time complexity for iteration is
-    /// `O(m * n^2)`.
-    ///
-    /// # Example
-    ///
-    /// Find every word with exactly 13 Unicode word characters:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\b\w{13}\b").unwrap();
-    /// let hay = "Retroactively relinquishing remunerations is reprehensible.";
-    /// let matches: Vec<_> = re.find_iter(hay).map(|m| m.as_str()).collect();
-    /// assert_eq!(matches, vec![
-    ///     "Retroactively",
-    ///     "relinquishing",
-    ///     "remunerations",
-    ///     "reprehensible",
-    /// ]);
-    /// ```
-    #[inline]
-    pub fn find_iter<'r, 'h>(&'r self, haystack: &'h str) -> Matches<'r, 'h> {
-        Matches { haystack, it: self.meta.find_iter(haystack) }
-    }
-
-    /// This routine searches for the first match of this regex in the haystack
-    /// given, and if found, returns not only the overall match but also the
-    /// matches of each capture group in the regex. If no match is found, then
-    /// `None` is returned.
-    ///
-    /// Capture group `0` always corresponds to an implicit unnamed group that
-    /// includes the entire match. If a match is found, this group is always
-    /// present. Subsequent groups may be named and are numbered, starting
-    /// at 1, by the order in which the opening parenthesis appears in the
-    /// pattern. For example, in the pattern `(?<a>.(?<b>.))(?<c>.)`, `a`,
-    /// `b` and `c` correspond to capture group indices `1`, `2` and `3`,
-    /// respectively.
-    ///
-    /// You should only use `captures` if you need access to the capture group
-    /// matches. Otherwise, [`Regex::find`] is generally faster for discovering
-    /// just the overall match.
-    ///
-    /// # Example
-    ///
-    /// Say you have some haystack with movie names and their release years,
-    /// like "'Citizen Kane' (1941)". It'd be nice if we could search for
-    /// substrings looking like that, while also extracting the movie name and
-    /// its release year separately. The example below shows how to do that.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap();
-    /// let hay = "Not my favorite movie: 'Citizen Kane' (1941).";
-    /// let caps = re.captures(hay).unwrap();
-    /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)");
-    /// assert_eq!(caps.get(1).unwrap().as_str(), "Citizen Kane");
-    /// assert_eq!(caps.get(2).unwrap().as_str(), "1941");
-    /// // You can also access the groups by index using the Index notation.
-    /// // Note that this will panic on an invalid index. In this case, these
-    /// // accesses are always correct because the overall regex will only
-    /// // match when these capture groups match.
-    /// assert_eq!(&caps[0], "'Citizen Kane' (1941)");
-    /// assert_eq!(&caps[1], "Citizen Kane");
-    /// assert_eq!(&caps[2], "1941");
-    /// ```
-    ///
-    /// Note that the full match is at capture group `0`. Each subsequent
-    /// capture group is indexed by the order of its opening `(`.
-    ///
-    /// We can make this example a bit clearer by using *named* capture groups:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>\d{4})\)").unwrap();
-    /// let hay = "Not my favorite movie: 'Citizen Kane' (1941).";
-    /// let caps = re.captures(hay).unwrap();
-    /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)");
-    /// assert_eq!(caps.name("title").unwrap().as_str(), "Citizen Kane");
-    /// assert_eq!(caps.name("year").unwrap().as_str(), "1941");
-    /// // You can also access the groups by name using the Index notation.
-    /// // Note that this will panic on an invalid group name. In this case,
-    /// // these accesses are always correct because the overall regex will
-    /// // only match when these capture groups match.
-    /// assert_eq!(&caps[0], "'Citizen Kane' (1941)");
-    /// assert_eq!(&caps["title"], "Citizen Kane");
-    /// assert_eq!(&caps["year"], "1941");
-    /// ```
-    ///
-    /// Here we name the capture groups, which we can access with the `name`
-    /// method or the `Index` notation with a `&str`. Note that the named
-    /// capture groups are still accessible with `get` or the `Index` notation
-    /// with a `usize`.
-    ///
-    /// The `0`th capture group is always unnamed, so it must always be
-    /// accessed with `get(0)` or `[0]`.
-    ///
-    /// Finally, one other way to to get the matched substrings is with the
-    /// [`Captures::extract`] API:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap();
-    /// let hay = "Not my favorite movie: 'Citizen Kane' (1941).";
-    /// let (full, [title, year]) = re.captures(hay).unwrap().extract();
-    /// assert_eq!(full, "'Citizen Kane' (1941)");
-    /// assert_eq!(title, "Citizen Kane");
-    /// assert_eq!(year, "1941");
-    /// ```
-    #[inline]
-    pub fn captures<'h>(&self, haystack: &'h str) -> Option<Captures<'h>> {
-        self.captures_at(haystack, 0)
-    }
-
-    /// Returns an iterator that yields successive non-overlapping matches in
-    /// the given haystack. The iterator yields values of type [`Captures`].
-    ///
-    /// This is the same as [`Regex::find_iter`], but instead of only providing
-    /// access to the overall match, each value yield includes access to the
-    /// matches of all capture groups in the regex. Reporting this extra match
-    /// data is potentially costly, so callers should only use `captures_iter`
-    /// over `find_iter` when they actually need access to the capture group
-    /// matches.
-    ///
-    /// # Time complexity
-    ///
-    /// Note that since `captures_iter` runs potentially many searches on the
-    /// haystack and since each search has worst case `O(m * n)` time
-    /// complexity, the overall worst case time complexity for iteration is
-    /// `O(m * n^2)`.
-    ///
-    /// # Example
-    ///
-    /// We can use this to find all movie titles and their release years in
-    /// some haystack, where the movie is formatted like "'Title' (xxxx)":
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"'([^']+)'\s+\(([0-9]{4})\)").unwrap();
-    /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
-    /// let mut movies = vec![];
-    /// for (_, [title, year]) in re.captures_iter(hay).map(|c| c.extract()) {
-    ///     movies.push((title, year.parse::<i64>()?));
-    /// }
-    /// assert_eq!(movies, vec![
-    ///     ("Citizen Kane", 1941),
-    ///     ("The Wizard of Oz", 1939),
-    ///     ("M", 1931),
-    /// ]);
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// Or with named groups:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>[0-9]{4})\)").unwrap();
-    /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
-    /// let mut it = re.captures_iter(hay);
-    ///
-    /// let caps = it.next().unwrap();
-    /// assert_eq!(&caps["title"], "Citizen Kane");
-    /// assert_eq!(&caps["year"], "1941");
-    ///
-    /// let caps = it.next().unwrap();
-    /// assert_eq!(&caps["title"], "The Wizard of Oz");
-    /// assert_eq!(&caps["year"], "1939");
-    ///
-    /// let caps = it.next().unwrap();
-    /// assert_eq!(&caps["title"], "M");
-    /// assert_eq!(&caps["year"], "1931");
-    /// ```
-    #[inline]
-    pub fn captures_iter<'r, 'h>(
-        &'r self,
-        haystack: &'h str,
-    ) -> CaptureMatches<'r, 'h> {
-        CaptureMatches { haystack, it: self.meta.captures_iter(haystack) }
-    }
-
-    /// Returns an iterator of substrings of the haystack given, delimited by a
-    /// match of the regex. Namely, each element of the iterator corresponds to
-    /// a part of the haystack that *isn't* matched by the regular expression.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// # Example
-    ///
-    /// To split a string delimited by arbitrary amounts of spaces or tabs:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"[ \t]+").unwrap();
-    /// let hay = "a b \t  c\td    e";
-    /// let fields: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]);
-    /// ```
-    ///
-    /// # Example: more cases
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r" ").unwrap();
-    /// let hay = "Mary had a little lamb";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec![""]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "lionXXtigerXleopard";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]);
-    ///
-    /// let re = Regex::new(r"::").unwrap();
-    /// let hay = "lion::tiger::leopard";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["lion", "tiger", "leopard"]);
-    /// ```
-    ///
-    /// If a haystack contains multiple contiguous matches, you will end up
-    /// with empty spans yielded by the iterator:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "XXXXaXXbXc";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]);
-    ///
-    /// let re = Regex::new(r"/").unwrap();
-    /// let hay = "(///)";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["(", "", "", ")"]);
-    /// ```
-    ///
-    /// Separators at the start or end of a haystack are neighbored by empty
-    /// substring.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"0").unwrap();
-    /// let hay = "010";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["", "1", ""]);
-    /// ```
-    ///
-    /// When the empty string is used as a regex, it splits at every valid
-    /// UTF-8 boundary by default (which includes the beginning and end of the
-    /// haystack):
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"").unwrap();
-    /// let hay = "rust";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]);
-    ///
-    /// // Splitting by an empty string is UTF-8 aware by default!
-    /// let re = Regex::new(r"").unwrap();
-    /// let hay = "☃";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["", "☃", ""]);
-    /// ```
-    ///
-    /// Contiguous separators (commonly shows up with whitespace), can lead to
-    /// possibly surprising behavior. For example, this code is correct:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r" ").unwrap();
-    /// let hay = "    a  b c";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]);
-    /// ```
-    ///
-    /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want
-    /// to match contiguous space characters:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r" +").unwrap();
-    /// let hay = "    a  b c";
-    /// let got: Vec<&str> = re.split(hay).collect();
-    /// // N.B. This does still include a leading empty span because ' +'
-    /// // matches at the beginning of the haystack.
-    /// assert_eq!(got, vec!["", "a", "b", "c"]);
-    /// ```
-    #[inline]
-    pub fn split<'r, 'h>(&'r self, haystack: &'h str) -> Split<'r, 'h> {
-        Split { haystack, it: self.meta.split(haystack) }
-    }
-
-    /// Returns an iterator of at most `limit` substrings of the haystack
-    /// given, delimited by a match of the regex. (A `limit` of `0` will return
-    /// no substrings.) Namely, each element of the iterator corresponds to a
-    /// part of the haystack that *isn't* matched by the regular expression.
-    /// The remainder of the haystack that is not split will be the last
-    /// element in the iterator.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// Although note that the worst case time here has an upper bound given
-    /// by the `limit` parameter.
-    ///
-    /// # Example
-    ///
-    /// Get the first two words in some haystack:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\W+").unwrap();
-    /// let hay = "Hey! How are you?";
-    /// let fields: Vec<&str> = re.splitn(hay, 3).collect();
-    /// assert_eq!(fields, vec!["Hey", "How", "are you?"]);
-    /// ```
-    ///
-    /// # Examples: more cases
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r" ").unwrap();
-    /// let hay = "Mary had a little lamb";
-    /// let got: Vec<&str> = re.splitn(hay, 3).collect();
-    /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "";
-    /// let got: Vec<&str> = re.splitn(hay, 3).collect();
-    /// assert_eq!(got, vec![""]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "lionXXtigerXleopard";
-    /// let got: Vec<&str> = re.splitn(hay, 3).collect();
-    /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]);
-    ///
-    /// let re = Regex::new(r"::").unwrap();
-    /// let hay = "lion::tiger::leopard";
-    /// let got: Vec<&str> = re.splitn(hay, 2).collect();
-    /// assert_eq!(got, vec!["lion", "tiger::leopard"]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "abcXdef";
-    /// let got: Vec<&str> = re.splitn(hay, 1).collect();
-    /// assert_eq!(got, vec!["abcXdef"]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "abcdef";
-    /// let got: Vec<&str> = re.splitn(hay, 2).collect();
-    /// assert_eq!(got, vec!["abcdef"]);
-    ///
-    /// let re = Regex::new(r"X").unwrap();
-    /// let hay = "abcXdef";
-    /// let got: Vec<&str> = re.splitn(hay, 0).collect();
-    /// assert!(got.is_empty());
-    /// ```
-    #[inline]
-    pub fn splitn<'r, 'h>(
-        &'r self,
-        haystack: &'h str,
-        limit: usize,
-    ) -> SplitN<'r, 'h> {
-        SplitN { haystack, it: self.meta.splitn(haystack, limit) }
-    }
-
-    /// Replaces the leftmost-first match in the given haystack with the
-    /// replacement provided. The replacement can be a regular string (where
-    /// `$N` and `$name` are expanded to match capture groups) or a function
-    /// that takes a [`Captures`] and returns the replaced string.
-    ///
-    /// If no match is found, then the haystack is returned unchanged. In that
-    /// case, this implementation will likely return a `Cow::Borrowed` value
-    /// such that no allocation is performed.
-    ///
-    /// When a `Cow::Borrowed` is returned, the value returned is guaranteed
-    /// to be equivalent to the `haystack` given.
-    ///
-    /// # Replacement string syntax
-    ///
-    /// All instances of `$ref` in the replacement string are replaced with
-    /// the substring corresponding to the capture group identified by `ref`.
-    ///
-    /// `ref` may be an integer corresponding to the index of the capture group
-    /// (counted by order of opening parenthesis where `0` is the entire match)
-    /// or it can be a name (consisting of letters, digits or underscores)
-    /// corresponding to a named capture group.
-    ///
-    /// If `ref` isn't a valid capture group (whether the name doesn't exist or
-    /// isn't a valid index), then it is replaced with the empty string.
-    ///
-    /// The longest possible name is used. For example, `$1a` looks up the
-    /// capture group named `1a` and not the capture group at index `1`. To
-    /// exert more precise control over the name, use braces, e.g., `${1}a`.
-    ///
-    /// To write a literal `$` use `$$`.
-    ///
-    /// # Example
-    ///
-    /// Note that this function is polymorphic with respect to the replacement.
-    /// In typical usage, this can just be a normal string:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"[^01]+").unwrap();
-    /// assert_eq!(re.replace("1078910", ""), "1010");
-    /// ```
-    ///
-    /// But anything satisfying the [`Replacer`] trait will work. For example,
-    /// a closure of type `|&Captures| -> String` provides direct access to the
-    /// captures corresponding to a match. This allows one to access capturing
-    /// group matches easily:
-    ///
-    /// ```
-    /// use regex::{Captures, Regex};
-    ///
-    /// let re = Regex::new(r"([^,\s]+),\s+(\S+)").unwrap();
-    /// let result = re.replace("Springsteen, Bruce", |caps: &Captures| {
-    ///     format!("{} {}", &caps[2], &caps[1])
-    /// });
-    /// assert_eq!(result, "Bruce Springsteen");
-    /// ```
-    ///
-    /// But this is a bit cumbersome to use all the time. Instead, a simple
-    /// syntax is supported (as described above) that expands `$name` into the
-    /// corresponding capture group. Here's the last example, but using this
-    /// expansion technique with named capture groups:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap();
-    /// let result = re.replace("Springsteen, Bruce", "$first $last");
-    /// assert_eq!(result, "Bruce Springsteen");
-    /// ```
-    ///
-    /// Note that using `$2` instead of `$first` or `$1` instead of `$last`
-    /// would produce the same result. To write a literal `$` use `$$`.
-    ///
-    /// Sometimes the replacement string requires use of curly braces to
-    /// delineate a capture group replacement when it is adjacent to some other
-    /// literal text. For example, if we wanted to join two words together with
-    /// an underscore:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(?<first>\w+)\s+(?<second>\w+)").unwrap();
-    /// let result = re.replace("deep fried", "${first}_$second");
-    /// assert_eq!(result, "deep_fried");
-    /// ```
-    ///
-    /// Without the curly braces, the capture group name `first_` would be
-    /// used, and since it doesn't exist, it would be replaced with the empty
-    /// string.
-    ///
-    /// Finally, sometimes you just want to replace a literal string with no
-    /// regard for capturing group expansion. This can be done by wrapping a
-    /// string with [`NoExpand`]:
-    ///
-    /// ```
-    /// use regex::{NoExpand, Regex};
-    ///
-    /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap();
-    /// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last"));
-    /// assert_eq!(result, "$2 $last");
-    /// ```
-    ///
-    /// Using `NoExpand` may also be faster, since the replacement string won't
-    /// need to be parsed for the `$` syntax.
-    #[inline]
-    pub fn replace<'h, R: Replacer>(
-        &self,
-        haystack: &'h str,
-        rep: R,
-    ) -> Cow<'h, str> {
-        self.replacen(haystack, 1, rep)
-    }
-
-    /// Replaces all non-overlapping matches in the haystack with the
-    /// replacement provided. This is the same as calling `replacen` with
-    /// `limit` set to `0`.
-    ///
-    /// If no match is found, then the haystack is returned unchanged. In that
-    /// case, this implementation will likely return a `Cow::Borrowed` value
-    /// such that no allocation is performed.
-    ///
-    /// When a `Cow::Borrowed` is returned, the value returned is guaranteed
-    /// to be equivalent to the `haystack` given.
-    ///
-    /// The documentation for [`Regex::replace`] goes into more detail about
-    /// what kinds of replacement strings are supported.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// # Fallibility
-    ///
-    /// If you need to write a replacement routine where any individual
-    /// replacement might "fail," doing so with this API isn't really feasible
-    /// because there's no way to stop the search process if a replacement
-    /// fails. Instead, if you need this functionality, you should consider
-    /// implementing your own replacement routine:
-    ///
-    /// ```
-    /// use regex::{Captures, Regex};
-    ///
-    /// fn replace_all<E>(
-    ///     re: &Regex,
-    ///     haystack: &str,
-    ///     replacement: impl Fn(&Captures) -> Result<String, E>,
-    /// ) -> Result<String, E> {
-    ///     let mut new = String::with_capacity(haystack.len());
-    ///     let mut last_match = 0;
-    ///     for caps in re.captures_iter(haystack) {
-    ///         let m = caps.get(0).unwrap();
-    ///         new.push_str(&haystack[last_match..m.start()]);
-    ///         new.push_str(&replacement(&caps)?);
-    ///         last_match = m.end();
-    ///     }
-    ///     new.push_str(&haystack[last_match..]);
-    ///     Ok(new)
-    /// }
-    ///
-    /// // Let's replace each word with the number of bytes in that word.
-    /// // But if we see a word that is "too long," we'll give up.
-    /// let re = Regex::new(r"\w+").unwrap();
-    /// let replacement = |caps: &Captures| -> Result<String, &'static str> {
-    ///     if caps[0].len() >= 5 {
-    ///         return Err("word too long");
-    ///     }
-    ///     Ok(caps[0].len().to_string())
-    /// };
-    /// assert_eq!(
-    ///     Ok("2 3 3 3?".to_string()),
-    ///     replace_all(&re, "hi how are you?", &replacement),
-    /// );
-    /// assert!(replace_all(&re, "hi there", &replacement).is_err());
-    /// ```
-    ///
-    /// # Example
-    ///
-    /// This example shows how to flip the order of whitespace (excluding line
-    /// terminators) delimited fields, and normalizes the whitespace that
-    /// delimits the fields:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap();
-    /// let hay = "
-    /// Greetings  1973
-    /// Wild\t1973
-    /// BornToRun\t\t\t\t1975
-    /// Darkness                    1978
-    /// TheRiver 1980
-    /// ";
-    /// let new = re.replace_all(hay, "$2 $1");
-    /// assert_eq!(new, "
-    /// 1973 Greetings
-    /// 1973 Wild
-    /// 1975 BornToRun
-    /// 1978 Darkness
-    /// 1980 TheRiver
-    /// ");
-    /// ```
-    #[inline]
-    pub fn replace_all<'h, R: Replacer>(
-        &self,
-        haystack: &'h str,
-        rep: R,
-    ) -> Cow<'h, str> {
-        self.replacen(haystack, 0, rep)
-    }
-
-    /// Replaces at most `limit` non-overlapping matches in the haystack with
-    /// the replacement provided. If `limit` is `0`, then all non-overlapping
-    /// matches are replaced. That is, `Regex::replace_all(hay, rep)` is
-    /// equivalent to `Regex::replacen(hay, 0, rep)`.
-    ///
-    /// If no match is found, then the haystack is returned unchanged. In that
-    /// case, this implementation will likely return a `Cow::Borrowed` value
-    /// such that no allocation is performed.
-    ///
-    /// When a `Cow::Borrowed` is returned, the value returned is guaranteed
-    /// to be equivalent to the `haystack` given.
-    ///
-    /// The documentation for [`Regex::replace`] goes into more detail about
-    /// what kinds of replacement strings are supported.
-    ///
-    /// # Time complexity
-    ///
-    /// Since iterators over all matches requires running potentially many
-    /// searches on the haystack, and since each search has worst case
-    /// `O(m * n)` time complexity, the overall worst case time complexity for
-    /// this routine is `O(m * n^2)`.
-    ///
-    /// Although note that the worst case time here has an upper bound given
-    /// by the `limit` parameter.
-    ///
-    /// # Fallibility
-    ///
-    /// See the corresponding section in the docs for [`Regex::replace_all`]
-    /// for tips on how to deal with a replacement routine that can fail.
-    ///
-    /// # Example
-    ///
-    /// This example shows how to flip the order of whitespace (excluding line
-    /// terminators) delimited fields, and normalizes the whitespace that
-    /// delimits the fields. But we only do it for the first two matches.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap();
-    /// let hay = "
-    /// Greetings  1973
-    /// Wild\t1973
-    /// BornToRun\t\t\t\t1975
-    /// Darkness                    1978
-    /// TheRiver 1980
-    /// ";
-    /// let new = re.replacen(hay, 2, "$2 $1");
-    /// assert_eq!(new, "
-    /// 1973 Greetings
-    /// 1973 Wild
-    /// BornToRun\t\t\t\t1975
-    /// Darkness                    1978
-    /// TheRiver 1980
-    /// ");
-    /// ```
-    #[inline]
-    pub fn replacen<'h, R: Replacer>(
-        &self,
-        haystack: &'h str,
-        limit: usize,
-        mut rep: R,
-    ) -> Cow<'h, str> {
-        // If we know that the replacement doesn't have any capture expansions,
-        // then we can use the fast path. The fast path can make a tremendous
-        // difference:
-        //
-        //   1) We use `find_iter` instead of `captures_iter`. Not asking for
-        //      captures generally makes the regex engines faster.
-        //   2) We don't need to look up all of the capture groups and do
-        //      replacements inside the replacement string. We just push it
-        //      at each match and be done with it.
-        if let Some(rep) = rep.no_expansion() {
-            let mut it = self.find_iter(haystack).enumerate().peekable();
-            if it.peek().is_none() {
-                return Cow::Borrowed(haystack);
-            }
-            let mut new = String::with_capacity(haystack.len());
-            let mut last_match = 0;
-            for (i, m) in it {
-                new.push_str(&haystack[last_match..m.start()]);
-                new.push_str(&rep);
-                last_match = m.end();
-                if limit > 0 && i >= limit - 1 {
-                    break;
-                }
-            }
-            new.push_str(&haystack[last_match..]);
-            return Cow::Owned(new);
-        }
-
-        // The slower path, which we use if the replacement may need access to
-        // capture groups.
-        let mut it = self.captures_iter(haystack).enumerate().peekable();
-        if it.peek().is_none() {
-            return Cow::Borrowed(haystack);
-        }
-        let mut new = String::with_capacity(haystack.len());
-        let mut last_match = 0;
-        for (i, cap) in it {
-            // unwrap on 0 is OK because captures only reports matches
-            let m = cap.get(0).unwrap();
-            new.push_str(&haystack[last_match..m.start()]);
-            rep.replace_append(&cap, &mut new);
-            last_match = m.end();
-            if limit > 0 && i >= limit - 1 {
-                break;
-            }
-        }
-        new.push_str(&haystack[last_match..]);
-        Cow::Owned(new)
-    }
-}
-
-/// A group of advanced or "lower level" search methods. Some methods permit
-/// starting the search at a position greater than `0` in the haystack. Other
-/// methods permit reusing allocations, for example, when extracting the
-/// matches for capture groups.
-impl Regex {
-    /// Returns the end byte offset of the first match in the haystack given.
-    ///
-    /// This method may have the same performance characteristics as
-    /// `is_match`. Behaviorlly, it doesn't just report whether it match
-    /// occurs, but also the end offset for a match. In particular, the offset
-    /// returned *may be shorter* than the proper end of the leftmost-first
-    /// match that you would find via [`Regex::find`].
-    ///
-    /// Note that it is not guaranteed that this routine finds the shortest or
-    /// "earliest" possible match. Instead, the main idea of this API is that
-    /// it returns the offset at the point at which the internal regex engine
-    /// has determined that a match has occurred. This may vary depending on
-    /// which internal regex engine is used, and thus, the offset itself may
-    /// change based on internal heuristics.
-    ///
-    /// # Example
-    ///
-    /// Typically, `a+` would match the entire first sequence of `a` in some
-    /// haystack, but `shortest_match` *may* give up as soon as it sees the
-    /// first `a`.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"a+").unwrap();
-    /// let offset = re.shortest_match("aaaaa").unwrap();
-    /// assert_eq!(offset, 1);
-    /// ```
-    #[inline]
-    pub fn shortest_match(&self, haystack: &str) -> Option<usize> {
-        self.shortest_match_at(haystack, 0)
-    }
-
-    /// Returns the same as [`Regex::shortest_match`], but starts the search at
-    /// the given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only match
-    /// when `start == 0`.
-    ///
-    /// If a match is found, the offset returned is relative to the beginning
-    /// of the haystack, not the beginning of the search.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = "eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert_eq!(re.shortest_match(&hay[2..]), Some(4));
-    /// // No match because the  assertions take the context into account.
-    /// assert_eq!(re.shortest_match_at(hay, 2), None);
-    /// ```
-    #[inline]
-    pub fn shortest_match_at(
-        &self,
-        haystack: &str,
-        start: usize,
-    ) -> Option<usize> {
-        let input =
-            Input::new(haystack).earliest(true).span(start..haystack.len());
-        self.meta.search_half(&input).map(|hm| hm.offset())
-    }
-
-    /// Returns the same as [`Regex::is_match`], but starts the search at the
-    /// given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = "eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert!(re.is_match(&hay[2..]));
-    /// // No match because the  assertions take the context into account.
-    /// assert!(!re.is_match_at(hay, 2));
-    /// ```
-    #[inline]
-    pub fn is_match_at(&self, haystack: &str, start: usize) -> bool {
-        let input =
-            Input::new(haystack).earliest(true).span(start..haystack.len());
-        self.meta.search_half(&input).is_some()
-    }
-
-    /// Returns the same as [`Regex::find`], but starts the search at the given
-    /// offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = "eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert_eq!(re.find(&hay[2..]).map(|m| m.range()), Some(0..4));
-    /// // No match because the  assertions take the context into account.
-    /// assert_eq!(re.find_at(hay, 2), None);
-    /// ```
-    #[inline]
-    pub fn find_at<'h>(
-        &self,
-        haystack: &'h str,
-        start: usize,
-    ) -> Option<Match<'h>> {
-        let input = Input::new(haystack).span(start..haystack.len());
-        self.meta
-            .search(&input)
-            .map(|m| Match::new(haystack, m.start(), m.end()))
-    }
-
-    /// Returns the same as [`Regex::captures`], but starts the search at the
-    /// given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = "eschew";
-    /// // We get a match here, but it's probably not intended.
-    /// assert_eq!(&re.captures(&hay[2..]).unwrap()[0], "chew");
-    /// // No match because the  assertions take the context into account.
-    /// assert!(re.captures_at(hay, 2).is_none());
-    /// ```
-    #[inline]
-    pub fn captures_at<'h>(
-        &self,
-        haystack: &'h str,
-        start: usize,
-    ) -> Option<Captures<'h>> {
-        let input = Input::new(haystack).span(start..haystack.len());
-        let mut caps = self.meta.create_captures();
-        self.meta.search_captures(&input, &mut caps);
-        if caps.is_match() {
-            let static_captures_len = self.static_captures_len();
-            Some(Captures { haystack, caps, static_captures_len })
-        } else {
-            None
-        }
-    }
-
-    /// This is like [`Regex::captures`], but writes the byte offsets of each
-    /// capture group match into the locations given.
-    ///
-    /// A [`CaptureLocations`] stores the same byte offsets as a [`Captures`],
-    /// but does *not* store a reference to the haystack. This makes its API
-    /// a bit lower level and less convenient. But in exchange, callers
-    /// may allocate their own `CaptureLocations` and reuse it for multiple
-    /// searches. This may be helpful if allocating a `Captures` shows up in a
-    /// profile as too costly.
-    ///
-    /// To create a `CaptureLocations` value, use the
-    /// [`Regex::capture_locations`] method.
-    ///
-    /// This also returns the overall match if one was found. When a match is
-    /// found, its offsets are also always stored in `locs` at index `0`.
-    ///
-    /// # Panics
-    ///
-    /// This routine may panic if the given `CaptureLocations` was not created
-    /// by this regex.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"^([a-z]+)=(\S*)$").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// assert!(re.captures_read(&mut locs, "id=foo123").is_some());
-    /// assert_eq!(Some((0, 9)), locs.get(0));
-    /// assert_eq!(Some((0, 2)), locs.get(1));
-    /// assert_eq!(Some((3, 9)), locs.get(2));
-    /// ```
-    #[inline]
-    pub fn captures_read<'h>(
-        &self,
-        locs: &mut CaptureLocations,
-        haystack: &'h str,
-    ) -> Option<Match<'h>> {
-        self.captures_read_at(locs, haystack, 0)
-    }
-
-    /// Returns the same as [`Regex::captures_read`], but starts the search at
-    /// the given offset.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// This routine may also panic if the given `CaptureLocations` was not
-    /// created by this regex.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start` by demonstrating how it
-    /// can be used to permit look-around assertions in a regex to take the
-    /// surrounding context into account.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"\bchew\b").unwrap();
-    /// let hay = "eschew";
-    /// let mut locs = re.capture_locations();
-    /// // We get a match here, but it's probably not intended.
-    /// assert!(re.captures_read(&mut locs, &hay[2..]).is_some());
-    /// // No match because the  assertions take the context into account.
-    /// assert!(re.captures_read_at(&mut locs, hay, 2).is_none());
-    /// ```
-    #[inline]
-    pub fn captures_read_at<'h>(
-        &self,
-        locs: &mut CaptureLocations,
-        haystack: &'h str,
-        start: usize,
-    ) -> Option<Match<'h>> {
-        let input = Input::new(haystack).span(start..haystack.len());
-        self.meta.search_captures(&input, &mut locs.0);
-        locs.0.get_match().map(|m| Match::new(haystack, m.start(), m.end()))
-    }
-
-    /// An undocumented alias for `captures_read_at`.
-    ///
-    /// The `regex-capi` crate previously used this routine, so to avoid
-    /// breaking that crate, we continue to provide the name as an undocumented
-    /// alias.
-    #[doc(hidden)]
-    #[inline]
-    pub fn read_captures_at<'h>(
-        &self,
-        locs: &mut CaptureLocations,
-        haystack: &'h str,
-        start: usize,
-    ) -> Option<Match<'h>> {
-        self.captures_read_at(locs, haystack, start)
-    }
-}
-
-/// Auxiliary methods.
-impl Regex {
-    /// Returns the original string of this regex.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"foo\w+bar").unwrap();
-    /// assert_eq!(re.as_str(), r"foo\w+bar");
-    /// ```
-    #[inline]
-    pub fn as_str(&self) -> &str {
-        &self.pattern
-    }
-
-    /// Returns an iterator over the capture names in this regex.
-    ///
-    /// The iterator returned yields elements of type `Option<&str>`. That is,
-    /// the iterator yields values for all capture groups, even ones that are
-    /// unnamed. The order of the groups corresponds to the order of the group's
-    /// corresponding opening parenthesis.
-    ///
-    /// The first element of the iterator always yields the group corresponding
-    /// to the overall match, and this group is always unnamed. Therefore, the
-    /// iterator always yields at least one group.
-    ///
-    /// # Example
-    ///
-    /// This shows basic usage with a mix of named and unnamed capture groups:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap();
-    /// let mut names = re.capture_names();
-    /// assert_eq!(names.next(), Some(None));
-    /// assert_eq!(names.next(), Some(Some("a")));
-    /// assert_eq!(names.next(), Some(Some("b")));
-    /// assert_eq!(names.next(), Some(None));
-    /// // the '(?:.)' group is non-capturing and so doesn't appear here!
-    /// assert_eq!(names.next(), Some(Some("c")));
-    /// assert_eq!(names.next(), None);
-    /// ```
-    ///
-    /// The iterator always yields at least one element, even for regexes with
-    /// no capture groups and even for regexes that can never match:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"").unwrap();
-    /// let mut names = re.capture_names();
-    /// assert_eq!(names.next(), Some(None));
-    /// assert_eq!(names.next(), None);
-    ///
-    /// let re = Regex::new(r"[a&&b]").unwrap();
-    /// let mut names = re.capture_names();
-    /// assert_eq!(names.next(), Some(None));
-    /// assert_eq!(names.next(), None);
-    /// ```
-    #[inline]
-    pub fn capture_names(&self) -> CaptureNames<'_> {
-        CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO))
-    }
-
-    /// Returns the number of captures groups in this regex.
-    ///
-    /// This includes all named and unnamed groups, including the implicit
-    /// unnamed group that is always present and corresponds to the entire
-    /// match.
-    ///
-    /// Since the implicit unnamed group is always included in this length, the
-    /// length returned is guaranteed to be greater than zero.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"foo").unwrap();
-    /// assert_eq!(1, re.captures_len());
-    ///
-    /// let re = Regex::new(r"(foo)").unwrap();
-    /// assert_eq!(2, re.captures_len());
-    ///
-    /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap();
-    /// assert_eq!(5, re.captures_len());
-    ///
-    /// let re = Regex::new(r"[a&&b]").unwrap();
-    /// assert_eq!(1, re.captures_len());
-    /// ```
-    #[inline]
-    pub fn captures_len(&self) -> usize {
-        self.meta.group_info().group_len(PatternID::ZERO)
-    }
-
-    /// Returns the total number of capturing groups that appear in every
-    /// possible match.
-    ///
-    /// If the number of capture groups can vary depending on the match, then
-    /// this returns `None`. That is, a value is only returned when the number
-    /// of matching groups is invariant or "static."
-    ///
-    /// Note that like [`Regex::captures_len`], this **does** include the
-    /// implicit capturing group corresponding to the entire match. Therefore,
-    /// when a non-None value is returned, it is guaranteed to be at least `1`.
-    /// Stated differently, a return value of `Some(0)` is impossible.
-    ///
-    /// # Example
-    ///
-    /// This shows a few cases where a static number of capture groups is
-    /// available and a few cases where it is not.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let len = |pattern| {
-    ///     Regex::new(pattern).map(|re| re.static_captures_len())
-    /// };
-    ///
-    /// assert_eq!(Some(1), len("a")?);
-    /// assert_eq!(Some(2), len("(a)")?);
-    /// assert_eq!(Some(2), len("(a)|(b)")?);
-    /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?);
-    /// assert_eq!(None, len("(a)|b")?);
-    /// assert_eq!(None, len("a|(b)")?);
-    /// assert_eq!(None, len("(b)*")?);
-    /// assert_eq!(Some(2), len("(b)+")?);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn static_captures_len(&self) -> Option<usize> {
-        self.meta.static_captures_len()
-    }
-
-    /// Returns a fresh allocated set of capture locations that can
-    /// be reused in multiple calls to [`Regex::captures_read`] or
-    /// [`Regex::captures_read_at`].
-    ///
-    /// The returned locations can be used for any subsequent search for this
-    /// particular regex. There is no guarantee that it is correct to use for
-    /// other regexes, even if they have the same number of capture groups.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(.)(.)(\w+)").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// assert!(re.captures_read(&mut locs, "Padron").is_some());
-    /// assert_eq!(locs.get(0), Some((0, 6)));
-    /// assert_eq!(locs.get(1), Some((0, 1)));
-    /// assert_eq!(locs.get(2), Some((1, 2)));
-    /// assert_eq!(locs.get(3), Some((2, 6)));
-    /// ```
-    #[inline]
-    pub fn capture_locations(&self) -> CaptureLocations {
-        CaptureLocations(self.meta.create_captures())
-    }
-
-    /// An alias for `capture_locations` to preserve backward compatibility.
-    ///
-    /// The `regex-capi` crate used this method, so to avoid breaking that
-    /// crate, we continue to export it as an undocumented API.
-    #[doc(hidden)]
-    #[inline]
-    pub fn locations(&self) -> CaptureLocations {
-        self.capture_locations()
-    }
-}
-
-/// Represents a single match of a regex in a haystack.
-///
-/// A `Match` contains both the start and end byte offsets of the match and the
-/// actual substring corresponding to the range of those byte offsets. It is
-/// guaranteed that `start <= end`. When `start == end`, the match is empty.
-///
-/// Since this `Match` can only be produced by the top-level `Regex` APIs
-/// that only support searching UTF-8 encoded strings, the byte offsets for a
-/// `Match` are guaranteed to fall on valid UTF-8 codepoint boundaries. That
-/// is, slicing a `&str` with [`Match::range`] is guaranteed to never panic.
-///
-/// Values with this type are created by [`Regex::find`] or
-/// [`Regex::find_iter`]. Other APIs can create `Match` values too. For
-/// example, [`Captures::get`].
-///
-/// The lifetime parameter `'h` refers to the lifetime of the matched of the
-/// haystack that this match was produced from.
-///
-/// # Numbering
-///
-/// The byte offsets in a `Match` form a half-open interval. That is, the
-/// start of the range is inclusive and the end of the range is exclusive.
-/// For example, given a haystack `abcFOOxyz` and a match of `FOO`, its byte
-/// offset range starts at `3` and ends at `6`. `3` corresponds to `F` and
-/// `6` corresponds to `x`, which is one past the end of the match. This
-/// corresponds to the same kind of slicing that Rust uses.
-///
-/// For more on why this was chosen over other schemes (aside from being
-/// consistent with how Rust the language works), see [this discussion] and
-/// [Dijkstra's note on a related topic][note].
-///
-/// [this discussion]: https://github.com/rust-lang/regex/discussions/866
-/// [note]: https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
-///
-/// # Example
-///
-/// This example shows the value of each of the methods on `Match` for a
-/// particular search.
-///
-/// ```
-/// use regex::Regex;
-///
-/// let re = Regex::new(r"\p{Greek}+").unwrap();
-/// let hay = "Greek: αβγδ";
-/// let m = re.find(hay).unwrap();
-/// assert_eq!(7, m.start());
-/// assert_eq!(15, m.end());
-/// assert!(!m.is_empty());
-/// assert_eq!(8, m.len());
-/// assert_eq!(7..15, m.range());
-/// assert_eq!("αβγδ", m.as_str());
-/// ```
-#[derive(Copy, Clone, Eq, PartialEq)]
-pub struct Match<'h> {
-    haystack: &'h str,
-    start: usize,
-    end: usize,
-}
-
-impl<'h> Match<'h> {
-    /// Returns the byte offset of the start of the match in the haystack. The
-    /// start of the match corresponds to the position where the match begins
-    /// and includes the first byte in the match.
-    ///
-    /// It is guaranteed that `Match::start() <= Match::end()`.
-    ///
-    /// This is guaranteed to fall on a valid UTF-8 codepoint boundary. That
-    /// is, it will never be an offset that appears between the UTF-8 code
-    /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is
-    /// always safe to slice the corresponding haystack using this offset.
-    #[inline]
-    pub fn start(&self) -> usize {
-        self.start
-    }
-
-    /// Returns the byte offset of the end of the match in the haystack. The
-    /// end of the match corresponds to the byte immediately following the last
-    /// byte in the match. This means that `&slice[start..end]` works as one
-    /// would expect.
-    ///
-    /// It is guaranteed that `Match::start() <= Match::end()`.
-    ///
-    /// This is guaranteed to fall on a valid UTF-8 codepoint boundary. That
-    /// is, it will never be an offset that appears between the UTF-8 code
-    /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is
-    /// always safe to slice the corresponding haystack using this offset.
-    #[inline]
-    pub fn end(&self) -> usize {
-        self.end
-    }
-
-    /// Returns true if and only if this match has a length of zero.
-    ///
-    /// Note that an empty match can only occur when the regex itself can
-    /// match the empty string. Here are some examples of regexes that can
-    /// all match the empty string: `^`, `^$`, `\b`, `a?`, `a*`, `a{0}`,
-    /// `(foo|\d+|quux)?`.
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.start == self.end
-    }
-
-    /// Returns the length, in bytes, of this match.
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.end - self.start
-    }
-
-    /// Returns the range over the starting and ending byte offsets of the
-    /// match in the haystack.
-    ///
-    /// It is always correct to slice the original haystack searched with this
-    /// range. That is, because the offsets are guaranteed to fall on valid
-    /// UTF-8 boundaries, the range returned is always valid.
-    #[inline]
-    pub fn range(&self) -> core::ops::Range<usize> {
-        self.start..self.end
-    }
-
-    /// Returns the substring of the haystack that matched.
-    #[inline]
-    pub fn as_str(&self) -> &'h str {
-        &self.haystack[self.range()]
-    }
-
-    /// Creates a new match from the given haystack and byte offsets.
-    #[inline]
-    fn new(haystack: &'h str, start: usize, end: usize) -> Match<'h> {
-        Match { haystack, start, end }
-    }
-}
-
-impl<'h> core::fmt::Debug for Match<'h> {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        f.debug_struct("Match")
-            .field("start", &self.start)
-            .field("end", &self.end)
-            .field("string", &self.as_str())
-            .finish()
-    }
-}
-
-impl<'h> From<Match<'h>> for &'h str {
-    fn from(m: Match<'h>) -> &'h str {
-        m.as_str()
-    }
-}
-
-impl<'h> From<Match<'h>> for core::ops::Range<usize> {
-    fn from(m: Match<'h>) -> core::ops::Range<usize> {
-        m.range()
-    }
-}
-
-/// Represents the capture groups for a single match.
-///
-/// Capture groups refer to parts of a regex enclosed in parentheses. They
-/// can be optionally named. The purpose of capture groups is to be able to
-/// reference different parts of a match based on the original pattern. In
-/// essence, a `Captures` is a container of [`Match`] values for each group
-/// that participated in a regex match. Each `Match` can be looked up by either
-/// its capture group index or name (if it has one).
-///
-/// For example, say you want to match the individual letters in a 5-letter
-/// word:
-///
-/// ```text
-/// (?<first>\w)(\w)(?:\w)\w(?<last>\w)
-/// ```
-///
-/// This regex has 4 capture groups:
-///
-/// * The group at index `0` corresponds to the overall match. It is always
-/// present in every match and never has a name.
-/// * The group at index `1` with name `first` corresponding to the first
-/// letter.
-/// * The group at index `2` with no name corresponding to the second letter.
-/// * The group at index `3` with name `last` corresponding to the fifth and
-/// last letter.
-///
-/// Notice that `(?:\w)` was not listed above as a capture group despite it
-/// being enclosed in parentheses. That's because `(?:pattern)` is a special
-/// syntax that permits grouping but *without* capturing. The reason for not
-/// treating it as a capture is that tracking and reporting capture groups
-/// requires additional state that may lead to slower searches. So using as few
-/// capture groups as possible can help performance. (Although the difference
-/// in performance of a couple of capture groups is likely immaterial.)
-///
-/// Values with this type are created by [`Regex::captures`] or
-/// [`Regex::captures_iter`].
-///
-/// `'h` is the lifetime of the haystack that these captures were matched from.
-///
-/// # Example
-///
-/// ```
-/// use regex::Regex;
-///
-/// let re = Regex::new(r"(?<first>\w)(\w)(?:\w)\w(?<last>\w)").unwrap();
-/// let caps = re.captures("toady").unwrap();
-/// assert_eq!("toady", &caps[0]);
-/// assert_eq!("t", &caps["first"]);
-/// assert_eq!("o", &caps[2]);
-/// assert_eq!("y", &caps["last"]);
-/// ```
-pub struct Captures<'h> {
-    haystack: &'h str,
-    caps: captures::Captures,
-    static_captures_len: Option<usize>,
-}
-
-impl<'h> Captures<'h> {
-    /// Returns the `Match` associated with the capture group at index `i`. If
-    /// `i` does not correspond to a capture group, or if the capture group did
-    /// not participate in the match, then `None` is returned.
-    ///
-    /// When `i == 0`, this is guaranteed to return a non-`None` value.
-    ///
-    /// # Examples
-    ///
-    /// Get the substring that matched with a default of an empty string if the
-    /// group didn't participate in the match:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap();
-    /// let caps = re.captures("abc123").unwrap();
-    ///
-    /// let substr1 = caps.get(1).map_or("", |m| m.as_str());
-    /// let substr2 = caps.get(2).map_or("", |m| m.as_str());
-    /// assert_eq!(substr1, "123");
-    /// assert_eq!(substr2, "");
-    /// ```
-    #[inline]
-    pub fn get(&self, i: usize) -> Option<Match<'h>> {
-        self.caps
-            .get_group(i)
-            .map(|sp| Match::new(self.haystack, sp.start, sp.end))
-    }
-
-    /// Returns the `Match` associated with the capture group named `name`. If
-    /// `name` isn't a valid capture group or it refers to a group that didn't
-    /// match, then `None` is returned.
-    ///
-    /// Note that unlike `caps["name"]`, this returns a `Match` whose lifetime
-    /// matches the lifetime of the haystack in this `Captures` value.
-    /// Conversely, the substring returned by `caps["name"]` has a lifetime
-    /// of the `Captures` value, which is likely shorter than the lifetime of
-    /// the haystack. In some cases, it may be necessary to use this method to
-    /// access the matching substring instead of the `caps["name"]` notation.
-    ///
-    /// # Examples
-    ///
-    /// Get the substring that matched with a default of an empty string if the
-    /// group didn't participate in the match:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(
-    ///     r"[a-z]+(?:(?<numbers>[0-9]+)|(?<letters>[A-Z]+))",
-    /// ).unwrap();
-    /// let caps = re.captures("abc123").unwrap();
-    ///
-    /// let numbers = caps.name("numbers").map_or("", |m| m.as_str());
-    /// let letters = caps.name("letters").map_or("", |m| m.as_str());
-    /// assert_eq!(numbers, "123");
-    /// assert_eq!(letters, "");
-    /// ```
-    #[inline]
-    pub fn name(&self, name: &str) -> Option<Match<'h>> {
-        self.caps
-            .get_group_by_name(name)
-            .map(|sp| Match::new(self.haystack, sp.start, sp.end))
-    }
-
-    /// This is a convenience routine for extracting the substrings
-    /// corresponding to matching capture groups.
-    ///
-    /// This returns a tuple where the first element corresponds to the full
-    /// substring of the haystack that matched the regex. The second element is
-    /// an array of substrings, with each corresponding to the substring that
-    /// matched for a particular capture group.
-    ///
-    /// # Panics
-    ///
-    /// This panics if the number of possible matching groups in this
-    /// `Captures` value is not fixed to `N` in all circumstances.
-    /// More precisely, this routine only works when `N` is equivalent to
-    /// [`Regex::static_captures_len`].
-    ///
-    /// Stated more plainly, if the number of matching capture groups in a
-    /// regex can vary from match to match, then this function always panics.
-    ///
-    /// For example, `(a)(b)|(c)` could produce two matching capture groups
-    /// or one matching capture group for any given match. Therefore, one
-    /// cannot use `extract` with such a pattern.
-    ///
-    /// But a pattern like `(a)(b)|(c)(d)` can be used with `extract` because
-    /// the number of capture groups in every match is always equivalent,
-    /// even if the capture _indices_ in each match are not.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap();
-    /// let hay = "On 2010-03-14, I became a Tenneessee lamb.";
-    /// let Some((full, [year, month, day])) =
-    ///     re.captures(hay).map(|caps| caps.extract()) else { return };
-    /// assert_eq!("2010-03-14", full);
-    /// assert_eq!("2010", year);
-    /// assert_eq!("03", month);
-    /// assert_eq!("14", day);
-    /// ```
-    ///
-    /// # Example: iteration
-    ///
-    /// This example shows how to use this method when iterating over all
-    /// `Captures` matches in a haystack.
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap();
-    /// let hay = "1973-01-05, 1975-08-25 and 1980-10-18";
-    ///
-    /// let mut dates: Vec<(&str, &str, &str)> = vec![];
-    /// for (_, [y, m, d]) in re.captures_iter(hay).map(|c| c.extract()) {
-    ///     dates.push((y, m, d));
-    /// }
-    /// assert_eq!(dates, vec![
-    ///     ("1973", "01", "05"),
-    ///     ("1975", "08", "25"),
-    ///     ("1980", "10", "18"),
-    /// ]);
-    /// ```
-    ///
-    /// # Example: parsing different formats
-    ///
-    /// This API is particularly useful when you need to extract a particular
-    /// value that might occur in a different format. Consider, for example,
-    /// an identifier that might be in double quotes or single quotes:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r#"id:(?:"([^"]+)"|'([^']+)')"#).unwrap();
-    /// let hay = r#"The first is id:"foo" and the second is id:'bar'."#;
-    /// let mut ids = vec![];
-    /// for (_, [id]) in re.captures_iter(hay).map(|c| c.extract()) {
-    ///     ids.push(id);
-    /// }
-    /// assert_eq!(ids, vec!["foo", "bar"]);
-    /// ```
-    pub fn extract<const N: usize>(&self) -> (&'h str, [&'h str; N]) {
-        let len = self
-            .static_captures_len
-            .expect("number of capture groups can vary in a match")
-            .checked_sub(1)
-            .expect("number of groups is always greater than zero");
-        assert_eq!(N, len, "asked for {} groups, but must ask for {}", N, len);
-        // The regex-automata variant of extract is a bit more permissive.
-        // It doesn't require the number of matching capturing groups to be
-        // static, and you can even request fewer groups than what's there. So
-        // this is guaranteed to never panic because we've asserted above that
-        // the user has requested precisely the number of groups that must be
-        // present in any match for this regex.
-        self.caps.extract(self.haystack)
-    }
-
-    /// Expands all instances of `$ref` in `replacement` to the corresponding
-    /// capture group, and writes them to the `dst` buffer given. A `ref` can
-    /// be a capture group index or a name. If `ref` doesn't refer to a capture
-    /// group that participated in the match, then it is replaced with the
-    /// empty string.
-    ///
-    /// # Format
-    ///
-    /// The format of the replacement string supports two different kinds of
-    /// capture references: unbraced and braced.
-    ///
-    /// For the unbraced format, the format supported is `$ref` where `name`
-    /// can be any character in the class `[0-9A-Za-z_]`. `ref` is always
-    /// the longest possible parse. So for example, `$1a` corresponds to the
-    /// capture group named `1a` and not the capture group at index `1`. If
-    /// `ref` matches `^[0-9]+$`, then it is treated as a capture group index
-    /// itself and not a name.
-    ///
-    /// For the braced format, the format supported is `${ref}` where `ref` can
-    /// be any sequence of bytes except for `}`. If no closing brace occurs,
-    /// then it is not considered a capture reference. As with the unbraced
-    /// format, if `ref` matches `^[0-9]+$`, then it is treated as a capture
-    /// group index and not a name.
-    ///
-    /// The braced format is useful for exerting precise control over the name
-    /// of the capture reference. For example, `${1}a` corresponds to the
-    /// capture group reference `1` followed by the letter `a`, where as `$1a`
-    /// (as mentioned above) corresponds to the capture group reference `1a`.
-    /// The braced format is also useful for expressing capture group names
-    /// that use characters not supported by the unbraced format. For example,
-    /// `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`.
-    ///
-    /// If a capture group reference is found and it does not refer to a valid
-    /// capture group, then it will be replaced with the empty string.
-    ///
-    /// To write a literal `$`, use `$$`.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(
-    ///     r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})",
-    /// ).unwrap();
-    /// let hay = "On 14-03-2010, I became a Tenneessee lamb.";
-    /// let caps = re.captures(hay).unwrap();
-    ///
-    /// let mut dst = String::new();
-    /// caps.expand("year=$year, month=$month, day=$day", &mut dst);
-    /// assert_eq!(dst, "year=2010, month=03, day=14");
-    /// ```
-    #[inline]
-    pub fn expand(&self, replacement: &str, dst: &mut String) {
-        self.caps.interpolate_string_into(self.haystack, replacement, dst);
-    }
-
-    /// Returns an iterator over all capture groups. This includes both
-    /// matching and non-matching groups.
-    ///
-    /// The iterator always yields at least one matching group: the first group
-    /// (at index `0`) with no name. Subsequent groups are returned in the order
-    /// of their opening parenthesis in the regex.
-    ///
-    /// The elements yielded have type `Option<Match<'h>>`, where a non-`None`
-    /// value is present if the capture group matches.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap();
-    /// let caps = re.captures("AZ").unwrap();
-    ///
-    /// let mut it = caps.iter();
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("AZ"));
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("A"));
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), None);
-    /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("Z"));
-    /// assert_eq!(it.next(), None);
-    /// ```
-    #[inline]
-    pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 'h> {
-        SubCaptureMatches { haystack: self.haystack, it: self.caps.iter() }
-    }
-
-    /// Returns the total number of capture groups. This includes both
-    /// matching and non-matching groups.
-    ///
-    /// The length returned is always equivalent to the number of elements
-    /// yielded by [`Captures::iter`]. Consequently, the length is always
-    /// greater than zero since every `Captures` value always includes the
-    /// match for the entire regex.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap();
-    /// let caps = re.captures("AZ").unwrap();
-    /// assert_eq!(caps.len(), 4);
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.caps.group_len()
-    }
-}
-
-impl<'h> core::fmt::Debug for Captures<'h> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        /// A little helper type to provide a nice map-like debug
-        /// representation for our capturing group spans.
-        ///
-        /// regex-automata has something similar, but it includes the pattern
-        /// ID in its debug output, which is confusing. It also doesn't include
-        /// that strings that match because a regex-automata `Captures` doesn't
-        /// borrow the haystack.
-        struct CapturesDebugMap<'a> {
-            caps: &'a Captures<'a>,
-        }
-
-        impl<'a> core::fmt::Debug for CapturesDebugMap<'a> {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                let mut map = f.debug_map();
-                let names =
-                    self.caps.caps.group_info().pattern_names(PatternID::ZERO);
-                for (group_index, maybe_name) in names.enumerate() {
-                    let key = Key(group_index, maybe_name);
-                    match self.caps.get(group_index) {
-                        None => map.entry(&key, &None::<()>),
-                        Some(mat) => map.entry(&key, &Value(mat)),
-                    };
-                }
-                map.finish()
-            }
-        }
-
-        struct Key<'a>(usize, Option<&'a str>);
-
-        impl<'a> core::fmt::Debug for Key<'a> {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                write!(f, "{}", self.0)?;
-                if let Some(name) = self.1 {
-                    write!(f, "/{:?}", name)?;
-                }
-                Ok(())
-            }
-        }
-
-        struct Value<'a>(Match<'a>);
-
-        impl<'a> core::fmt::Debug for Value<'a> {
-            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-                write!(
-                    f,
-                    "{}..{}/{:?}",
-                    self.0.start(),
-                    self.0.end(),
-                    self.0.as_str()
-                )
-            }
-        }
-
-        f.debug_tuple("Captures")
-            .field(&CapturesDebugMap { caps: self })
-            .finish()
-    }
-}
-
-/// Get a matching capture group's haystack substring by index.
-///
-/// The haystack substring returned can't outlive the `Captures` object if this
-/// method is used, because of how `Index` is defined (normally `a[i]` is part
-/// of `a` and can't outlive it). To work around this limitation, do that, use
-/// [`Captures::get`] instead.
-///
-/// `'h` is the lifetime of the matched haystack, but the lifetime of the
-/// `&str` returned by this implementation is the lifetime of the `Captures`
-/// value itself.
-///
-/// # Panics
-///
-/// If there is no matching group at the given index.
-impl<'h> core::ops::Index<usize> for Captures<'h> {
-    type Output = str;
-
-    // The lifetime is written out to make it clear that the &str returned
-    // does NOT have a lifetime equivalent to 'h.
-    fn index<'a>(&'a self, i: usize) -> &'a str {
-        self.get(i)
-            .map(|m| m.as_str())
-            .unwrap_or_else(|| panic!("no group at index '{}'", i))
-    }
-}
-
-/// Get a matching capture group's haystack substring by name.
-///
-/// The haystack substring returned can't outlive the `Captures` object if this
-/// method is used, because of how `Index` is defined (normally `a[i]` is part
-/// of `a` and can't outlive it). To work around this limitation, do that, use
-/// [`Captures::name`] instead.
-///
-/// `'h` is the lifetime of the matched haystack, but the lifetime of the
-/// `&str` returned by this implementation is the lifetime of the `Captures`
-/// value itself.
-///
-/// `'n` is the lifetime of the group name used to index the `Captures` value.
-///
-/// # Panics
-///
-/// If there is no matching group at the given name.
-impl<'h, 'n> core::ops::Index<&'n str> for Captures<'h> {
-    type Output = str;
-
-    fn index<'a>(&'a self, name: &'n str) -> &'a str {
-        self.name(name)
-            .map(|m| m.as_str())
-            .unwrap_or_else(|| panic!("no group named '{}'", name))
-    }
-}
-
-/// A low level representation of the byte offsets of each capture group.
-///
-/// You can think of this as a lower level [`Captures`], where this type does
-/// not support named capturing groups directly and it does not borrow the
-/// haystack that these offsets were matched on.
-///
-/// Primarily, this type is useful when using the lower level `Regex` APIs such
-/// as [`Regex::captures_read`], which permits amortizing the allocation in
-/// which capture match offsets are stored.
-///
-/// In order to build a value of this type, you'll need to call the
-/// [`Regex::capture_locations`] method. The value returned can then be reused
-/// in subsequent searches for that regex. Using it for other regexes may
-/// result in a panic or otherwise incorrect results.
-///
-/// # Example
-///
-/// This example shows how to create and use `CaptureLocations` in a search.
-///
-/// ```
-/// use regex::Regex;
-///
-/// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
-/// let mut locs = re.capture_locations();
-/// let m = re.captures_read(&mut locs, "Bruce Springsteen").unwrap();
-/// assert_eq!(0..17, m.range());
-/// assert_eq!(Some((0, 17)), locs.get(0));
-/// assert_eq!(Some((0, 5)), locs.get(1));
-/// assert_eq!(Some((6, 17)), locs.get(2));
-///
-/// // Asking for an invalid capture group always returns None.
-/// assert_eq!(None, locs.get(3));
-/// # // literals are too big for 32-bit usize: #1041
-/// # #[cfg(target_pointer_width = "64")]
-/// assert_eq!(None, locs.get(34973498648));
-/// # #[cfg(target_pointer_width = "64")]
-/// assert_eq!(None, locs.get(9944060567225171988));
-/// ```
-#[derive(Clone, Debug)]
-pub struct CaptureLocations(captures::Captures);
-
-/// A type alias for `CaptureLocations` for backwards compatibility.
-///
-/// Previously, we exported `CaptureLocations` as `Locations` in an
-/// undocumented API. To prevent breaking that code (e.g., in `regex-capi`),
-/// we continue re-exporting the same undocumented API.
-#[doc(hidden)]
-pub type Locations = CaptureLocations;
-
-impl CaptureLocations {
-    /// Returns the start and end byte offsets of the capture group at index
-    /// `i`. This returns `None` if `i` is not a valid capture group or if the
-    /// capture group did not match.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap();
-    /// assert_eq!(Some((0, 17)), locs.get(0));
-    /// assert_eq!(Some((0, 5)), locs.get(1));
-    /// assert_eq!(Some((6, 17)), locs.get(2));
-    /// ```
-    #[inline]
-    pub fn get(&self, i: usize) -> Option<(usize, usize)> {
-        self.0.get_group(i).map(|sp| (sp.start, sp.end))
-    }
-
-    /// Returns the total number of capture groups (even if they didn't match).
-    /// That is, the length returned is unaffected by the result of a search.
-    ///
-    /// This is always at least `1` since every regex has at least `1`
-    /// capturing group that corresponds to the entire match.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap();
-    /// let mut locs = re.capture_locations();
-    /// assert_eq!(3, locs.len());
-    /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap();
-    /// assert_eq!(3, locs.len());
-    /// ```
-    ///
-    /// Notice that the length is always at least `1`, regardless of the regex:
-    ///
-    /// ```
-    /// use regex::Regex;
-    ///
-    /// let re = Regex::new(r"").unwrap();
-    /// let locs = re.capture_locations();
-    /// assert_eq!(1, locs.len());
-    ///
-    /// // [a&&b] is a regex that never matches anything.
-    /// let re = Regex::new(r"[a&&b]").unwrap();
-    /// let locs = re.capture_locations();
-    /// assert_eq!(1, locs.len());
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        // self.0.group_len() returns 0 if the underlying captures doesn't
-        // represent a match, but the behavior guaranteed for this method is
-        // that the length doesn't change based on a match or not.
-        self.0.group_info().group_len(PatternID::ZERO)
-    }
-
-    /// An alias for the `get` method for backwards compatibility.
-    ///
-    /// Previously, we exported `get` as `pos` in an undocumented API. To
-    /// prevent breaking that code (e.g., in `regex-capi`), we continue
-    /// re-exporting the same undocumented API.
-    #[doc(hidden)]
-    #[inline]
-    pub fn pos(&self, i: usize) -> Option<(usize, usize)> {
-        self.get(i)
-    }
-}
-
-/// An iterator over all non-overlapping matches in a haystack.
-///
-/// This iterator yields [`Match`] values. The iterator stops when no more
-/// matches can be found.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the haystack.
-///
-/// This iterator is created by [`Regex::find_iter`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-#[derive(Debug)]
-pub struct Matches<'r, 'h> {
-    haystack: &'h str,
-    it: meta::FindMatches<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for Matches<'r, 'h> {
-    type Item = Match<'h>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Match<'h>> {
-        self.it
-            .next()
-            .map(|sp| Match::new(self.haystack, sp.start(), sp.end()))
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        // This can actually be up to 2x faster than calling `next()` until
-        // completion, because counting matches when using a DFA only requires
-        // finding the end of each match. But returning a `Match` via `next()`
-        // requires the start of each match which, with a DFA, requires a
-        // reverse forward scan to find it.
-        self.it.count()
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for Matches<'r, 'h> {}
-
-/// An iterator over all non-overlapping capture matches in a haystack.
-///
-/// This iterator yields [`Captures`] values. The iterator stops when no more
-/// matches can be found.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the matched string.
-///
-/// This iterator is created by [`Regex::captures_iter`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-#[derive(Debug)]
-pub struct CaptureMatches<'r, 'h> {
-    haystack: &'h str,
-    it: meta::CapturesMatches<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for CaptureMatches<'r, 'h> {
-    type Item = Captures<'h>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Captures<'h>> {
-        let static_captures_len = self.it.regex().static_captures_len();
-        self.it.next().map(|caps| Captures {
-            haystack: self.haystack,
-            caps,
-            static_captures_len,
-        })
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        // This can actually be up to 2x faster than calling `next()` until
-        // completion, because counting matches when using a DFA only requires
-        // finding the end of each match. But returning a `Match` via `next()`
-        // requires the start of each match which, with a DFA, requires a
-        // reverse forward scan to find it.
-        self.it.count()
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for CaptureMatches<'r, 'h> {}
-
-/// An iterator over all substrings delimited by a regex match.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the byte string being split.
-///
-/// This iterator is created by [`Regex::split`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-#[derive(Debug)]
-pub struct Split<'r, 'h> {
-    haystack: &'h str,
-    it: meta::Split<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for Split<'r, 'h> {
-    type Item = &'h str;
-
-    #[inline]
-    fn next(&mut self) -> Option<&'h str> {
-        self.it.next().map(|span| &self.haystack[span])
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {}
-
-/// An iterator over at most `N` substrings delimited by a regex match.
-///
-/// The last substring yielded by this iterator will be whatever remains after
-/// `N-1` splits.
-///
-/// `'r` is the lifetime of the compiled regular expression and `'h` is the
-/// lifetime of the byte string being split.
-///
-/// This iterator is created by [`Regex::splitn`].
-///
-/// # Time complexity
-///
-/// Note that since an iterator runs potentially many searches on the haystack
-/// and since each search has worst case `O(m * n)` time complexity, the
-/// overall worst case time complexity for iteration is `O(m * n^2)`.
-///
-/// Although note that the worst case time here has an upper bound given
-/// by the `limit` parameter to [`Regex::splitn`].
-#[derive(Debug)]
-pub struct SplitN<'r, 'h> {
-    haystack: &'h str,
-    it: meta::SplitN<'r, 'h>,
-}
-
-impl<'r, 'h> Iterator for SplitN<'r, 'h> {
-    type Item = &'h str;
-
-    #[inline]
-    fn next(&mut self) -> Option<&'h str> {
-        self.it.next().map(|span| &self.haystack[span])
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-}
-
-impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {}
-
-/// An iterator over the names of all capture groups in a regex.
-///
-/// This iterator yields values of type `Option<&str>` in order of the opening
-/// capture group parenthesis in the regex pattern. `None` is yielded for
-/// groups with no name. The first element always corresponds to the implicit
-/// and unnamed group for the overall match.
-///
-/// `'r` is the lifetime of the compiled regular expression.
-///
-/// This iterator is created by [`Regex::capture_names`].
-#[derive(Clone, Debug)]
-pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>);
-
-impl<'r> Iterator for CaptureNames<'r> {
-    type Item = Option<&'r str>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Option<&'r str>> {
-        self.0.next()
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.0.size_hint()
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        self.0.count()
-    }
-}
-
-impl<'r> ExactSizeIterator for CaptureNames<'r> {}
-
-impl<'r> core::iter::FusedIterator for CaptureNames<'r> {}
-
-/// An iterator over all group matches in a [`Captures`] value.
-///
-/// This iterator yields values of type `Option<Match<'h>>`, where `'h` is the
-/// lifetime of the haystack that the matches are for. The order of elements
-/// yielded corresponds to the order of the opening parenthesis for the group
-/// in the regex pattern. `None` is yielded for groups that did not participate
-/// in the match.
-///
-/// The first element always corresponds to the implicit group for the overall
-/// match. Since this iterator is created by a [`Captures`] value, and a
-/// `Captures` value is only created when a match occurs, it follows that the
-/// first element yielded by this iterator is guaranteed to be non-`None`.
-///
-/// The lifetime `'c` corresponds to the lifetime of the `Captures` value that
-/// created this iterator, and the lifetime `'h` corresponds to the originally
-/// matched haystack.
-#[derive(Clone, Debug)]
-pub struct SubCaptureMatches<'c, 'h> {
-    haystack: &'h str,
-    it: captures::CapturesPatternIter<'c>,
-}
-
-impl<'c, 'h> Iterator for SubCaptureMatches<'c, 'h> {
-    type Item = Option<Match<'h>>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Option<Match<'h>>> {
-        self.it.next().map(|group| {
-            group.map(|sp| Match::new(self.haystack, sp.start, sp.end))
-        })
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-
-    #[inline]
-    fn count(self) -> usize {
-        self.it.count()
-    }
-}
-
-impl<'c, 'h> ExactSizeIterator for SubCaptureMatches<'c, 'h> {}
-
-impl<'c, 'h> core::iter::FusedIterator for SubCaptureMatches<'c, 'h> {}
-
-/// A trait for types that can be used to replace matches in a haystack.
-///
-/// In general, users of this crate shouldn't need to implement this trait,
-/// since implementations are already provided for `&str` along with other
-/// variants of string types, as well as `FnMut(&Captures) -> String` (or any
-/// `FnMut(&Captures) -> T` where `T: AsRef<str>`). Those cover most use cases,
-/// but callers can implement this trait directly if necessary.
-///
-/// # Example
-///
-/// This example shows a basic implementation of  the `Replacer` trait. This
-/// can be done much more simply using the replacement string interpolation
-/// support (e.g., `$first $last`), but this approach avoids needing to parse
-/// the replacement string at all.
-///
-/// ```
-/// use regex::{Captures, Regex, Replacer};
-///
-/// struct NameSwapper;
-///
-/// impl Replacer for NameSwapper {
-///     fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-///         dst.push_str(&caps["first"]);
-///         dst.push_str(" ");
-///         dst.push_str(&caps["last"]);
-///     }
-/// }
-///
-/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap();
-/// let result = re.replace("Springsteen, Bruce", NameSwapper);
-/// assert_eq!(result, "Bruce Springsteen");
-/// ```
-pub trait Replacer {
-    /// Appends possibly empty data to `dst` to replace the current match.
-    ///
-    /// The current match is represented by `caps`, which is guaranteed to
-    /// have a match at capture group `0`.
-    ///
-    /// For example, a no-op replacement would be `dst.push_str(&caps[0])`.
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String);
-
-    /// Return a fixed unchanging replacement string.
-    ///
-    /// When doing replacements, if access to [`Captures`] is not needed (e.g.,
-    /// the replacement string does not need `$` expansion), then it can be
-    /// beneficial to avoid finding sub-captures.
-    ///
-    /// In general, this is called once for every call to a replacement routine
-    /// such as [`Regex::replace_all`].
-    fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, str>> {
-        None
-    }
-
-    /// Returns a type that implements `Replacer`, but that borrows and wraps
-    /// this `Replacer`.
-    ///
-    /// This is useful when you want to take a generic `Replacer` (which might
-    /// not be cloneable) and use it without consuming it, so it can be used
-    /// more than once.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::{Regex, Replacer};
-    ///
-    /// fn replace_all_twice<R: Replacer>(
-    ///     re: Regex,
-    ///     src: &str,
-    ///     mut rep: R,
-    /// ) -> String {
-    ///     let dst = re.replace_all(src, rep.by_ref());
-    ///     let dst = re.replace_all(&dst, rep.by_ref());
-    ///     dst.into_owned()
-    /// }
-    /// ```
-    fn by_ref<'r>(&'r mut self) -> ReplacerRef<'r, Self> {
-        ReplacerRef(self)
-    }
-}
-
-impl<'a> Replacer for &'a str {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-        caps.expand(*self, dst);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, str>> {
-        no_expansion(self)
-    }
-}
-
-impl<'a> Replacer for &'a String {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-        self.as_str().replace_append(caps, dst)
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, str>> {
-        no_expansion(self)
-    }
-}
-
-impl Replacer for String {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-        self.as_str().replace_append(caps, dst)
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, str>> {
-        no_expansion(self)
-    }
-}
-
-impl<'a> Replacer for Cow<'a, str> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-        self.as_ref().replace_append(caps, dst)
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, str>> {
-        no_expansion(self)
-    }
-}
-
-impl<'a> Replacer for &'a Cow<'a, str> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-        self.as_ref().replace_append(caps, dst)
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, str>> {
-        no_expansion(self)
-    }
-}
-
-impl<F, T> Replacer for F
-where
-    F: FnMut(&Captures<'_>) -> T,
-    T: AsRef<str>,
-{
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-        dst.push_str((*self)(caps).as_ref());
-    }
-}
-
-/// A by-reference adaptor for a [`Replacer`].
-///
-/// This permits reusing the same `Replacer` value in multiple calls to a
-/// replacement routine like [`Regex::replace_all`].
-///
-/// This type is created by [`Replacer::by_ref`].
-#[derive(Debug)]
-pub struct ReplacerRef<'a, R: ?Sized>(&'a mut R);
-
-impl<'a, R: Replacer + ?Sized + 'a> Replacer for ReplacerRef<'a, R> {
-    fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) {
-        self.0.replace_append(caps, dst)
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, str>> {
-        self.0.no_expansion()
-    }
-}
-
-/// A helper type for forcing literal string replacement.
-///
-/// It can be used with routines like [`Regex::replace`] and
-/// [`Regex::replace_all`] to do a literal string replacement without expanding
-/// `$name` to their corresponding capture groups. This can be both convenient
-/// (to avoid escaping `$`, for example) and faster (since capture groups
-/// don't need to be found).
-///
-/// `'s` is the lifetime of the literal string to use.
-///
-/// # Example
-///
-/// ```
-/// use regex::{NoExpand, Regex};
-///
-/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap();
-/// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last"));
-/// assert_eq!(result, "$2 $last");
-/// ```
-#[derive(Clone, Debug)]
-pub struct NoExpand<'s>(pub &'s str);
-
-impl<'s> Replacer for NoExpand<'s> {
-    fn replace_append(&mut self, _: &Captures<'_>, dst: &mut String) {
-        dst.push_str(self.0);
-    }
-
-    fn no_expansion(&mut self) -> Option<Cow<'_, str>> {
-        Some(Cow::Borrowed(self.0))
-    }
-}
-
-/// Quickly checks the given replacement string for whether interpolation
-/// should be done on it. It returns `None` if a `$` was found anywhere in the
-/// given string, which suggests interpolation needs to be done. But if there's
-/// no `$` anywhere, then interpolation definitely does not need to be done. In
-/// that case, the given string is returned as a borrowed `Cow`.
-///
-/// This is meant to be used to implement the `Replacer::no_expandsion` method
-/// in its various trait impls.
-fn no_expansion<T: AsRef<str>>(replacement: &T) -> Option<Cow<'_, str>> {
-    let replacement = replacement.as_ref();
-    match crate::find_byte::find_byte(b'$', replacement.as_bytes()) {
-        Some(_) => None,
-        None => Some(Cow::Borrowed(replacement)),
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/bytes.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/bytes.rs
deleted file mode 100644
index 46f02fbb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/bytes.rs
+++ /dev/null
@@ -1,728 +0,0 @@
-use alloc::string::String;
-
-use regex_automata::{meta, Input, PatternID, PatternSet, PatternSetIter};
-
-use crate::{bytes::RegexSetBuilder, Error};
-
-/// Match multiple, possibly overlapping, regexes in a single search.
-///
-/// A regex set corresponds to the union of zero or more regular expressions.
-/// That is, a regex set will match a haystack when at least one of its
-/// constituent regexes matches. A regex set as its formulated here provides a
-/// touch more power: it will also report *which* regular expressions in the
-/// set match. Indeed, this is the key difference between regex sets and a
-/// single `Regex` with many alternates, since only one alternate can match at
-/// a time.
-///
-/// For example, consider regular expressions to match email addresses and
-/// domains: `[a-z]+@[a-z]+\.(com|org|net)` and `[a-z]+\.(com|org|net)`. If a
-/// regex set is constructed from those regexes, then searching the haystack
-/// `foo@example.com` will report both regexes as matching. Of course, one
-/// could accomplish this by compiling each regex on its own and doing two
-/// searches over the haystack. The key advantage of using a regex set is
-/// that it will report the matching regexes using a *single pass through the
-/// haystack*. If one has hundreds or thousands of regexes to match repeatedly
-/// (like a URL router for a complex web application or a user agent matcher),
-/// then a regex set *can* realize huge performance gains.
-///
-/// Unlike the top-level [`RegexSet`](crate::RegexSet), this `RegexSet`
-/// searches haystacks with type `&[u8]` instead of `&str`. Consequently, this
-/// `RegexSet` is permitted to match invalid UTF-8.
-///
-/// # Limitations
-///
-/// Regex sets are limited to answering the following two questions:
-///
-/// 1. Does any regex in the set match?
-/// 2. If so, which regexes in the set match?
-///
-/// As with the main [`Regex`][crate::bytes::Regex] type, it is cheaper to ask
-/// (1) instead of (2) since the matching engines can stop after the first
-/// match is found.
-///
-/// You cannot directly extract [`Match`][crate::bytes::Match] or
-/// [`Captures`][crate::bytes::Captures] objects from a regex set. If you need
-/// these operations, the recommended approach is to compile each pattern in
-/// the set independently and scan the exact same haystack a second time with
-/// those independently compiled patterns:
-///
-/// ```
-/// use regex::bytes::{Regex, RegexSet};
-///
-/// let patterns = ["foo", "bar"];
-/// // Both patterns will match different ranges of this string.
-/// let hay = b"barfoo";
-///
-/// // Compile a set matching any of our patterns.
-/// let set = RegexSet::new(patterns).unwrap();
-/// // Compile each pattern independently.
-/// let regexes: Vec<_> = set
-///     .patterns()
-///     .iter()
-///     .map(|pat| Regex::new(pat).unwrap())
-///     .collect();
-///
-/// // Match against the whole set first and identify the individual
-/// // matching patterns.
-/// let matches: Vec<&[u8]> = set
-///     .matches(hay)
-///     .into_iter()
-///     // Dereference the match index to get the corresponding
-///     // compiled pattern.
-///     .map(|index| &regexes[index])
-///     // To get match locations or any other info, we then have to search the
-///     // exact same haystack again, using our separately-compiled pattern.
-///     .map(|re| re.find(hay).unwrap().as_bytes())
-///     .collect();
-///
-/// // Matches arrive in the order the constituent patterns were declared,
-/// // not the order they appear in the haystack.
-/// assert_eq!(vec![&b"foo"[..], &b"bar"[..]], matches);
-/// ```
-///
-/// # Performance
-///
-/// A `RegexSet` has the same performance characteristics as `Regex`. Namely,
-/// search takes `O(m * n)` time, where `m` is proportional to the size of the
-/// regex set and `n` is proportional to the length of the haystack.
-///
-/// # Trait implementations
-///
-/// The `Default` trait is implemented for `RegexSet`. The default value
-/// is an empty set. An empty set can also be explicitly constructed via
-/// [`RegexSet::empty`].
-///
-/// # Example
-///
-/// This shows how the above two regexes (for matching email addresses and
-/// domains) might work:
-///
-/// ```
-/// use regex::bytes::RegexSet;
-///
-/// let set = RegexSet::new(&[
-///     r"[a-z]+@[a-z]+\.(com|org|net)",
-///     r"[a-z]+\.(com|org|net)",
-/// ]).unwrap();
-///
-/// // Ask whether any regexes in the set match.
-/// assert!(set.is_match(b"foo@example.com"));
-///
-/// // Identify which regexes in the set match.
-/// let matches: Vec<_> = set.matches(b"foo@example.com").into_iter().collect();
-/// assert_eq!(vec![0, 1], matches);
-///
-/// // Try again, but with a haystack that only matches one of the regexes.
-/// let matches: Vec<_> = set.matches(b"example.com").into_iter().collect();
-/// assert_eq!(vec![1], matches);
-///
-/// // Try again, but with a haystack that doesn't match any regex in the set.
-/// let matches: Vec<_> = set.matches(b"example").into_iter().collect();
-/// assert!(matches.is_empty());
-/// ```
-///
-/// Note that it would be possible to adapt the above example to using `Regex`
-/// with an expression like:
-///
-/// ```text
-/// (?P<email>[a-z]+@(?P<email_domain>[a-z]+[.](com|org|net)))|(?P<domain>[a-z]+[.](com|org|net))
-/// ```
-///
-/// After a match, one could then inspect the capture groups to figure out
-/// which alternates matched. The problem is that it is hard to make this
-/// approach scale when there are many regexes since the overlap between each
-/// alternate isn't always obvious to reason about.
-#[derive(Clone)]
-pub struct RegexSet {
-    pub(crate) meta: meta::Regex,
-    pub(crate) patterns: alloc::sync::Arc<[String]>,
-}
-
-impl RegexSet {
-    /// Create a new regex set with the given regular expressions.
-    ///
-    /// This takes an iterator of `S`, where `S` is something that can produce
-    /// a `&str`. If any of the strings in the iterator are not valid regular
-    /// expressions, then an error is returned.
-    ///
-    /// # Example
-    ///
-    /// Create a new regex set from an iterator of strings:
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap();
-    /// assert!(set.is_match(b"foo"));
-    /// ```
-    pub fn new<I, S>(exprs: I) -> Result<RegexSet, Error>
-    where
-        S: AsRef<str>,
-        I: IntoIterator<Item = S>,
-    {
-        RegexSetBuilder::new(exprs).build()
-    }
-
-    /// Create a new empty regex set.
-    ///
-    /// An empty regex never matches anything.
-    ///
-    /// This is a convenience function for `RegexSet::new([])`, but doesn't
-    /// require one to specify the type of the input.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::empty();
-    /// assert!(set.is_empty());
-    /// // an empty set matches nothing
-    /// assert!(!set.is_match(b""));
-    /// ```
-    pub fn empty() -> RegexSet {
-        let empty: [&str; 0] = [];
-        RegexSetBuilder::new(empty).build().unwrap()
-    }
-
-    /// Returns true if and only if one of the regexes in this set matches
-    /// the haystack given.
-    ///
-    /// This method should be preferred if you only need to test whether any
-    /// of the regexes in the set should match, but don't care about *which*
-    /// regexes matched. This is because the underlying matching engine will
-    /// quit immediately after seeing the first match instead of continuing to
-    /// find all matches.
-    ///
-    /// Note that as with searches using [`Regex`](crate::bytes::Regex), the
-    /// expression is unanchored by default. That is, if the regex does not
-    /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted
-    /// to match anywhere in the haystack.
-    ///
-    /// # Example
-    ///
-    /// Tests whether a set matches somewhere in a haystack:
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap();
-    /// assert!(set.is_match(b"foo"));
-    /// assert!(!set.is_match("☃".as_bytes()));
-    /// ```
-    #[inline]
-    pub fn is_match(&self, haystack: &[u8]) -> bool {
-        self.is_match_at(haystack, 0)
-    }
-
-    /// Returns true if and only if one of the regexes in this set matches the
-    /// haystack given, with the search starting at the offset given.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start`. Namely, consider a
-    /// haystack `foobar` and a desire to execute a search starting at offset
-    /// `3`. You could search a substring explicitly, but then the look-around
-    /// assertions won't work correctly. Instead, you can use this method to
-    /// specify the start position of a search.
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap();
-    /// let hay = b"foobar";
-    /// // We get a match here, but it's probably not intended.
-    /// assert!(set.is_match(&hay[3..]));
-    /// // No match because the  assertions take the context into account.
-    /// assert!(!set.is_match_at(hay, 3));
-    /// ```
-    #[inline]
-    pub fn is_match_at(&self, haystack: &[u8], start: usize) -> bool {
-        self.meta.is_match(Input::new(haystack).span(start..haystack.len()))
-    }
-
-    /// Returns the set of regexes that match in the given haystack.
-    ///
-    /// The set returned contains the index of each regex that matches in
-    /// the given haystack. The index is in correspondence with the order of
-    /// regular expressions given to `RegexSet`'s constructor.
-    ///
-    /// The set can also be used to iterate over the matched indices. The order
-    /// of iteration is always ascending with respect to the matching indices.
-    ///
-    /// Note that as with searches using [`Regex`](crate::bytes::Regex), the
-    /// expression is unanchored by default. That is, if the regex does not
-    /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted
-    /// to match anywhere in the haystack.
-    ///
-    /// # Example
-    ///
-    /// Tests which regular expressions match the given haystack:
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"\w+",
-    ///     r"\d+",
-    ///     r"\pL+",
-    ///     r"foo",
-    ///     r"bar",
-    ///     r"barfoo",
-    ///     r"foobar",
-    /// ]).unwrap();
-    /// let matches: Vec<_> = set.matches(b"foobar").into_iter().collect();
-    /// assert_eq!(matches, vec![0, 2, 3, 4, 6]);
-    ///
-    /// // You can also test whether a particular regex matched:
-    /// let matches = set.matches(b"foobar");
-    /// assert!(!matches.matched(5));
-    /// assert!(matches.matched(6));
-    /// ```
-    #[inline]
-    pub fn matches(&self, haystack: &[u8]) -> SetMatches {
-        self.matches_at(haystack, 0)
-    }
-
-    /// Returns the set of regexes that match in the given haystack.
-    ///
-    /// The set returned contains the index of each regex that matches in
-    /// the given haystack. The index is in correspondence with the order of
-    /// regular expressions given to `RegexSet`'s constructor.
-    ///
-    /// The set can also be used to iterate over the matched indices. The order
-    /// of iteration is always ascending with respect to the matching indices.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// Tests which regular expressions match the given haystack:
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap();
-    /// let hay = b"foobar";
-    /// // We get matches here, but it's probably not intended.
-    /// let matches: Vec<_> = set.matches(&hay[3..]).into_iter().collect();
-    /// assert_eq!(matches, vec![0, 1]);
-    /// // No matches because the  assertions take the context into account.
-    /// let matches: Vec<_> = set.matches_at(hay, 3).into_iter().collect();
-    /// assert_eq!(matches, vec![]);
-    /// ```
-    #[inline]
-    pub fn matches_at(&self, haystack: &[u8], start: usize) -> SetMatches {
-        let input = Input::new(haystack).span(start..haystack.len());
-        let mut patset = PatternSet::new(self.meta.pattern_len());
-        self.meta.which_overlapping_matches(&input, &mut patset);
-        SetMatches(patset)
-    }
-
-    /// Returns the same as matches, but starts the search at the given
-    /// offset and stores the matches into the slice given.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// `matches` must have a length that is at least the number of regexes
-    /// in this set.
-    ///
-    /// This method returns true if and only if at least one member of
-    /// `matches` is true after executing the set against `haystack`.
-    #[doc(hidden)]
-    #[inline]
-    pub fn matches_read_at(
-        &self,
-        matches: &mut [bool],
-        haystack: &[u8],
-        start: usize,
-    ) -> bool {
-        // This is pretty dumb. We should try to fix this, but the
-        // regex-automata API doesn't provide a way to store matches in an
-        // arbitrary &mut [bool]. Thankfully, this API is doc(hidden) and
-        // thus not public... But regex-capi currently uses it. We should
-        // fix regex-capi to use a PatternSet, maybe? Not sure... PatternSet
-        // is in regex-automata, not regex. So maybe we should just accept a
-        // 'SetMatches', which is basically just a newtype around PatternSet.
-        let mut patset = PatternSet::new(self.meta.pattern_len());
-        let mut input = Input::new(haystack);
-        input.set_start(start);
-        self.meta.which_overlapping_matches(&input, &mut patset);
-        for pid in patset.iter() {
-            matches[pid] = true;
-        }
-        !patset.is_empty()
-    }
-
-    /// An alias for `matches_read_at` to preserve backward compatibility.
-    ///
-    /// The `regex-capi` crate used this method, so to avoid breaking that
-    /// crate, we continue to export it as an undocumented API.
-    #[doc(hidden)]
-    #[inline]
-    pub fn read_matches_at(
-        &self,
-        matches: &mut [bool],
-        haystack: &[u8],
-        start: usize,
-    ) -> bool {
-        self.matches_read_at(matches, haystack, start)
-    }
-
-    /// Returns the total number of regexes in this set.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// assert_eq!(0, RegexSet::empty().len());
-    /// assert_eq!(1, RegexSet::new([r"[0-9]"]).unwrap().len());
-    /// assert_eq!(2, RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap().len());
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.meta.pattern_len()
-    }
-
-    /// Returns `true` if this set contains no regexes.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// assert!(RegexSet::empty().is_empty());
-    /// assert!(!RegexSet::new([r"[0-9]"]).unwrap().is_empty());
-    /// ```
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.meta.pattern_len() == 0
-    }
-
-    /// Returns the regex patterns that this regex set was constructed from.
-    ///
-    /// This function can be used to determine the pattern for a match. The
-    /// slice returned has exactly as many patterns givens to this regex set,
-    /// and the order of the slice is the same as the order of the patterns
-    /// provided to the set.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new(&[
-    ///     r"\w+",
-    ///     r"\d+",
-    ///     r"\pL+",
-    ///     r"foo",
-    ///     r"bar",
-    ///     r"barfoo",
-    ///     r"foobar",
-    /// ]).unwrap();
-    /// let matches: Vec<_> = set
-    ///     .matches(b"foobar")
-    ///     .into_iter()
-    ///     .map(|index| &set.patterns()[index])
-    ///     .collect();
-    /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]);
-    /// ```
-    #[inline]
-    pub fn patterns(&self) -> &[String] {
-        &self.patterns
-    }
-}
-
-impl Default for RegexSet {
-    fn default() -> Self {
-        RegexSet::empty()
-    }
-}
-
-/// A set of matches returned by a regex set.
-///
-/// Values of this type are constructed by [`RegexSet::matches`].
-#[derive(Clone, Debug)]
-pub struct SetMatches(PatternSet);
-
-impl SetMatches {
-    /// Whether this set contains any matches.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new(&[
-    ///     r"[a-z]+@[a-z]+\.(com|org|net)",
-    ///     r"[a-z]+\.(com|org|net)",
-    /// ]).unwrap();
-    /// let matches = set.matches(b"foo@example.com");
-    /// assert!(matches.matched_any());
-    /// ```
-    #[inline]
-    pub fn matched_any(&self) -> bool {
-        !self.0.is_empty()
-    }
-
-    /// Whether all patterns in this set matched.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new(&[
-    ///     r"^foo",
-    ///     r"[a-z]+\.com",
-    /// ]).unwrap();
-    /// let matches = set.matches(b"foo.example.com");
-    /// assert!(matches.matched_all());
-    /// ```
-    pub fn matched_all(&self) -> bool {
-        self.0.is_full()
-    }
-
-    /// Whether the regex at the given index matched.
-    ///
-    /// The index for a regex is determined by its insertion order upon the
-    /// initial construction of a `RegexSet`, starting at `0`.
-    ///
-    /// # Panics
-    ///
-    /// If `index` is greater than or equal to the number of regexes in the
-    /// original set that produced these matches. Equivalently, when `index`
-    /// is greater than or equal to [`SetMatches::len`].
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[a-z]+@[a-z]+\.(com|org|net)",
-    ///     r"[a-z]+\.(com|org|net)",
-    /// ]).unwrap();
-    /// let matches = set.matches(b"example.com");
-    /// assert!(!matches.matched(0));
-    /// assert!(matches.matched(1));
-    /// ```
-    #[inline]
-    pub fn matched(&self, index: usize) -> bool {
-        self.0.contains(PatternID::new_unchecked(index))
-    }
-
-    /// The total number of regexes in the set that created these matches.
-    ///
-    /// **WARNING:** This always returns the same value as [`RegexSet::len`].
-    /// In particular, it does *not* return the number of elements yielded by
-    /// [`SetMatches::iter`]. The only way to determine the total number of
-    /// matched regexes is to iterate over them.
-    ///
-    /// # Example
-    ///
-    /// Notice that this method returns the total number of regexes in the
-    /// original set, and *not* the total number of regexes that matched.
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[a-z]+@[a-z]+\.(com|org|net)",
-    ///     r"[a-z]+\.(com|org|net)",
-    /// ]).unwrap();
-    /// let matches = set.matches(b"example.com");
-    /// // Total number of patterns that matched.
-    /// assert_eq!(1, matches.iter().count());
-    /// // Total number of patterns in the set.
-    /// assert_eq!(2, matches.len());
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.0.capacity()
-    }
-
-    /// Returns an iterator over the indices of the regexes that matched.
-    ///
-    /// This will always produces matches in ascending order, where the index
-    /// yielded corresponds to the index of the regex that matched with respect
-    /// to its position when initially building the set.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[0-9]",
-    ///     r"[a-z]",
-    ///     r"[A-Z]",
-    ///     r"\p{Greek}",
-    /// ]).unwrap();
-    /// let hay = "βa1".as_bytes();
-    /// let matches: Vec<_> = set.matches(hay).iter().collect();
-    /// assert_eq!(matches, vec![0, 1, 3]);
-    /// ```
-    ///
-    /// Note that `SetMatches` also implemnets the `IntoIterator` trait, so
-    /// this method is not always needed. For example:
-    ///
-    /// ```
-    /// use regex::bytes::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[0-9]",
-    ///     r"[a-z]",
-    ///     r"[A-Z]",
-    ///     r"\p{Greek}",
-    /// ]).unwrap();
-    /// let hay = "βa1".as_bytes();
-    /// let mut matches = vec![];
-    /// for index in set.matches(hay) {
-    ///     matches.push(index);
-    /// }
-    /// assert_eq!(matches, vec![0, 1, 3]);
-    /// ```
-    #[inline]
-    pub fn iter(&self) -> SetMatchesIter<'_> {
-        SetMatchesIter(self.0.iter())
-    }
-}
-
-impl IntoIterator for SetMatches {
-    type IntoIter = SetMatchesIntoIter;
-    type Item = usize;
-
-    fn into_iter(self) -> Self::IntoIter {
-        let it = 0..self.0.capacity();
-        SetMatchesIntoIter { patset: self.0, it }
-    }
-}
-
-impl<'a> IntoIterator for &'a SetMatches {
-    type IntoIter = SetMatchesIter<'a>;
-    type Item = usize;
-
-    fn into_iter(self) -> Self::IntoIter {
-        self.iter()
-    }
-}
-
-/// An owned iterator over the set of matches from a regex set.
-///
-/// This will always produces matches in ascending order of index, where the
-/// index corresponds to the index of the regex that matched with respect to
-/// its position when initially building the set.
-///
-/// This iterator is created by calling `SetMatches::into_iter` via the
-/// `IntoIterator` trait. This is automatically done in `for` loops.
-///
-/// # Example
-///
-/// ```
-/// use regex::bytes::RegexSet;
-///
-/// let set = RegexSet::new([
-///     r"[0-9]",
-///     r"[a-z]",
-///     r"[A-Z]",
-///     r"\p{Greek}",
-/// ]).unwrap();
-/// let hay = "βa1".as_bytes();
-/// let mut matches = vec![];
-/// for index in set.matches(hay) {
-///     matches.push(index);
-/// }
-/// assert_eq!(matches, vec![0, 1, 3]);
-/// ```
-#[derive(Debug)]
-pub struct SetMatchesIntoIter {
-    patset: PatternSet,
-    it: core::ops::Range<usize>,
-}
-
-impl Iterator for SetMatchesIntoIter {
-    type Item = usize;
-
-    fn next(&mut self) -> Option<usize> {
-        loop {
-            let id = self.it.next()?;
-            if self.patset.contains(PatternID::new_unchecked(id)) {
-                return Some(id);
-            }
-        }
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-}
-
-impl DoubleEndedIterator for SetMatchesIntoIter {
-    fn next_back(&mut self) -> Option<usize> {
-        loop {
-            let id = self.it.next_back()?;
-            if self.patset.contains(PatternID::new_unchecked(id)) {
-                return Some(id);
-            }
-        }
-    }
-}
-
-impl core::iter::FusedIterator for SetMatchesIntoIter {}
-
-/// A borrowed iterator over the set of matches from a regex set.
-///
-/// The lifetime `'a` refers to the lifetime of the [`SetMatches`] value that
-/// created this iterator.
-///
-/// This will always produces matches in ascending order, where the index
-/// corresponds to the index of the regex that matched with respect to its
-/// position when initially building the set.
-///
-/// This iterator is created by the [`SetMatches::iter`] method.
-#[derive(Clone, Debug)]
-pub struct SetMatchesIter<'a>(PatternSetIter<'a>);
-
-impl<'a> Iterator for SetMatchesIter<'a> {
-    type Item = usize;
-
-    fn next(&mut self) -> Option<usize> {
-        self.0.next().map(|pid| pid.as_usize())
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.0.size_hint()
-    }
-}
-
-impl<'a> DoubleEndedIterator for SetMatchesIter<'a> {
-    fn next_back(&mut self) -> Option<usize> {
-        self.0.next_back().map(|pid| pid.as_usize())
-    }
-}
-
-impl<'a> core::iter::FusedIterator for SetMatchesIter<'a> {}
-
-impl core::fmt::Debug for RegexSet {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(f, "RegexSet({:?})", self.patterns())
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/mod.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/mod.rs
deleted file mode 100644
index 93fadec..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub(crate) mod bytes;
-pub(crate) mod string;
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/string.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/string.rs
deleted file mode 100644
index 535a670..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/string.rs
+++ /dev/null
@@ -1,724 +0,0 @@
-use alloc::string::String;
-
-use regex_automata::{meta, Input, PatternID, PatternSet, PatternSetIter};
-
-use crate::{Error, RegexSetBuilder};
-
-/// Match multiple, possibly overlapping, regexes in a single search.
-///
-/// A regex set corresponds to the union of zero or more regular expressions.
-/// That is, a regex set will match a haystack when at least one of its
-/// constituent regexes matches. A regex set as its formulated here provides a
-/// touch more power: it will also report *which* regular expressions in the
-/// set match. Indeed, this is the key difference between regex sets and a
-/// single `Regex` with many alternates, since only one alternate can match at
-/// a time.
-///
-/// For example, consider regular expressions to match email addresses and
-/// domains: `[a-z]+@[a-z]+\.(com|org|net)` and `[a-z]+\.(com|org|net)`. If a
-/// regex set is constructed from those regexes, then searching the haystack
-/// `foo@example.com` will report both regexes as matching. Of course, one
-/// could accomplish this by compiling each regex on its own and doing two
-/// searches over the haystack. The key advantage of using a regex set is
-/// that it will report the matching regexes using a *single pass through the
-/// haystack*. If one has hundreds or thousands of regexes to match repeatedly
-/// (like a URL router for a complex web application or a user agent matcher),
-/// then a regex set *can* realize huge performance gains.
-///
-/// # Limitations
-///
-/// Regex sets are limited to answering the following two questions:
-///
-/// 1. Does any regex in the set match?
-/// 2. If so, which regexes in the set match?
-///
-/// As with the main [`Regex`][crate::Regex] type, it is cheaper to ask (1)
-/// instead of (2) since the matching engines can stop after the first match
-/// is found.
-///
-/// You cannot directly extract [`Match`][crate::Match] or
-/// [`Captures`][crate::Captures] objects from a regex set. If you need these
-/// operations, the recommended approach is to compile each pattern in the set
-/// independently and scan the exact same haystack a second time with those
-/// independently compiled patterns:
-///
-/// ```
-/// use regex::{Regex, RegexSet};
-///
-/// let patterns = ["foo", "bar"];
-/// // Both patterns will match different ranges of this string.
-/// let hay = "barfoo";
-///
-/// // Compile a set matching any of our patterns.
-/// let set = RegexSet::new(patterns).unwrap();
-/// // Compile each pattern independently.
-/// let regexes: Vec<_> = set
-///     .patterns()
-///     .iter()
-///     .map(|pat| Regex::new(pat).unwrap())
-///     .collect();
-///
-/// // Match against the whole set first and identify the individual
-/// // matching patterns.
-/// let matches: Vec<&str> = set
-///     .matches(hay)
-///     .into_iter()
-///     // Dereference the match index to get the corresponding
-///     // compiled pattern.
-///     .map(|index| &regexes[index])
-///     // To get match locations or any other info, we then have to search the
-///     // exact same haystack again, using our separately-compiled pattern.
-///     .map(|re| re.find(hay).unwrap().as_str())
-///     .collect();
-///
-/// // Matches arrive in the order the constituent patterns were declared,
-/// // not the order they appear in the haystack.
-/// assert_eq!(vec!["foo", "bar"], matches);
-/// ```
-///
-/// # Performance
-///
-/// A `RegexSet` has the same performance characteristics as `Regex`. Namely,
-/// search takes `O(m * n)` time, where `m` is proportional to the size of the
-/// regex set and `n` is proportional to the length of the haystack.
-///
-/// # Trait implementations
-///
-/// The `Default` trait is implemented for `RegexSet`. The default value
-/// is an empty set. An empty set can also be explicitly constructed via
-/// [`RegexSet::empty`].
-///
-/// # Example
-///
-/// This shows how the above two regexes (for matching email addresses and
-/// domains) might work:
-///
-/// ```
-/// use regex::RegexSet;
-///
-/// let set = RegexSet::new(&[
-///     r"[a-z]+@[a-z]+\.(com|org|net)",
-///     r"[a-z]+\.(com|org|net)",
-/// ]).unwrap();
-///
-/// // Ask whether any regexes in the set match.
-/// assert!(set.is_match("foo@example.com"));
-///
-/// // Identify which regexes in the set match.
-/// let matches: Vec<_> = set.matches("foo@example.com").into_iter().collect();
-/// assert_eq!(vec![0, 1], matches);
-///
-/// // Try again, but with a haystack that only matches one of the regexes.
-/// let matches: Vec<_> = set.matches("example.com").into_iter().collect();
-/// assert_eq!(vec![1], matches);
-///
-/// // Try again, but with a haystack that doesn't match any regex in the set.
-/// let matches: Vec<_> = set.matches("example").into_iter().collect();
-/// assert!(matches.is_empty());
-/// ```
-///
-/// Note that it would be possible to adapt the above example to using `Regex`
-/// with an expression like:
-///
-/// ```text
-/// (?P<email>[a-z]+@(?P<email_domain>[a-z]+[.](com|org|net)))|(?P<domain>[a-z]+[.](com|org|net))
-/// ```
-///
-/// After a match, one could then inspect the capture groups to figure out
-/// which alternates matched. The problem is that it is hard to make this
-/// approach scale when there are many regexes since the overlap between each
-/// alternate isn't always obvious to reason about.
-#[derive(Clone)]
-pub struct RegexSet {
-    pub(crate) meta: meta::Regex,
-    pub(crate) patterns: alloc::sync::Arc<[String]>,
-}
-
-impl RegexSet {
-    /// Create a new regex set with the given regular expressions.
-    ///
-    /// This takes an iterator of `S`, where `S` is something that can produce
-    /// a `&str`. If any of the strings in the iterator are not valid regular
-    /// expressions, then an error is returned.
-    ///
-    /// # Example
-    ///
-    /// Create a new regex set from an iterator of strings:
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap();
-    /// assert!(set.is_match("foo"));
-    /// ```
-    pub fn new<I, S>(exprs: I) -> Result<RegexSet, Error>
-    where
-        S: AsRef<str>,
-        I: IntoIterator<Item = S>,
-    {
-        RegexSetBuilder::new(exprs).build()
-    }
-
-    /// Create a new empty regex set.
-    ///
-    /// An empty regex never matches anything.
-    ///
-    /// This is a convenience function for `RegexSet::new([])`, but doesn't
-    /// require one to specify the type of the input.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::empty();
-    /// assert!(set.is_empty());
-    /// // an empty set matches nothing
-    /// assert!(!set.is_match(""));
-    /// ```
-    pub fn empty() -> RegexSet {
-        let empty: [&str; 0] = [];
-        RegexSetBuilder::new(empty).build().unwrap()
-    }
-
-    /// Returns true if and only if one of the regexes in this set matches
-    /// the haystack given.
-    ///
-    /// This method should be preferred if you only need to test whether any
-    /// of the regexes in the set should match, but don't care about *which*
-    /// regexes matched. This is because the underlying matching engine will
-    /// quit immediately after seeing the first match instead of continuing to
-    /// find all matches.
-    ///
-    /// Note that as with searches using [`Regex`](crate::Regex), the
-    /// expression is unanchored by default. That is, if the regex does not
-    /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted
-    /// to match anywhere in the haystack.
-    ///
-    /// # Example
-    ///
-    /// Tests whether a set matches somewhere in a haystack:
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap();
-    /// assert!(set.is_match("foo"));
-    /// assert!(!set.is_match("☃"));
-    /// ```
-    #[inline]
-    pub fn is_match(&self, haystack: &str) -> bool {
-        self.is_match_at(haystack, 0)
-    }
-
-    /// Returns true if and only if one of the regexes in this set matches the
-    /// haystack given, with the search starting at the offset given.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// This example shows the significance of `start`. Namely, consider a
-    /// haystack `foobar` and a desire to execute a search starting at offset
-    /// `3`. You could search a substring explicitly, but then the look-around
-    /// assertions won't work correctly. Instead, you can use this method to
-    /// specify the start position of a search.
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap();
-    /// let hay = "foobar";
-    /// // We get a match here, but it's probably not intended.
-    /// assert!(set.is_match(&hay[3..]));
-    /// // No match because the  assertions take the context into account.
-    /// assert!(!set.is_match_at(hay, 3));
-    /// ```
-    #[inline]
-    pub fn is_match_at(&self, haystack: &str, start: usize) -> bool {
-        self.meta.is_match(Input::new(haystack).span(start..haystack.len()))
-    }
-
-    /// Returns the set of regexes that match in the given haystack.
-    ///
-    /// The set returned contains the index of each regex that matches in
-    /// the given haystack. The index is in correspondence with the order of
-    /// regular expressions given to `RegexSet`'s constructor.
-    ///
-    /// The set can also be used to iterate over the matched indices. The order
-    /// of iteration is always ascending with respect to the matching indices.
-    ///
-    /// Note that as with searches using [`Regex`](crate::Regex), the
-    /// expression is unanchored by default. That is, if the regex does not
-    /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted
-    /// to match anywhere in the haystack.
-    ///
-    /// # Example
-    ///
-    /// Tests which regular expressions match the given haystack:
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"\w+",
-    ///     r"\d+",
-    ///     r"\pL+",
-    ///     r"foo",
-    ///     r"bar",
-    ///     r"barfoo",
-    ///     r"foobar",
-    /// ]).unwrap();
-    /// let matches: Vec<_> = set.matches("foobar").into_iter().collect();
-    /// assert_eq!(matches, vec![0, 2, 3, 4, 6]);
-    ///
-    /// // You can also test whether a particular regex matched:
-    /// let matches = set.matches("foobar");
-    /// assert!(!matches.matched(5));
-    /// assert!(matches.matched(6));
-    /// ```
-    #[inline]
-    pub fn matches(&self, haystack: &str) -> SetMatches {
-        self.matches_at(haystack, 0)
-    }
-
-    /// Returns the set of regexes that match in the given haystack.
-    ///
-    /// The set returned contains the index of each regex that matches in
-    /// the given haystack. The index is in correspondence with the order of
-    /// regular expressions given to `RegexSet`'s constructor.
-    ///
-    /// The set can also be used to iterate over the matched indices. The order
-    /// of iteration is always ascending with respect to the matching indices.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `start >= haystack.len() + 1`.
-    ///
-    /// # Example
-    ///
-    /// Tests which regular expressions match the given haystack:
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap();
-    /// let hay = "foobar";
-    /// // We get matches here, but it's probably not intended.
-    /// let matches: Vec<_> = set.matches(&hay[3..]).into_iter().collect();
-    /// assert_eq!(matches, vec![0, 1]);
-    /// // No matches because the  assertions take the context into account.
-    /// let matches: Vec<_> = set.matches_at(hay, 3).into_iter().collect();
-    /// assert_eq!(matches, vec![]);
-    /// ```
-    #[inline]
-    pub fn matches_at(&self, haystack: &str, start: usize) -> SetMatches {
-        let input = Input::new(haystack).span(start..haystack.len());
-        let mut patset = PatternSet::new(self.meta.pattern_len());
-        self.meta.which_overlapping_matches(&input, &mut patset);
-        SetMatches(patset)
-    }
-
-    /// Returns the same as matches, but starts the search at the given
-    /// offset and stores the matches into the slice given.
-    ///
-    /// The significance of the starting point is that it takes the surrounding
-    /// context into consideration. For example, the `\A` anchor can only
-    /// match when `start == 0`.
-    ///
-    /// `matches` must have a length that is at least the number of regexes
-    /// in this set.
-    ///
-    /// This method returns true if and only if at least one member of
-    /// `matches` is true after executing the set against `haystack`.
-    #[doc(hidden)]
-    #[inline]
-    pub fn matches_read_at(
-        &self,
-        matches: &mut [bool],
-        haystack: &str,
-        start: usize,
-    ) -> bool {
-        // This is pretty dumb. We should try to fix this, but the
-        // regex-automata API doesn't provide a way to store matches in an
-        // arbitrary &mut [bool]. Thankfully, this API is doc(hidden) and
-        // thus not public... But regex-capi currently uses it. We should
-        // fix regex-capi to use a PatternSet, maybe? Not sure... PatternSet
-        // is in regex-automata, not regex. So maybe we should just accept a
-        // 'SetMatches', which is basically just a newtype around PatternSet.
-        let mut patset = PatternSet::new(self.meta.pattern_len());
-        let mut input = Input::new(haystack);
-        input.set_start(start);
-        self.meta.which_overlapping_matches(&input, &mut patset);
-        for pid in patset.iter() {
-            matches[pid] = true;
-        }
-        !patset.is_empty()
-    }
-
-    /// An alias for `matches_read_at` to preserve backward compatibility.
-    ///
-    /// The `regex-capi` crate used this method, so to avoid breaking that
-    /// crate, we continue to export it as an undocumented API.
-    #[doc(hidden)]
-    #[inline]
-    pub fn read_matches_at(
-        &self,
-        matches: &mut [bool],
-        haystack: &str,
-        start: usize,
-    ) -> bool {
-        self.matches_read_at(matches, haystack, start)
-    }
-
-    /// Returns the total number of regexes in this set.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// assert_eq!(0, RegexSet::empty().len());
-    /// assert_eq!(1, RegexSet::new([r"[0-9]"]).unwrap().len());
-    /// assert_eq!(2, RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap().len());
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.meta.pattern_len()
-    }
-
-    /// Returns `true` if this set contains no regexes.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// assert!(RegexSet::empty().is_empty());
-    /// assert!(!RegexSet::new([r"[0-9]"]).unwrap().is_empty());
-    /// ```
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.meta.pattern_len() == 0
-    }
-
-    /// Returns the regex patterns that this regex set was constructed from.
-    ///
-    /// This function can be used to determine the pattern for a match. The
-    /// slice returned has exactly as many patterns givens to this regex set,
-    /// and the order of the slice is the same as the order of the patterns
-    /// provided to the set.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new(&[
-    ///     r"\w+",
-    ///     r"\d+",
-    ///     r"\pL+",
-    ///     r"foo",
-    ///     r"bar",
-    ///     r"barfoo",
-    ///     r"foobar",
-    /// ]).unwrap();
-    /// let matches: Vec<_> = set
-    ///     .matches("foobar")
-    ///     .into_iter()
-    ///     .map(|index| &set.patterns()[index])
-    ///     .collect();
-    /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]);
-    /// ```
-    #[inline]
-    pub fn patterns(&self) -> &[String] {
-        &self.patterns
-    }
-}
-
-impl Default for RegexSet {
-    fn default() -> Self {
-        RegexSet::empty()
-    }
-}
-
-/// A set of matches returned by a regex set.
-///
-/// Values of this type are constructed by [`RegexSet::matches`].
-#[derive(Clone, Debug)]
-pub struct SetMatches(PatternSet);
-
-impl SetMatches {
-    /// Whether this set contains any matches.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new(&[
-    ///     r"[a-z]+@[a-z]+\.(com|org|net)",
-    ///     r"[a-z]+\.(com|org|net)",
-    /// ]).unwrap();
-    /// let matches = set.matches("foo@example.com");
-    /// assert!(matches.matched_any());
-    /// ```
-    #[inline]
-    pub fn matched_any(&self) -> bool {
-        !self.0.is_empty()
-    }
-
-    /// Whether all patterns in this set matched.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new(&[
-    ///     r"^foo",
-    ///     r"[a-z]+\.com",
-    /// ]).unwrap();
-    /// let matches = set.matches("foo.example.com");
-    /// assert!(matches.matched_all());
-    /// ```
-    pub fn matched_all(&self) -> bool {
-        self.0.is_full()
-    }
-
-    /// Whether the regex at the given index matched.
-    ///
-    /// The index for a regex is determined by its insertion order upon the
-    /// initial construction of a `RegexSet`, starting at `0`.
-    ///
-    /// # Panics
-    ///
-    /// If `index` is greater than or equal to the number of regexes in the
-    /// original set that produced these matches. Equivalently, when `index`
-    /// is greater than or equal to [`SetMatches::len`].
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[a-z]+@[a-z]+\.(com|org|net)",
-    ///     r"[a-z]+\.(com|org|net)",
-    /// ]).unwrap();
-    /// let matches = set.matches("example.com");
-    /// assert!(!matches.matched(0));
-    /// assert!(matches.matched(1));
-    /// ```
-    #[inline]
-    pub fn matched(&self, index: usize) -> bool {
-        self.0.contains(PatternID::new_unchecked(index))
-    }
-
-    /// The total number of regexes in the set that created these matches.
-    ///
-    /// **WARNING:** This always returns the same value as [`RegexSet::len`].
-    /// In particular, it does *not* return the number of elements yielded by
-    /// [`SetMatches::iter`]. The only way to determine the total number of
-    /// matched regexes is to iterate over them.
-    ///
-    /// # Example
-    ///
-    /// Notice that this method returns the total number of regexes in the
-    /// original set, and *not* the total number of regexes that matched.
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[a-z]+@[a-z]+\.(com|org|net)",
-    ///     r"[a-z]+\.(com|org|net)",
-    /// ]).unwrap();
-    /// let matches = set.matches("example.com");
-    /// // Total number of patterns that matched.
-    /// assert_eq!(1, matches.iter().count());
-    /// // Total number of patterns in the set.
-    /// assert_eq!(2, matches.len());
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.0.capacity()
-    }
-
-    /// Returns an iterator over the indices of the regexes that matched.
-    ///
-    /// This will always produces matches in ascending order, where the index
-    /// yielded corresponds to the index of the regex that matched with respect
-    /// to its position when initially building the set.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[0-9]",
-    ///     r"[a-z]",
-    ///     r"[A-Z]",
-    ///     r"\p{Greek}",
-    /// ]).unwrap();
-    /// let hay = "βa1";
-    /// let matches: Vec<_> = set.matches(hay).iter().collect();
-    /// assert_eq!(matches, vec![0, 1, 3]);
-    /// ```
-    ///
-    /// Note that `SetMatches` also implemnets the `IntoIterator` trait, so
-    /// this method is not always needed. For example:
-    ///
-    /// ```
-    /// use regex::RegexSet;
-    ///
-    /// let set = RegexSet::new([
-    ///     r"[0-9]",
-    ///     r"[a-z]",
-    ///     r"[A-Z]",
-    ///     r"\p{Greek}",
-    /// ]).unwrap();
-    /// let hay = "βa1";
-    /// let mut matches = vec![];
-    /// for index in set.matches(hay) {
-    ///     matches.push(index);
-    /// }
-    /// assert_eq!(matches, vec![0, 1, 3]);
-    /// ```
-    #[inline]
-    pub fn iter(&self) -> SetMatchesIter<'_> {
-        SetMatchesIter(self.0.iter())
-    }
-}
-
-impl IntoIterator for SetMatches {
-    type IntoIter = SetMatchesIntoIter;
-    type Item = usize;
-
-    fn into_iter(self) -> Self::IntoIter {
-        let it = 0..self.0.capacity();
-        SetMatchesIntoIter { patset: self.0, it }
-    }
-}
-
-impl<'a> IntoIterator for &'a SetMatches {
-    type IntoIter = SetMatchesIter<'a>;
-    type Item = usize;
-
-    fn into_iter(self) -> Self::IntoIter {
-        self.iter()
-    }
-}
-
-/// An owned iterator over the set of matches from a regex set.
-///
-/// This will always produces matches in ascending order of index, where the
-/// index corresponds to the index of the regex that matched with respect to
-/// its position when initially building the set.
-///
-/// This iterator is created by calling `SetMatches::into_iter` via the
-/// `IntoIterator` trait. This is automatically done in `for` loops.
-///
-/// # Example
-///
-/// ```
-/// use regex::RegexSet;
-///
-/// let set = RegexSet::new([
-///     r"[0-9]",
-///     r"[a-z]",
-///     r"[A-Z]",
-///     r"\p{Greek}",
-/// ]).unwrap();
-/// let hay = "βa1";
-/// let mut matches = vec![];
-/// for index in set.matches(hay) {
-///     matches.push(index);
-/// }
-/// assert_eq!(matches, vec![0, 1, 3]);
-/// ```
-#[derive(Debug)]
-pub struct SetMatchesIntoIter {
-    patset: PatternSet,
-    it: core::ops::Range<usize>,
-}
-
-impl Iterator for SetMatchesIntoIter {
-    type Item = usize;
-
-    fn next(&mut self) -> Option<usize> {
-        loop {
-            let id = self.it.next()?;
-            if self.patset.contains(PatternID::new_unchecked(id)) {
-                return Some(id);
-            }
-        }
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-}
-
-impl DoubleEndedIterator for SetMatchesIntoIter {
-    fn next_back(&mut self) -> Option<usize> {
-        loop {
-            let id = self.it.next_back()?;
-            if self.patset.contains(PatternID::new_unchecked(id)) {
-                return Some(id);
-            }
-        }
-    }
-}
-
-impl core::iter::FusedIterator for SetMatchesIntoIter {}
-
-/// A borrowed iterator over the set of matches from a regex set.
-///
-/// The lifetime `'a` refers to the lifetime of the [`SetMatches`] value that
-/// created this iterator.
-///
-/// This will always produces matches in ascending order, where the index
-/// corresponds to the index of the regex that matched with respect to its
-/// position when initially building the set.
-///
-/// This iterator is created by the [`SetMatches::iter`] method.
-#[derive(Clone, Debug)]
-pub struct SetMatchesIter<'a>(PatternSetIter<'a>);
-
-impl<'a> Iterator for SetMatchesIter<'a> {
-    type Item = usize;
-
-    fn next(&mut self) -> Option<usize> {
-        self.0.next().map(|pid| pid.as_usize())
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.0.size_hint()
-    }
-}
-
-impl<'a> DoubleEndedIterator for SetMatchesIter<'a> {
-    fn next_back(&mut self) -> Option<usize> {
-        self.0.next_back().map(|pid| pid.as_usize())
-    }
-}
-
-impl<'a> core::iter::FusedIterator for SetMatchesIter<'a> {}
-
-impl core::fmt::Debug for RegexSet {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(f, "RegexSet({:?})", self.patterns())
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/test b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/test
deleted file mode 100755
index 48224c6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/test
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# cd to the directory containing this crate's Cargo.toml so that we don't need
-# to pass --manifest-path to every `cargo` command.
-cd "$(dirname "$0")"
-
-# This is a convenience script for running a broad swath of tests across
-# features. We don't test the complete space, since the complete space is quite
-# large. Hopefully once we migrate the test suite to better infrastructure
-# (like regex-automata), we'll be able to test more of the space.
-echo "===== DEFAULT FEATURES ====="
-cargo test
-
-# no-std mode is annoyingly difficult to test. Currently, the integration tests
-# don't run. So for now, we just test that library tests run. (There aren't
-# many because `regex` is just a wrapper crate.)
-cargo test --no-default-features --lib
-
-echo "===== DOC TESTS ====="
-cargo test --doc
-
-features=(
-    "std"
-    "std unicode"
-    "std unicode-perl"
-    "std perf"
-    "std perf-cache"
-    "std perf-dfa"
-    "std perf-inline"
-    "std perf-literal"
-    "std perf-dfa-full"
-    "std perf-onepass"
-    "std perf-backtrack"
-)
-for f in "${features[@]}"; do
-    echo "===== FEATURE: $f ====="
-    cargo test --test integration --no-default-features --features "$f"
-done
-
-# And test the probably-forever-nightly-only 'pattern' feature...
-if rustc --version | grep -q nightly; then
-    echo "===== FEATURE: std,pattern,unicode-perl ====="
-    cargo test --test integration --no-default-features --features std,pattern,unicode-perl
-fi
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/README.md b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/README.md
deleted file mode 100644
index c3bc1ac..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-This directory contains a large suite of regex tests defined in a TOML format.
-They are used to drive tests in `tests/lib.rs`, `regex-automata/tests/lib.rs`
-and `regex-lite/tests/lib.rs`.
-
-See the [`regex-test`][regex-test] crate documentation for an explanation of
-the format and how it generates tests.
-
-The basic idea here is that we have many different regex engines but generally
-one set of tests. We want to be able to run those tests (or most of them) on
-every engine. Prior to `regex 1.9`, we used to do this with a hodge podge soup
-of macros and a different test executable for each engine. It overall took a
-longer time to compile, was harder to maintain and it made the test definitions
-themselves less clear.
-
-In `regex 1.9`, when we moved over to `regex-automata`, the situation got a lot
-worse because of an increase in the number of engines. So I devised an engine
-independent format for testing regex patterns and their semantics.
-
-Note: the naming scheme used in these tests isn't terribly consistent. It would
-be great to fix that.
-
-[regex-test]: https://docs.rs/regex-test
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/anchored.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/anchored.toml
deleted file mode 100644
index 0f2248d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/anchored.toml
+++ /dev/null
@@ -1,127 +0,0 @@
-# These tests are specifically geared toward searches with 'anchored = true'.
-# While they are interesting in their own right, they are particularly
-# important for testing the one-pass DFA since the one-pass DFA can't work in
-# unanchored contexts.
-#
-# Note that "anchored" in this context does not mean "^". Anchored searches are
-# searches whose matches must begin at the start of the search, which may not
-# be at the start of the haystack. That's why anchored searches---and there are
-# some examples below---can still report multiple matches. This occurs when the
-# matches are adjacent to one another.
-
-[[test]]
-name = "greedy"
-regex = '(abc)+'
-haystack = "abcabcabc"
-matches = [
-  [[0, 9], [6, 9]],
-]
-anchored = true
-
-# When a "earliest" search is used, greediness doesn't really exist because
-# matches are reported as soon as they are known.
-[[test]]
-name = "greedy-earliest"
-regex = '(abc)+'
-haystack = "abcabcabc"
-matches = [
-  [[0, 3], [0, 3]],
-  [[3, 6], [3, 6]],
-  [[6, 9], [6, 9]],
-]
-anchored = true
-search-kind = "earliest"
-
-[[test]]
-name = "nongreedy"
-regex = '(abc)+?'
-haystack = "abcabcabc"
-matches = [
-  [[0, 3], [0, 3]],
-  [[3, 6], [3, 6]],
-  [[6, 9], [6, 9]],
-]
-anchored = true
-
-# When "all" semantics are used, non-greediness doesn't exist since the longest
-# possible match is always taken.
-[[test]]
-name = "nongreedy-all"
-regex = '(abc)+?'
-haystack = "abcabcabc"
-matches = [
-  [[0, 9], [6, 9]],
-]
-anchored = true
-match-kind = "all"
-
-[[test]]
-name = "word-boundary-unicode-01"
-regex = '\b\w+\b'
-haystack = 'βββ☃'
-matches = [[0, 6]]
-anchored = true
-
-[[test]]
-name = "word-boundary-nounicode-01"
-regex = '\b\w+\b'
-haystack = 'abcβ'
-matches = [[0, 3]]
-anchored = true
-unicode = false
-
-# Tests that '.c' doesn't match 'abc' when performing an anchored search from
-# the beginning of the haystack. This test found two different bugs in the
-# PikeVM and the meta engine.
-[[test]]
-name = "no-match-at-start"
-regex = '.c'
-haystack = 'abc'
-matches = []
-anchored = true
-
-# Like above, but at a non-zero start offset.
-[[test]]
-name = "no-match-at-start-bounds"
-regex = '.c'
-haystack = 'aabc'
-bounds = [1, 4]
-matches = []
-anchored = true
-
-# This is like no-match-at-start, but hits the "reverse inner" optimization
-# inside the meta engine. (no-match-at-start hits the "reverse suffix"
-# optimization.)
-[[test]]
-name = "no-match-at-start-reverse-inner"
-regex = '.c[a-z]'
-haystack = 'abcz'
-matches = []
-anchored = true
-
-# Like above, but at a non-zero start offset.
-[[test]]
-name = "no-match-at-start-reverse-inner-bounds"
-regex = '.c[a-z]'
-haystack = 'aabcz'
-bounds = [1, 5]
-matches = []
-anchored = true
-
-# Same as no-match-at-start, but applies to the meta engine's "reverse
-# anchored" optimization.
-[[test]]
-name = "no-match-at-start-reverse-anchored"
-regex = '.c[a-z]$'
-haystack = 'abcz'
-matches = []
-anchored = true
-
-# Like above, but at a non-zero start offset.
-[[test]]
-name = "no-match-at-start-reverse-anchored-bounds"
-regex = '.c[a-z]$'
-haystack = 'aabcz'
-bounds = [1, 5]
-matches = []
-anchored = true
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/bytes.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/bytes.toml
deleted file mode 100644
index 346e369..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/bytes.toml
+++ /dev/null
@@ -1,235 +0,0 @@
-# These are tests specifically crafted for regexes that can match arbitrary
-# bytes. In some cases, we also test the Unicode variant as well, just because
-# it's good sense to do so. But also, these tests aren't really about Unicode,
-# but whether matches are only reported at valid UTF-8 boundaries. For most
-# tests in this entire collection, utf8 = true. But for these tests, we use
-# utf8 = false.
-
-[[test]]
-name = "word-boundary-ascii"
-regex = ' \b'
-haystack = " δ"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "word-boundary-unicode"
-regex = ' \b'
-haystack = " δ"
-matches = [[0, 1]]
-unicode = true
-utf8 = false
-
-[[test]]
-name = "word-boundary-ascii-not"
-regex = ' \B'
-haystack = " δ"
-matches = [[0, 1]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "word-boundary-unicode-not"
-regex = ' \B'
-haystack = " δ"
-matches = []
-unicode = true
-utf8 = false
-
-[[test]]
-name = "perl-word-ascii"
-regex = '\w+'
-haystack = "aδ"
-matches = [[0, 1]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "perl-word-unicode"
-regex = '\w+'
-haystack = "aδ"
-matches = [[0, 3]]
-unicode = true
-utf8 = false
-
-[[test]]
-name = "perl-decimal-ascii"
-regex = '\d+'
-haystack = "1à„šà„©9"
-matches = [[0, 1], [7, 8]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "perl-decimal-unicode"
-regex = '\d+'
-haystack = "1à„šà„©9"
-matches = [[0, 8]]
-unicode = true
-utf8 = false
-
-[[test]]
-name = "perl-whitespace-ascii"
-regex = '\s+'
-haystack = " \u1680"
-matches = [[0, 1]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "perl-whitespace-unicode"
-regex = '\s+'
-haystack = " \u1680"
-matches = [[0, 4]]
-unicode = true
-utf8 = false
-
-# The first `(.+)` matches two Unicode codepoints, but can't match the 5th
-# byte, which isn't valid UTF-8. The second (byte based) `(.+)` takes over and
-# matches.
-[[test]]
-name = "mixed-dot"
-regex = '(.+)(?-u)(.+)'
-haystack = '\xCE\x93\xCE\x94\xFF'
-matches = [
-  [[0, 5], [0, 4], [4, 5]],
-]
-unescape = true
-unicode = true
-utf8 = false
-
-[[test]]
-name = "case-one-ascii"
-regex = 'a'
-haystack = "A"
-matches = [[0, 1]]
-case-insensitive = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "case-one-unicode"
-regex = 'a'
-haystack = "A"
-matches = [[0, 1]]
-case-insensitive = true
-unicode = true
-utf8 = false
-
-[[test]]
-name = "case-class-simple-ascii"
-regex = '[a-z]+'
-haystack = "AaAaA"
-matches = [[0, 5]]
-case-insensitive = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "case-class-ascii"
-regex = '[a-z]+'
-haystack = "aA\u212AaA"
-matches = [[0, 2], [5, 7]]
-case-insensitive = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "case-class-unicode"
-regex = '[a-z]+'
-haystack = "aA\u212AaA"
-matches = [[0, 7]]
-case-insensitive = true
-unicode = true
-utf8 = false
-
-[[test]]
-name = "negate-ascii"
-regex = '[^a]'
-haystack = "δ"
-matches = [[0, 1], [1, 2]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "negate-unicode"
-regex = '[^a]'
-haystack = "δ"
-matches = [[0, 2]]
-unicode = true
-utf8 = false
-
-# When utf8=true, this won't match, because the implicit '.*?' prefix is
-# Unicode aware and will refuse to match through invalid UTF-8 bytes.
-[[test]]
-name = "dotstar-prefix-ascii"
-regex = 'a'
-haystack = '\xFFa'
-matches = [[1, 2]]
-unescape = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "dotstar-prefix-unicode"
-regex = 'a'
-haystack = '\xFFa'
-matches = [[1, 2]]
-unescape = true
-unicode = true
-utf8 = false
-
-[[test]]
-name = "null-bytes"
-regex = '(?P<cstr>[^\x00]+)\x00'
-haystack = 'foo\x00'
-matches = [
-  [[0, 4], [0, 3]],
-]
-unescape = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "invalid-utf8-anchor-100"
-regex = '\xCC?^'
-haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4'
-matches = [[0, 0]]
-unescape = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "invalid-utf8-anchor-200"
-regex = '^\xf7|4\xff\d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########[] d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########\[] #####\x80\S7|$'
-haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4'
-matches = [[22, 22]]
-unescape = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "invalid-utf8-anchor-300"
-regex = '^|ddp\xff\xffdddddlQd@\x80'
-haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4'
-matches = [[0, 0]]
-unescape = true
-unicode = false
-utf8 = false
-
-[[test]]
-name = "word-boundary-ascii-100"
-regex = '\Bx\B'
-haystack = "áxβ"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "word-boundary-ascii-200"
-regex = '\B'
-haystack = "0\U0007EF5E"
-matches = [[2, 2], [3, 3], [4, 4], [5, 5]]
-unicode = false
-utf8 = false
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/crazy.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/crazy.toml
deleted file mode 100644
index aed46ea1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/crazy.toml
+++ /dev/null
@@ -1,315 +0,0 @@
-[[test]]
-name = "nothing-empty"
-regex = []
-haystack = ""
-matches = []
-
-[[test]]
-name = "nothing-something"
-regex = []
-haystack = "wat"
-matches = []
-
-[[test]]
-name = "ranges"
-regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b'
-haystack = "num: 255"
-matches = [[5, 8]]
-
-[[test]]
-name = "ranges-not"
-regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b'
-haystack = "num: 256"
-matches = []
-
-[[test]]
-name = "float1"
-regex = '[-+]?[0-9]*\.?[0-9]+'
-haystack = "0.1"
-matches = [[0, 3]]
-
-[[test]]
-name = "float2"
-regex = '[-+]?[0-9]*\.?[0-9]+'
-haystack = "0.1.2"
-matches = [[0, 3]]
-match-limit = 1
-
-[[test]]
-name = "float3"
-regex = '[-+]?[0-9]*\.?[0-9]+'
-haystack = "a1.2"
-matches = [[1, 4]]
-
-[[test]]
-name = "float4"
-regex = '[-+]?[0-9]*\.?[0-9]+'
-haystack = "1.a"
-matches = [[0, 1]]
-
-[[test]]
-name = "float5"
-regex = '^[-+]?[0-9]*\.?[0-9]+$'
-haystack = "1.a"
-matches = []
-
-[[test]]
-name = "email"
-regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b'
-haystack = "mine is jam.slam@gmail.com "
-matches = [[8, 26]]
-
-[[test]]
-name = "email-not"
-regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b'
-haystack = "mine is jam.slam@gmail "
-matches = []
-
-[[test]]
-name = "email-big"
-regex = '''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?'''
-haystack = "mine is jam.slam@gmail.com "
-matches = [[8, 26]]
-
-[[test]]
-name = "date1"
-regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$'
-haystack = "1900-01-01"
-matches = [[0, 10]]
-unicode = false
-
-[[test]]
-name = "date2"
-regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$'
-haystack = "1900-00-01"
-matches = []
-unicode = false
-
-[[test]]
-name = "date3"
-regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$'
-haystack = "1900-13-01"
-matches = []
-unicode = false
-
-[[test]]
-name = "start-end-empty"
-regex = '^$'
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "start-end-empty-rev"
-regex = '$^'
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "start-end-empty-many-1"
-regex = '^$^$^$'
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "start-end-empty-many-2"
-regex = '^^^$$$'
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "start-end-empty-rep"
-regex = '(?:^$)*'
-haystack = "a\nb\nc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "start-end-empty-rep-rev"
-regex = '(?:$^)*'
-haystack = "a\nb\nc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "neg-class-letter"
-regex = '[^ac]'
-haystack = "acx"
-matches = [[2, 3]]
-
-[[test]]
-name = "neg-class-letter-comma"
-regex = '[^a,]'
-haystack = "a,x"
-matches = [[2, 3]]
-
-[[test]]
-name = "neg-class-letter-space"
-regex = '[^a[:space:]]'
-haystack = "a x"
-matches = [[2, 3]]
-
-[[test]]
-name = "neg-class-comma"
-regex = '[^,]'
-haystack = ",,x"
-matches = [[2, 3]]
-
-[[test]]
-name = "neg-class-space"
-regex = '[^[:space:]]'
-haystack = " a"
-matches = [[1, 2]]
-
-[[test]]
-name = "neg-class-space-comma"
-regex = '[^,[:space:]]'
-haystack = ", a"
-matches = [[2, 3]]
-
-[[test]]
-name = "neg-class-comma-space"
-regex = '[^[:space:],]'
-haystack = " ,a"
-matches = [[2, 3]]
-
-[[test]]
-name = "neg-class-ascii"
-regex = '[^[:alpha:]Z]'
-haystack = "A1"
-matches = [[1, 2]]
-
-[[test]]
-name = "lazy-many-many"
-regex = '(?:(?:.*)*?)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "lazy-many-optional"
-regex = '(?:(?:.?)*?)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "lazy-one-many-many"
-regex = '(?:(?:.*)+?)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "lazy-one-many-optional"
-regex = '(?:(?:.?)+?)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "lazy-range-min-many"
-regex = '(?:(?:.*){1,}?)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "lazy-range-many"
-regex = '(?:(?:.*){1,2}?)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "greedy-many-many"
-regex = '(?:(?:.*)*)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "greedy-many-optional"
-regex = '(?:(?:.?)*)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "greedy-one-many-many"
-regex = '(?:(?:.*)+)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "greedy-one-many-optional"
-regex = '(?:(?:.?)+)='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "greedy-range-min-many"
-regex = '(?:(?:.*){1,})='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "greedy-range-many"
-regex = '(?:(?:.*){1,2})='
-haystack = "a=b"
-matches = [[0, 2]]
-
-[[test]]
-name = "empty1"
-regex = ''
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "empty2"
-regex = ''
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty3"
-regex = '(?:)'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty4"
-regex = '(?:)*'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty5"
-regex = '(?:)+'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty6"
-regex = '(?:)?'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty7"
-regex = '(?:)(?:)'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty8"
-regex = '(?:)+|z'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty9"
-regex = 'z|(?:)+'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty10"
-regex = '(?:)+|b'
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty11"
-regex = 'b|(?:)+'
-haystack = "abc"
-matches = [[0, 0], [1, 2], [3, 3]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/crlf.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/crlf.toml
deleted file mode 100644
index 9e2d3761..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/crlf.toml
+++ /dev/null
@@ -1,117 +0,0 @@
-# This is a basic test that checks ^ and $ treat \r\n as a single line
-# terminator. If ^ and $ only treated \n as a line terminator, then this would
-# only match 'xyz' at the end of the haystack.
-[[test]]
-name = "basic"
-regex = '(?mR)^[a-z]+$'
-haystack = "abc\r\ndef\r\nxyz"
-matches = [[0, 3], [5, 8], [10, 13]]
-
-# Tests that a CRLF-aware '^$' assertion does not match between CR and LF.
-[[test]]
-name = "start-end-non-empty"
-regex = '(?mR)^$'
-haystack = "abc\r\ndef\r\nxyz"
-matches = []
-
-# Tests that a CRLF-aware '^$' assertion matches the empty string, just like
-# a non-CRLF-aware '^$' assertion.
-[[test]]
-name = "start-end-empty"
-regex = '(?mR)^$'
-haystack = ""
-matches = [[0, 0]]
-
-# Tests that a CRLF-aware '^$' assertion matches the empty string preceding
-# and following a line terminator.
-[[test]]
-name = "start-end-before-after"
-regex = '(?mR)^$'
-haystack = "\r\n"
-matches = [[0, 0], [2, 2]]
-
-# Tests that a CRLF-aware '^' assertion does not split a line terminator.
-[[test]]
-name = "start-no-split"
-regex = '(?mR)^'
-haystack = "abc\r\ndef\r\nxyz"
-matches = [[0, 0], [5, 5], [10, 10]]
-
-# Same as above, but with adjacent runs of line terminators.
-[[test]]
-name = "start-no-split-adjacent"
-regex = '(?mR)^'
-haystack = "\r\n\r\n\r\n"
-matches = [[0, 0], [2, 2], [4, 4], [6, 6]]
-
-# Same as above, but with adjacent runs of just carriage returns.
-[[test]]
-name = "start-no-split-adjacent-cr"
-regex = '(?mR)^'
-haystack = "\r\r\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-# Same as above, but with adjacent runs of just line feeds.
-[[test]]
-name = "start-no-split-adjacent-lf"
-regex = '(?mR)^'
-haystack = "\n\n\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-# Tests that a CRLF-aware '$' assertion does not split a line terminator.
-[[test]]
-name = "end-no-split"
-regex = '(?mR)$'
-haystack = "abc\r\ndef\r\nxyz"
-matches = [[3, 3], [8, 8], [13, 13]]
-
-# Same as above, but with adjacent runs of line terminators.
-[[test]]
-name = "end-no-split-adjacent"
-regex = '(?mR)$'
-haystack = "\r\n\r\n\r\n"
-matches = [[0, 0], [2, 2], [4, 4], [6, 6]]
-
-# Same as above, but with adjacent runs of just carriage returns.
-[[test]]
-name = "end-no-split-adjacent-cr"
-regex = '(?mR)$'
-haystack = "\r\r\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-# Same as above, but with adjacent runs of just line feeds.
-[[test]]
-name = "end-no-split-adjacent-lf"
-regex = '(?mR)$'
-haystack = "\n\n\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-# Tests that '.' does not match either \r or \n when CRLF mode is enabled. Note
-# that this doesn't require multi-line mode to be enabled.
-[[test]]
-name = "dot-no-crlf"
-regex = '(?R).'
-haystack = "\r\n\r\n\r\n"
-matches = []
-
-# This is a test that caught a bug in the one-pass DFA where it (amazingly) was
-# using 'is_end_lf' instead of 'is_end_crlf' here. It was probably a copy &
-# paste bug. We insert an empty capture group here because it provokes the meta
-# regex engine to first find a match and then trip over a panic because the
-# one-pass DFA erroneously says there is no match.
-[[test]]
-name = "onepass-wrong-crlf-with-capture"
-regex = '(?Rm:().$)'
-haystack = "ZZ\r"
-matches = [[[1, 2], [1, 1]]]
-
-# This is like onepass-wrong-crlf-with-capture above, except it sets up the
-# test so that it can be run by the one-pass DFA directly. (i.e., Make it
-# anchored and start the search at the right place.)
-[[test]]
-name = "onepass-wrong-crlf-anchored"
-regex = '(?Rm:.$)'
-haystack = "ZZ\r"
-matches = [[1, 2]]
-anchored = true
-bounds = [1, 3]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/earliest.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/earliest.toml
deleted file mode 100644
index 9516893..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/earliest.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-[[test]]
-name = "no-greedy-100"
-regex = 'a+'
-haystack = "aaa"
-matches = [[0, 1], [1, 2], [2, 3]]
-search-kind = "earliest"
-
-[[test]]
-name = "no-greedy-200"
-regex = 'abc+'
-haystack = "zzzabccc"
-matches = [[3, 6]]
-search-kind = "earliest"
-
-[[test]]
-name = "is-ungreedy"
-regex = 'a+?'
-haystack = "aaa"
-matches = [[0, 1], [1, 2], [2, 3]]
-search-kind = "earliest"
-
-[[test]]
-name = "look-start-test"
-regex = '^(abc|a)'
-haystack = "abc"
-matches = [
-  [[0, 1], [0, 1]],
-]
-search-kind = "earliest"
-
-[[test]]
-name = "look-end-test"
-regex = '(abc|a)$'
-haystack = "abc"
-matches = [
-  [[0, 3], [0, 3]],
-]
-search-kind = "earliest"
-
-[[test]]
-name = "no-leftmost-first-100"
-regex = 'abc|a'
-haystack = "abc"
-matches = [[0, 1]]
-search-kind = "earliest"
-
-[[test]]
-name = "no-leftmost-first-200"
-regex = 'aba|a'
-haystack = "aba"
-matches = [[0, 1], [2, 3]]
-search-kind = "earliest"
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/empty.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/empty.toml
deleted file mode 100644
index 7dfd802..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/empty.toml
+++ /dev/null
@@ -1,113 +0,0 @@
-[[test]]
-name = "100"
-regex = "|b"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "110"
-regex = "b|"
-haystack = "abc"
-matches = [[0, 0], [1, 2], [3, 3]]
-
-[[test]]
-name = "120"
-regex = "|z"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "130"
-regex = "z|"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "200"
-regex = "|"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "210"
-regex = "||"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "220"
-regex = "||b"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "230"
-regex = "b||"
-haystack = "abc"
-matches = [[0, 0], [1, 2], [3, 3]]
-
-[[test]]
-name = "240"
-regex = "||z"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "300"
-regex = "(?:)|b"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "310"
-regex = "b|(?:)"
-haystack = "abc"
-matches = [[0, 0], [1, 2], [3, 3]]
-
-[[test]]
-name = "320"
-regex = "(?:|)"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "330"
-regex = "(?:|)|z"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "400"
-regex = "a(?:)|b"
-haystack = "abc"
-matches = [[0, 1], [1, 2]]
-
-[[test]]
-name = "500"
-regex = ""
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "510"
-regex = ""
-haystack = "a"
-matches = [[0, 0], [1, 1]]
-
-[[test]]
-name = "520"
-regex = ""
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "600"
-regex = '(?:|a)*'
-haystack = "aaa"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "610"
-regex = '(?:|a)+'
-haystack = "aaa"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/expensive.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/expensive.toml
deleted file mode 100644
index b70e42f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/expensive.toml
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file represent tests that may be expensive to run on some regex engines.
-# For example, tests that build a full DFA ahead of time and minimize it can
-# take a horrendously long time on regexes that are large (or result in an
-# explosion in the number of states). We group these tests together so that
-# such engines can simply skip these tests.
-
-# See: https://github.com/rust-lang/regex/issues/98
-[[test]]
-name = "regression-many-repeat-no-stack-overflow"
-regex = '^.{1,2500}'
-haystack = "a"
-matches = [[0, 1]]
-
-# This test is meant to blow the bounded backtracker's visited capacity. In
-# order to do that, we need a somewhat sizeable regex. The purpose of this
-# is to make sure there's at least one test that exercises this path in the
-# backtracker. All other tests (at time of writing) are small enough that the
-# backtracker can handle them fine.
-[[test]]
-name = "backtrack-blow-visited-capacity"
-regex = '\pL{50}'
-haystack = "abcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyZZ"
-matches = [[0, 50], [50, 100], [100, 150]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/flags.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/flags.toml
deleted file mode 100644
index 30b412c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/flags.toml
+++ /dev/null
@@ -1,68 +0,0 @@
-[[test]]
-name = "1"
-regex = "(?i)abc"
-haystack = "ABC"
-matches = [[0, 3]]
-
-[[test]]
-name = "2"
-regex = "(?i)a(?-i)bc"
-haystack = "Abc"
-matches = [[0, 3]]
-
-[[test]]
-name = "3"
-regex = "(?i)a(?-i)bc"
-haystack = "ABC"
-matches = []
-
-[[test]]
-name = "4"
-regex = "(?is)a."
-haystack = "A\n"
-matches = [[0, 2]]
-
-[[test]]
-name = "5"
-regex = "(?is)a.(?-is)a."
-haystack = "A\nab"
-matches = [[0, 4]]
-
-[[test]]
-name = "6"
-regex = "(?is)a.(?-is)a."
-haystack = "A\na\n"
-matches = []
-
-[[test]]
-name = "7"
-regex = "(?is)a.(?-is:a.)?"
-haystack = "A\na\n"
-matches = [[0, 2]]
-match-limit = 1
-
-[[test]]
-name = "8"
-regex = "(?U)a+"
-haystack = "aa"
-matches = [[0, 1]]
-match-limit = 1
-
-[[test]]
-name = "9"
-regex = "(?U)a+?"
-haystack = "aa"
-matches = [[0, 2]]
-
-[[test]]
-name = "10"
-regex = "(?U)(?-U)a+"
-haystack = "aa"
-matches = [[0, 2]]
-
-[[test]]
-name = "11"
-regex = '(?m)(?:^\d+$\n?)+'
-haystack = "123\n456\n789"
-matches = [[0, 11]]
-unicode = false
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/basic.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/basic.toml
deleted file mode 100644
index 92b4e4c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/basic.toml
+++ /dev/null
@@ -1,1611 +0,0 @@
-# !!! DO NOT EDIT !!!
-# Automatically generated by 'regex-cli generate fowler'.
-# Numbers in the test names correspond to the line number of the test from
-# the original dat file.
-
-[[test]]
-name = "basic3"
-regex = '''abracadabra$'''
-haystack = '''abracadabracadabra'''
-matches = [[[7, 18]]]
-match-limit = 1
-
-[[test]]
-name = "basic4"
-regex = '''a...b'''
-haystack = '''abababbb'''
-matches = [[[2, 7]]]
-match-limit = 1
-
-[[test]]
-name = "basic5"
-regex = '''XXXXXX'''
-haystack = '''..XXXXXX'''
-matches = [[[2, 8]]]
-match-limit = 1
-
-[[test]]
-name = "basic6"
-regex = '''\)'''
-haystack = '''()'''
-matches = [[[1, 2]]]
-match-limit = 1
-
-[[test]]
-name = "basic7"
-regex = '''a]'''
-haystack = '''a]a'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic9"
-regex = '''\}'''
-haystack = '''}'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic10"
-regex = '''\]'''
-haystack = ''']'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic12"
-regex = ''']'''
-haystack = ''']'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic15"
-regex = '''^a'''
-haystack = '''ax'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic16"
-regex = '''\^a'''
-haystack = '''a^a'''
-matches = [[[1, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic17"
-regex = '''a\^'''
-haystack = '''a^'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic18"
-regex = '''a$'''
-haystack = '''aa'''
-matches = [[[1, 2]]]
-match-limit = 1
-
-[[test]]
-name = "basic19"
-regex = '''a\$'''
-haystack = '''a$'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic20"
-regex = '''^$'''
-haystack = ''''''
-matches = [[[0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic21"
-regex = '''$^'''
-haystack = ''''''
-matches = [[[0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic22"
-regex = '''a($)'''
-haystack = '''aa'''
-matches = [[[1, 2], [2, 2]]]
-match-limit = 1
-
-[[test]]
-name = "basic23"
-regex = '''a*(^a)'''
-haystack = '''aa'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic24"
-regex = '''(..)*(...)*'''
-haystack = '''a'''
-matches = [[[0, 0], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic25"
-regex = '''(..)*(...)*'''
-haystack = '''abcd'''
-matches = [[[0, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic26"
-regex = '''(ab|a)(bc|c)'''
-haystack = '''abc'''
-matches = [[[0, 3], [0, 2], [2, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic27"
-regex = '''(ab)c|abc'''
-haystack = '''abc'''
-matches = [[[0, 3], [0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic28"
-regex = '''a{0}b'''
-haystack = '''ab'''
-matches = [[[1, 2]]]
-match-limit = 1
-
-[[test]]
-name = "basic29"
-regex = '''(a*)(b?)(b+)b{3}'''
-haystack = '''aaabbbbbbb'''
-matches = [[[0, 10], [0, 3], [3, 4], [4, 7]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic30"
-regex = '''(a*)(b{0,1})(b{1,})b{3}'''
-haystack = '''aaabbbbbbb'''
-matches = [[[0, 10], [0, 3], [3, 4], [4, 7]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic32"
-regex = '''((a|a)|a)'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic33"
-regex = '''(a*)(a|aa)'''
-haystack = '''aaaa'''
-matches = [[[0, 4], [0, 3], [3, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic34"
-regex = '''a*(a.|aa)'''
-haystack = '''aaaa'''
-matches = [[[0, 4], [2, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic35"
-regex = '''a(b)|c(d)|a(e)f'''
-haystack = '''aef'''
-matches = [[[0, 3], [], [], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic36"
-regex = '''(a|b)?.*'''
-haystack = '''b'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic37"
-regex = '''(a|b)c|a(b|c)'''
-haystack = '''ac'''
-matches = [[[0, 2], [0, 1], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic38"
-regex = '''(a|b)c|a(b|c)'''
-haystack = '''ab'''
-matches = [[[0, 2], [], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic39"
-regex = '''(a|b)*c|(a|ab)*c'''
-haystack = '''abc'''
-matches = [[[0, 3], [1, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic40"
-regex = '''(a|b)*c|(a|ab)*c'''
-haystack = '''xc'''
-matches = [[[1, 2], [], []]]
-match-limit = 1
-
-[[test]]
-name = "basic41"
-regex = '''(.a|.b).*|.*(.a|.b)'''
-haystack = '''xa'''
-matches = [[[0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic42"
-regex = '''a?(ab|ba)ab'''
-haystack = '''abab'''
-matches = [[[0, 4], [0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic43"
-regex = '''a?(ac{0}b|ba)ab'''
-haystack = '''abab'''
-matches = [[[0, 4], [0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic44"
-regex = '''ab|abab'''
-haystack = '''abbabab'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic45"
-regex = '''aba|bab|bba'''
-haystack = '''baaabbbaba'''
-matches = [[[5, 8]]]
-match-limit = 1
-
-[[test]]
-name = "basic46"
-regex = '''aba|bab'''
-haystack = '''baaabbbaba'''
-matches = [[[6, 9]]]
-match-limit = 1
-
-[[test]]
-name = "basic47"
-regex = '''(aa|aaa)*|(a|aaaaa)'''
-haystack = '''aa'''
-matches = [[[0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic48"
-regex = '''(a.|.a.)*|(a|.a...)'''
-haystack = '''aa'''
-matches = [[[0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic49"
-regex = '''ab|a'''
-haystack = '''xabc'''
-matches = [[[1, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic50"
-regex = '''ab|a'''
-haystack = '''xxabc'''
-matches = [[[2, 4]]]
-match-limit = 1
-
-[[test]]
-name = "basic51"
-regex = '''(Ab|cD)*'''
-haystack = '''aBcD'''
-matches = [[[0, 4], [2, 4]]]
-match-limit = 1
-anchored = true
-case-insensitive = true
-
-[[test]]
-name = "basic52"
-regex = '''[^-]'''
-haystack = '''--a'''
-matches = [[[2, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic53"
-regex = '''[a-]*'''
-haystack = '''--a'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic54"
-regex = '''[a-m-]*'''
-haystack = '''--amoma--'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic55"
-regex = ''':::1:::0:|:::1:1:0:'''
-haystack = ''':::0:::1:::1:::0:'''
-matches = [[[8, 17]]]
-match-limit = 1
-
-[[test]]
-name = "basic56"
-regex = ''':::1:::0:|:::1:1:1:'''
-haystack = ''':::0:::1:::1:::0:'''
-matches = [[[8, 17]]]
-match-limit = 1
-
-[[test]]
-name = "basic57"
-regex = '''[[:upper:]]'''
-haystack = '''A'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic58"
-regex = '''[[:lower:]]+'''
-haystack = '''`az{'''
-matches = [[[1, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic59"
-regex = '''[[:upper:]]+'''
-haystack = '''@AZ['''
-matches = [[[1, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic65"
-regex = '''\n'''
-haystack = '''\n'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-unescape = true
-
-[[test]]
-name = "basic66"
-regex = '''\n'''
-haystack = '''\n'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-unescape = true
-
-[[test]]
-name = "basic67"
-regex = '''[^a]'''
-haystack = '''\n'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-unescape = true
-
-[[test]]
-name = "basic68"
-regex = '''\na'''
-haystack = '''\na'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-unescape = true
-
-[[test]]
-name = "basic69"
-regex = '''(a)(b)(c)'''
-haystack = '''abc'''
-matches = [[[0, 3], [0, 1], [1, 2], [2, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic70"
-regex = '''xxx'''
-haystack = '''xxx'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "basic72"
-regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)'''
-haystack = '''feb 6,'''
-matches = [[[0, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "basic74"
-regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)'''
-haystack = '''2/7'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "basic76"
-regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)'''
-haystack = '''feb 1,Feb 6'''
-matches = [[[5, 11]]]
-match-limit = 1
-
-# Test added by Rust regex project.
-[[test]]
-name = "basic78"
-regex = '''(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))'''
-haystack = '''x'''
-matches = [[[0, 1], [0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "basic80"
-regex = '''(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))*'''
-haystack = '''xx'''
-matches = [[[0, 2], [1, 2], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic81"
-regex = '''a?(ab|ba)*'''
-haystack = '''ababababababababababababababababababababababababababababababababababababababababa'''
-matches = [[[0, 81], [79, 81]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic82"
-regex = '''abaa|abbaa|abbbaa|abbbbaa'''
-haystack = '''ababbabbbabbbabbbbabbbbaa'''
-matches = [[[18, 25]]]
-match-limit = 1
-
-[[test]]
-name = "basic83"
-regex = '''abaa|abbaa|abbbaa|abbbbaa'''
-haystack = '''ababbabbbabbbabbbbabaa'''
-matches = [[[18, 22]]]
-match-limit = 1
-
-[[test]]
-name = "basic84"
-regex = '''aaac|aabc|abac|abbc|baac|babc|bbac|bbbc'''
-haystack = '''baaabbbabac'''
-matches = [[[7, 11]]]
-match-limit = 1
-
-# Test added by Rust regex project.
-[[test]]
-name = "basic86"
-regex = '''.*'''
-haystack = '''\x01\x7f'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-unescape = true
-
-[[test]]
-name = "basic87"
-regex = '''aaaa|bbbb|cccc|ddddd|eeeeee|fffffff|gggg|hhhh|iiiii|jjjjj|kkkkk|llll'''
-haystack = '''XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa'''
-matches = [[[53, 57]]]
-match-limit = 1
-
-[[test]]
-name = "basic89"
-regex = '''a*a*a*a*a*b'''
-haystack = '''aaaaaaaaab'''
-matches = [[[0, 10]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic90"
-regex = '''^'''
-haystack = ''''''
-matches = [[[0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic91"
-regex = '''$'''
-haystack = ''''''
-matches = [[[0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic92"
-regex = '''^$'''
-haystack = ''''''
-matches = [[[0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic93"
-regex = '''^a$'''
-haystack = '''a'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic94"
-regex = '''abc'''
-haystack = '''abc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic95"
-regex = '''abc'''
-haystack = '''xabcy'''
-matches = [[[1, 4]]]
-match-limit = 1
-
-[[test]]
-name = "basic96"
-regex = '''abc'''
-haystack = '''ababc'''
-matches = [[[2, 5]]]
-match-limit = 1
-
-[[test]]
-name = "basic97"
-regex = '''ab*c'''
-haystack = '''abc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic98"
-regex = '''ab*bc'''
-haystack = '''abc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic99"
-regex = '''ab*bc'''
-haystack = '''abbc'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic100"
-regex = '''ab*bc'''
-haystack = '''abbbbc'''
-matches = [[[0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic101"
-regex = '''ab+bc'''
-haystack = '''abbc'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic102"
-regex = '''ab+bc'''
-haystack = '''abbbbc'''
-matches = [[[0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic103"
-regex = '''ab?bc'''
-haystack = '''abbc'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic104"
-regex = '''ab?bc'''
-haystack = '''abc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic105"
-regex = '''ab?c'''
-haystack = '''abc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic106"
-regex = '''^abc$'''
-haystack = '''abc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic107"
-regex = '''^abc'''
-haystack = '''abcc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic108"
-regex = '''abc$'''
-haystack = '''aabc'''
-matches = [[[1, 4]]]
-match-limit = 1
-
-[[test]]
-name = "basic109"
-regex = '''^'''
-haystack = '''abc'''
-matches = [[[0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic110"
-regex = '''$'''
-haystack = '''abc'''
-matches = [[[3, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic111"
-regex = '''a.c'''
-haystack = '''abc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic112"
-regex = '''a.c'''
-haystack = '''axc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic113"
-regex = '''a.*c'''
-haystack = '''axyzc'''
-matches = [[[0, 5]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic114"
-regex = '''a[bc]d'''
-haystack = '''abd'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic115"
-regex = '''a[b-d]e'''
-haystack = '''ace'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic116"
-regex = '''a[b-d]'''
-haystack = '''aac'''
-matches = [[[1, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic117"
-regex = '''a[-b]'''
-haystack = '''a-'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic118"
-regex = '''a[b-]'''
-haystack = '''a-'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic119"
-regex = '''a]'''
-haystack = '''a]'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic120"
-regex = '''a[]]b'''
-haystack = '''a]b'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic121"
-regex = '''a[^bc]d'''
-haystack = '''aed'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic122"
-regex = '''a[^-b]c'''
-haystack = '''adc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic123"
-regex = '''a[^]b]c'''
-haystack = '''adc'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic124"
-regex = '''ab|cd'''
-haystack = '''abc'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic125"
-regex = '''ab|cd'''
-haystack = '''abcd'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic126"
-regex = '''a\(b'''
-haystack = '''a(b'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic127"
-regex = '''a\(*b'''
-haystack = '''ab'''
-matches = [[[0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic128"
-regex = '''a\(*b'''
-haystack = '''a((b'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic129"
-regex = '''((a))'''
-haystack = '''abc'''
-matches = [[[0, 1], [0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic130"
-regex = '''(a)b(c)'''
-haystack = '''abc'''
-matches = [[[0, 3], [0, 1], [2, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic131"
-regex = '''a+b+c'''
-haystack = '''aabbabc'''
-matches = [[[4, 7]]]
-match-limit = 1
-
-[[test]]
-name = "basic132"
-regex = '''a*'''
-haystack = '''aaa'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic133"
-regex = '''(a*)*'''
-haystack = '''-'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic134"
-regex = '''(a*)+'''
-haystack = '''-'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic135"
-regex = '''(a*|b)*'''
-haystack = '''-'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic136"
-regex = '''(a+|b)*'''
-haystack = '''ab'''
-matches = [[[0, 2], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic137"
-regex = '''(a+|b)+'''
-haystack = '''ab'''
-matches = [[[0, 2], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic138"
-regex = '''(a+|b)?'''
-haystack = '''ab'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic139"
-regex = '''[^ab]*'''
-haystack = '''cde'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic140"
-regex = '''(^)*'''
-haystack = '''-'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic141"
-regex = '''a*'''
-haystack = ''''''
-matches = [[[0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic142"
-regex = '''([abc])*d'''
-haystack = '''abbbcd'''
-matches = [[[0, 6], [4, 5]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic143"
-regex = '''([abc])*bcd'''
-haystack = '''abcd'''
-matches = [[[0, 4], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic144"
-regex = '''a|b|c|d|e'''
-haystack = '''e'''
-matches = [[[0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic145"
-regex = '''(a|b|c|d|e)f'''
-haystack = '''ef'''
-matches = [[[0, 2], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic146"
-regex = '''((a*|b))*'''
-haystack = '''-'''
-matches = [[[0, 0], [0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic147"
-regex = '''abcd*efg'''
-haystack = '''abcdefg'''
-matches = [[[0, 7]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic148"
-regex = '''ab*'''
-haystack = '''xabyabbbz'''
-matches = [[[1, 3]]]
-match-limit = 1
-
-[[test]]
-name = "basic149"
-regex = '''ab*'''
-haystack = '''xayabbbz'''
-matches = [[[1, 2]]]
-match-limit = 1
-
-[[test]]
-name = "basic150"
-regex = '''(ab|cd)e'''
-haystack = '''abcde'''
-matches = [[[2, 5], [2, 4]]]
-match-limit = 1
-
-[[test]]
-name = "basic151"
-regex = '''[abhgefdc]ij'''
-haystack = '''hij'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic152"
-regex = '''(a|b)c*d'''
-haystack = '''abcd'''
-matches = [[[1, 4], [1, 2]]]
-match-limit = 1
-
-[[test]]
-name = "basic153"
-regex = '''(ab|ab*)bc'''
-haystack = '''abc'''
-matches = [[[0, 3], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic154"
-regex = '''a([bc]*)c*'''
-haystack = '''abc'''
-matches = [[[0, 3], [1, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic155"
-regex = '''a([bc]*)(c*d)'''
-haystack = '''abcd'''
-matches = [[[0, 4], [1, 3], [3, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic156"
-regex = '''a([bc]+)(c*d)'''
-haystack = '''abcd'''
-matches = [[[0, 4], [1, 3], [3, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic157"
-regex = '''a([bc]*)(c+d)'''
-haystack = '''abcd'''
-matches = [[[0, 4], [1, 2], [2, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic158"
-regex = '''a[bcd]*dcdcde'''
-haystack = '''adcdcde'''
-matches = [[[0, 7]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic159"
-regex = '''(ab|a)b*c'''
-haystack = '''abc'''
-matches = [[[0, 3], [0, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic160"
-regex = '''((a)(b)c)(d)'''
-haystack = '''abcd'''
-matches = [[[0, 4], [0, 3], [0, 1], [1, 2], [3, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic161"
-regex = '''[A-Za-z_][A-Za-z0-9_]*'''
-haystack = '''alpha'''
-matches = [[[0, 5]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic162"
-regex = '''^a(bc+|b[eh])g|.h$'''
-haystack = '''abh'''
-matches = [[[1, 3], []]]
-match-limit = 1
-
-[[test]]
-name = "basic163"
-regex = '''(bc+d$|ef*g.|h?i(j|k))'''
-haystack = '''effgz'''
-matches = [[[0, 5], [0, 5], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic164"
-regex = '''(bc+d$|ef*g.|h?i(j|k))'''
-haystack = '''ij'''
-matches = [[[0, 2], [0, 2], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic165"
-regex = '''(bc+d$|ef*g.|h?i(j|k))'''
-haystack = '''reffgz'''
-matches = [[[1, 6], [1, 6], []]]
-match-limit = 1
-
-[[test]]
-name = "basic166"
-regex = '''(((((((((a)))))))))'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic167"
-regex = '''multiple words'''
-haystack = '''multiple words yeah'''
-matches = [[[0, 14]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic168"
-regex = '''(.*)c(.*)'''
-haystack = '''abcde'''
-matches = [[[0, 5], [0, 2], [3, 5]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic169"
-regex = '''abcd'''
-haystack = '''abcd'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic170"
-regex = '''a(bc)d'''
-haystack = '''abcd'''
-matches = [[[0, 4], [1, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic171"
-regex = '''a[\x01-\x03]?c'''
-haystack = '''a\x02c'''
-matches = [[[0, 3]]]
-match-limit = 1
-anchored = true
-unescape = true
-
-[[test]]
-name = "basic172"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Qaddafi'''
-matches = [[[0, 15], [], [10, 12]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic173"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Mo'ammar Gadhafi'''
-matches = [[[0, 16], [], [11, 13]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic174"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Kaddafi'''
-matches = [[[0, 15], [], [10, 12]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic175"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Qadhafi'''
-matches = [[[0, 15], [], [10, 12]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic176"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Gadafi'''
-matches = [[[0, 14], [], [10, 11]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic177"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Mu'ammar Qadafi'''
-matches = [[[0, 15], [], [11, 12]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic178"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Moamar Gaddafi'''
-matches = [[[0, 14], [], [9, 11]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic179"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Mu'ammar Qadhdhafi'''
-matches = [[[0, 18], [], [13, 15]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic180"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Khaddafi'''
-matches = [[[0, 16], [], [11, 13]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic181"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Ghaddafy'''
-matches = [[[0, 16], [], [11, 13]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic182"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Ghadafi'''
-matches = [[[0, 15], [], [11, 12]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic183"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Ghaddafi'''
-matches = [[[0, 16], [], [11, 13]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic184"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muamar Kaddafi'''
-matches = [[[0, 14], [], [9, 11]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic185"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Quathafi'''
-matches = [[[0, 16], [], [11, 13]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic186"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Muammar Gheddafi'''
-matches = [[[0, 16], [], [11, 13]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic187"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Moammar Khadafy'''
-matches = [[[0, 15], [], [11, 12]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic188"
-regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]'''
-haystack = '''Moammar Qudhafi'''
-matches = [[[0, 15], [], [10, 12]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic189"
-regex = '''a+(b|c)*d+'''
-haystack = '''aabcdd'''
-matches = [[[0, 6], [3, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic190"
-regex = '''^.+$'''
-haystack = '''vivi'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic191"
-regex = '''^(.+)$'''
-haystack = '''vivi'''
-matches = [[[0, 4], [0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic192"
-regex = '''^([^!.]+).att.com!(.+)$'''
-haystack = '''gryphon.att.com!eby'''
-matches = [[[0, 19], [0, 7], [16, 19]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic193"
-regex = '''^([^!]+!)?([^!]+)$'''
-haystack = '''bas'''
-matches = [[[0, 3], [], [0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic194"
-regex = '''^([^!]+!)?([^!]+)$'''
-haystack = '''bar!bas'''
-matches = [[[0, 7], [0, 4], [4, 7]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic195"
-regex = '''^([^!]+!)?([^!]+)$'''
-haystack = '''foo!bas'''
-matches = [[[0, 7], [0, 4], [4, 7]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic196"
-regex = '''^.+!([^!]+!)([^!]+)$'''
-haystack = '''foo!bar!bas'''
-matches = [[[0, 11], [4, 8], [8, 11]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic197"
-regex = '''((foo)|(bar))!bas'''
-haystack = '''bar!bas'''
-matches = [[[0, 7], [0, 3], [], [0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic198"
-regex = '''((foo)|(bar))!bas'''
-haystack = '''foo!bar!bas'''
-matches = [[[4, 11], [4, 7], [], [4, 7]]]
-match-limit = 1
-
-[[test]]
-name = "basic199"
-regex = '''((foo)|(bar))!bas'''
-haystack = '''foo!bas'''
-matches = [[[0, 7], [0, 3], [0, 3], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic200"
-regex = '''((foo)|bar)!bas'''
-haystack = '''bar!bas'''
-matches = [[[0, 7], [0, 3], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic201"
-regex = '''((foo)|bar)!bas'''
-haystack = '''foo!bar!bas'''
-matches = [[[4, 11], [4, 7], []]]
-match-limit = 1
-
-[[test]]
-name = "basic202"
-regex = '''((foo)|bar)!bas'''
-haystack = '''foo!bas'''
-matches = [[[0, 7], [0, 3], [0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic203"
-regex = '''(foo|(bar))!bas'''
-haystack = '''bar!bas'''
-matches = [[[0, 7], [0, 3], [0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic204"
-regex = '''(foo|(bar))!bas'''
-haystack = '''foo!bar!bas'''
-matches = [[[4, 11], [4, 7], [4, 7]]]
-match-limit = 1
-
-[[test]]
-name = "basic205"
-regex = '''(foo|(bar))!bas'''
-haystack = '''foo!bas'''
-matches = [[[0, 7], [0, 3], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic206"
-regex = '''(foo|bar)!bas'''
-haystack = '''bar!bas'''
-matches = [[[0, 7], [0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic207"
-regex = '''(foo|bar)!bas'''
-haystack = '''foo!bar!bas'''
-matches = [[[4, 11], [4, 7]]]
-match-limit = 1
-
-[[test]]
-name = "basic208"
-regex = '''(foo|bar)!bas'''
-haystack = '''foo!bas'''
-matches = [[[0, 7], [0, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic209"
-regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$'''
-haystack = '''foo!bar!bas'''
-matches = [[[0, 11], [0, 11], [], [], [4, 8], [8, 11]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic210"
-regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$'''
-haystack = '''bas'''
-matches = [[[0, 3], [], [0, 3], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic211"
-regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$'''
-haystack = '''bar!bas'''
-matches = [[[0, 7], [0, 4], [4, 7], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic212"
-regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$'''
-haystack = '''foo!bar!bas'''
-matches = [[[0, 11], [], [], [4, 8], [8, 11]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic213"
-regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$'''
-haystack = '''foo!bas'''
-matches = [[[0, 7], [0, 4], [4, 7], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic214"
-regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$'''
-haystack = '''bas'''
-matches = [[[0, 3], [0, 3], [], [0, 3], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic215"
-regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$'''
-haystack = '''bar!bas'''
-matches = [[[0, 7], [0, 7], [0, 4], [4, 7], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic216"
-regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$'''
-haystack = '''foo!bar!bas'''
-matches = [[[0, 11], [0, 11], [], [], [4, 8], [8, 11]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic217"
-regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$'''
-haystack = '''foo!bas'''
-matches = [[[0, 7], [0, 7], [0, 4], [4, 7], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic218"
-regex = '''.*(/XXX).*'''
-haystack = '''/XXX'''
-matches = [[[0, 4], [0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic219"
-regex = '''.*(\\XXX).*'''
-haystack = '''\XXX'''
-matches = [[[0, 4], [0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic220"
-regex = '''\\XXX'''
-haystack = '''\XXX'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic221"
-regex = '''.*(/000).*'''
-haystack = '''/000'''
-matches = [[[0, 4], [0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic222"
-regex = '''.*(\\000).*'''
-haystack = '''\000'''
-matches = [[[0, 4], [0, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "basic223"
-regex = '''\\000'''
-haystack = '''\000'''
-matches = [[[0, 4]]]
-match-limit = 1
-anchored = true
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/README b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/README
deleted file mode 100644
index 242a0e6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/README
+++ /dev/null
@@ -1,25 +0,0 @@
-Test data was taken from the Go distribution, which was in turn taken from the
-testregex test suite:
-
-  http://web.archive.org/web/20150925124103/http://www2.research.att.com/~astopen/testregex/testregex.html
-
-Unfortunately, the original web site now appears dead, but the test data lives
-on.
-
-The LICENSE in this directory corresponds to the LICENSE that the data was
-originally released under.
-
-The tests themselves were modified for RE2/Go (and marked as such). A
-couple were modified further by me (Andrew Gallant) and marked with 'Rust'.
-
-After some number of years, these tests were transformed into a TOML format
-using the 'regex-cli generate fowler' command. To re-generate the
-TOML files, run the following from the root of this repository:
-
-  regex-cli generate fowler tests/data/fowler tests/data/fowler/dat/*.dat
-
-This assumes that you have 'regex-cli' installed. See 'regex-cli/README.md'
-from the root of the repository for more information.
-
-This brings the Fowler tests into a more "sensible" structured format in which
-other tests can be written such that they aren't write-only.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/basic.dat b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/basic.dat
deleted file mode 100644
index 654a72b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/basic.dat
+++ /dev/null
@@ -1,223 +0,0 @@
-NOTE	all standard compliant implementations should pass these : 2002-05-31
-
-BE	abracadabra$	abracadabracadabra	(7,18)
-BE	a...b		abababbb		(2,7)
-BE	XXXXXX		..XXXXXX		(2,8)
-E	\)		()	(1,2)
-BE	a]		a]a	(0,2)
-B	}		}	(0,1)
-E	\}		}	(0,1)
-BE	\]		]	(0,1)
-B	]		]	(0,1)
-E	]		]	(0,1)
-B	{		{	(0,1)
-B	}		}	(0,1)
-BE	^a		ax	(0,1)
-BE	\^a		a^a	(1,3)
-BE	a\^		a^	(0,2)
-BE	a$		aa	(1,2)
-BE	a\$		a$	(0,2)
-BE	^$		NULL	(0,0)
-E	$^		NULL	(0,0)
-E	a($)		aa	(1,2)(2,2)
-E	a*(^a)		aa	(0,1)(0,1)
-E	(..)*(...)*		a	(0,0)
-E	(..)*(...)*		abcd	(0,4)(2,4)
-E	(ab|a)(bc|c)		abc	(0,3)(0,2)(2,3)
-E	(ab)c|abc		abc	(0,3)(0,2)
-E	a{0}b		ab			(1,2)
-E	(a*)(b?)(b+)b{3}	aaabbbbbbb	(0,10)(0,3)(3,4)(4,7)
-E	(a*)(b{0,1})(b{1,})b{3}	aaabbbbbbb	(0,10)(0,3)(3,4)(4,7)
-E	a{9876543210}	NULL	BADBR
-E	((a|a)|a)			a	(0,1)(0,1)(0,1)
-E	(a*)(a|aa)			aaaa	(0,4)(0,3)(3,4)
-E	a*(a.|aa)			aaaa	(0,4)(2,4)
-E	a(b)|c(d)|a(e)f			aef	(0,3)(?,?)(?,?)(1,2)
-E	(a|b)?.*			b	(0,1)(0,1)
-E	(a|b)c|a(b|c)			ac	(0,2)(0,1)
-E	(a|b)c|a(b|c)			ab	(0,2)(?,?)(1,2)
-E	(a|b)*c|(a|ab)*c		abc	(0,3)(1,2)
-E	(a|b)*c|(a|ab)*c		xc	(1,2)
-E	(.a|.b).*|.*(.a|.b)		xa	(0,2)(0,2)
-E	a?(ab|ba)ab			abab	(0,4)(0,2)
-E	a?(ac{0}b|ba)ab			abab	(0,4)(0,2)
-E	ab|abab				abbabab	(0,2)
-E	aba|bab|bba			baaabbbaba	(5,8)
-E	aba|bab				baaabbbaba	(6,9)
-E	(aa|aaa)*|(a|aaaaa)		aa	(0,2)(0,2)
-E	(a.|.a.)*|(a|.a...)		aa	(0,2)(0,2)
-E	ab|a				xabc	(1,3)
-E	ab|a				xxabc	(2,4)
-Ei	(Ab|cD)*			aBcD	(0,4)(2,4)
-BE	[^-]			--a		(2,3)
-BE	[a-]*			--a		(0,3)
-BE	[a-m-]*			--amoma--	(0,4)
-E	:::1:::0:|:::1:1:0:	:::0:::1:::1:::0:	(8,17)
-E	:::1:::0:|:::1:1:1:	:::0:::1:::1:::0:	(8,17)
-{E	[[:upper:]]		A		(0,1)	[[<element>]] not supported
-E	[[:lower:]]+		`az{		(1,3)
-E	[[:upper:]]+		@AZ[		(1,3)
-# No collation in Go
-#BE	[[-]]			[[-]]		(2,4)
-#BE	[[.NIL.]]	NULL	ECOLLATE
-#BE	[[=aleph=]]	NULL	ECOLLATE
-}
-BE$	\n		\n	(0,1)
-BEn$	\n		\n	(0,1)
-BE$	[^a]		\n	(0,1)
-BE$	\na		\na	(0,2)
-E	(a)(b)(c)	abc	(0,3)(0,1)(1,2)(2,3)
-BE	xxx		xxx	(0,3)
-#E1	(^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$)	feb 6,	(0,6)
-E	(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)	feb 6,	(0,6)	Rust
-#E1	(^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$)	2/7	(0,3)
-E	(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)	2/7	(0,3)	Rust
-#E1	(^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$)	feb 1,Feb 6	(5,11)
-E	(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)	feb 1,Feb 6	(5,11)	Rust
-#E3	((((((((((((((((((((((((((((((x))))))))))))))))))))))))))))))	x	(0,1)(0,1)(0,1)
-E	(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))	x	(0,1)(0,1)(0,1)	Rust
-#E3	((((((((((((((((((((((((((((((x))))))))))))))))))))))))))))))*	xx	(0,2)(1,2)(1,2)
-E	(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))*	xx	(0,2)(1,2)(1,2)	Rust
-E	a?(ab|ba)*	ababababababababababababababababababababababababababababababababababababababababa	(0,81)(79,81)
-E	abaa|abbaa|abbbaa|abbbbaa	ababbabbbabbbabbbbabbbbaa	(18,25)
-E	abaa|abbaa|abbbaa|abbbbaa	ababbabbbabbbabbbbabaa	(18,22)
-E	aaac|aabc|abac|abbc|baac|babc|bbac|bbbc	baaabbbabac	(7,11)
-#BE$	.*			\x01\xff	(0,2)
-BE$	.*			\x01\x7f	(0,2)	Rust
-E	aaaa|bbbb|cccc|ddddd|eeeeee|fffffff|gggg|hhhh|iiiii|jjjjj|kkkkk|llll		XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa	(53,57)
-L	aaaa\nbbbb\ncccc\nddddd\neeeeee\nfffffff\ngggg\nhhhh\niiiii\njjjjj\nkkkkk\nllll		XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa	NOMATCH
-E	a*a*a*a*a*b		aaaaaaaaab	(0,10)
-BE	^			NULL		(0,0)
-BE	$			NULL		(0,0)
-BE	^$			NULL		(0,0)
-BE	^a$			a		(0,1)
-BE	abc			abc		(0,3)
-BE	abc			xabcy		(1,4)
-BE	abc			ababc		(2,5)
-BE	ab*c			abc		(0,3)
-BE	ab*bc			abc		(0,3)
-BE	ab*bc			abbc		(0,4)
-BE	ab*bc			abbbbc		(0,6)
-E	ab+bc			abbc		(0,4)
-E	ab+bc			abbbbc		(0,6)
-E	ab?bc			abbc		(0,4)
-E	ab?bc			abc		(0,3)
-E	ab?c			abc		(0,3)
-BE	^abc$			abc		(0,3)
-BE	^abc			abcc		(0,3)
-BE	abc$			aabc		(1,4)
-BE	^			abc		(0,0)
-BE	$			abc		(3,3)
-BE	a.c			abc		(0,3)
-BE	a.c			axc		(0,3)
-BE	a.*c			axyzc		(0,5)
-BE	a[bc]d			abd		(0,3)
-BE	a[b-d]e			ace		(0,3)
-BE	a[b-d]			aac		(1,3)
-BE	a[-b]			a-		(0,2)
-BE	a[b-]			a-		(0,2)
-BE	a]			a]		(0,2)
-BE	a[]]b			a]b		(0,3)
-BE	a[^bc]d			aed		(0,3)
-BE	a[^-b]c			adc		(0,3)
-BE	a[^]b]c			adc		(0,3)
-E	ab|cd			abc		(0,2)
-E	ab|cd			abcd		(0,2)
-E	a\(b			a(b		(0,3)
-E	a\(*b			ab		(0,2)
-E	a\(*b			a((b		(0,4)
-E	((a))			abc		(0,1)(0,1)(0,1)
-E	(a)b(c)			abc		(0,3)(0,1)(2,3)
-E	a+b+c			aabbabc		(4,7)
-E	a*			aaa		(0,3)
-E	(a*)*			-		(0,0)(0,0)
-E	(a*)+			-		(0,0)(0,0)
-E	(a*|b)*			-		(0,0)(0,0)
-E	(a+|b)*			ab		(0,2)(1,2)
-E	(a+|b)+			ab		(0,2)(1,2)
-E	(a+|b)?			ab		(0,1)(0,1)
-BE	[^ab]*			cde		(0,3)
-E	(^)*			-		(0,0)(0,0)
-BE	a*			NULL		(0,0)
-E	([abc])*d		abbbcd		(0,6)(4,5)
-E	([abc])*bcd		abcd		(0,4)(0,1)
-E	a|b|c|d|e		e		(0,1)
-E	(a|b|c|d|e)f		ef		(0,2)(0,1)
-E	((a*|b))*		-		(0,0)(0,0)(0,0)
-BE	abcd*efg		abcdefg		(0,7)
-BE	ab*			xabyabbbz	(1,3)
-BE	ab*			xayabbbz	(1,2)
-E	(ab|cd)e		abcde		(2,5)(2,4)
-BE	[abhgefdc]ij		hij		(0,3)
-E	(a|b)c*d		abcd		(1,4)(1,2)
-E	(ab|ab*)bc		abc		(0,3)(0,1)
-E	a([bc]*)c*		abc		(0,3)(1,3)
-E	a([bc]*)(c*d)		abcd		(0,4)(1,3)(3,4)
-E	a([bc]+)(c*d)		abcd		(0,4)(1,3)(3,4)
-E	a([bc]*)(c+d)		abcd		(0,4)(1,2)(2,4)
-E	a[bcd]*dcdcde		adcdcde		(0,7)
-E	(ab|a)b*c		abc		(0,3)(0,2)
-E	((a)(b)c)(d)		abcd		(0,4)(0,3)(0,1)(1,2)(3,4)
-BE	[A-Za-z_][A-Za-z0-9_]*	alpha		(0,5)
-E	^a(bc+|b[eh])g|.h$	abh		(1,3)
-E	(bc+d$|ef*g.|h?i(j|k))	effgz		(0,5)(0,5)
-E	(bc+d$|ef*g.|h?i(j|k))	ij		(0,2)(0,2)(1,2)
-E	(bc+d$|ef*g.|h?i(j|k))	reffgz		(1,6)(1,6)
-E	(((((((((a)))))))))	a		(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)
-BE	multiple words		multiple words yeah	(0,14)
-E	(.*)c(.*)		abcde		(0,5)(0,2)(3,5)
-BE	abcd			abcd		(0,4)
-E	a(bc)d			abcd		(0,4)(1,3)
-E	a[-]?c		ac		(0,3)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Qaddafi	(0,15)(?,?)(10,12)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Mo'ammar Gadhafi	(0,16)(?,?)(11,13)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Kaddafi	(0,15)(?,?)(10,12)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Qadhafi	(0,15)(?,?)(10,12)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Gadafi	(0,14)(?,?)(10,11)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Mu'ammar Qadafi	(0,15)(?,?)(11,12)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Moamar Gaddafi	(0,14)(?,?)(9,11)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Mu'ammar Qadhdhafi	(0,18)(?,?)(13,15)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Khaddafi	(0,16)(?,?)(11,13)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Ghaddafy	(0,16)(?,?)(11,13)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Ghadafi	(0,15)(?,?)(11,12)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Ghaddafi	(0,16)(?,?)(11,13)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muamar Kaddafi	(0,14)(?,?)(9,11)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Quathafi	(0,16)(?,?)(11,13)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Muammar Gheddafi	(0,16)(?,?)(11,13)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Moammar Khadafy	(0,15)(?,?)(11,12)
-E	M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]	Moammar Qudhafi	(0,15)(?,?)(10,12)
-E	a+(b|c)*d+		aabcdd			(0,6)(3,4)
-E	^.+$			vivi			(0,4)
-E	^(.+)$			vivi			(0,4)(0,4)
-E	^([^!.]+).att.com!(.+)$	gryphon.att.com!eby	(0,19)(0,7)(16,19)
-E	^([^!]+!)?([^!]+)$	bas			(0,3)(?,?)(0,3)
-E	^([^!]+!)?([^!]+)$	bar!bas			(0,7)(0,4)(4,7)
-E	^([^!]+!)?([^!]+)$	foo!bas			(0,7)(0,4)(4,7)
-E	^.+!([^!]+!)([^!]+)$	foo!bar!bas		(0,11)(4,8)(8,11)
-E	((foo)|(bar))!bas	bar!bas			(0,7)(0,3)(?,?)(0,3)
-E	((foo)|(bar))!bas	foo!bar!bas		(4,11)(4,7)(?,?)(4,7)
-E	((foo)|(bar))!bas	foo!bas			(0,7)(0,3)(0,3)
-E	((foo)|bar)!bas		bar!bas			(0,7)(0,3)
-E	((foo)|bar)!bas		foo!bar!bas		(4,11)(4,7)
-E	((foo)|bar)!bas		foo!bas			(0,7)(0,3)(0,3)
-E	(foo|(bar))!bas		bar!bas			(0,7)(0,3)(0,3)
-E	(foo|(bar))!bas		foo!bar!bas		(4,11)(4,7)(4,7)
-E	(foo|(bar))!bas		foo!bas			(0,7)(0,3)
-E	(foo|bar)!bas		bar!bas			(0,7)(0,3)
-E	(foo|bar)!bas		foo!bar!bas		(4,11)(4,7)
-E	(foo|bar)!bas		foo!bas			(0,7)(0,3)
-E	^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$	foo!bar!bas	(0,11)(0,11)(?,?)(?,?)(4,8)(8,11)
-E	^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$	bas		(0,3)(?,?)(0,3)
-E	^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$	bar!bas		(0,7)(0,4)(4,7)
-E	^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$	foo!bar!bas	(0,11)(?,?)(?,?)(4,8)(8,11)
-E	^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$	foo!bas		(0,7)(0,4)(4,7)
-E	^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$	bas		(0,3)(0,3)(?,?)(0,3)
-E	^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$	bar!bas		(0,7)(0,7)(0,4)(4,7)
-E	^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$	foo!bar!bas	(0,11)(0,11)(?,?)(?,?)(4,8)(8,11)
-E	^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$	foo!bas		(0,7)(0,7)(0,4)(4,7)
-E	.*(/XXX).*			/XXX			(0,4)(0,4)
-E	.*(\\XXX).*			\XXX			(0,4)(0,4)
-E	\\XXX				\XXX			(0,4)
-E	.*(/000).*			/000			(0,4)(0,4)
-E	.*(\\000).*			\000			(0,4)(0,4)
-E	\\000				\000			(0,4)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/nullsubexpr.dat b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/nullsubexpr.dat
deleted file mode 100644
index a944306..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/nullsubexpr.dat
+++ /dev/null
@@ -1,74 +0,0 @@
-NOTE	null subexpression matches : 2002-06-06
-
-E	(a*)*		a		(0,1)(0,1)
-E	SAME		x		(0,0)(0,0)
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		aaaaaax		(0,6)(0,6)
-E	(a*)+		a		(0,1)(0,1)
-E	SAME		x		(0,0)(0,0)
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		aaaaaax		(0,6)(0,6)
-E	(a+)*		a		(0,1)(0,1)
-E	SAME		x		(0,0)
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		aaaaaax		(0,6)(0,6)
-E	(a+)+		a		(0,1)(0,1)
-E	SAME		x		NOMATCH
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		aaaaaax		(0,6)(0,6)
-
-E	([a]*)*		a		(0,1)(0,1)
-E	SAME		x		(0,0)(0,0)
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		aaaaaax		(0,6)(0,6)
-E	([a]*)+		a		(0,1)(0,1)
-E	SAME		x		(0,0)(0,0)
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		aaaaaax		(0,6)(0,6)
-E	([^b]*)*	a		(0,1)(0,1)
-E	SAME		b		(0,0)(0,0)
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		aaaaaab		(0,6)(0,6)
-E	([ab]*)*	a		(0,1)(0,1)
-E	SAME		aaaaaa		(0,6)(0,6)
-E	SAME		ababab		(0,6)(0,6)
-E	SAME		bababa		(0,6)(0,6)
-E	SAME		b		(0,1)(0,1)
-E	SAME		bbbbbb		(0,6)(0,6)
-E	SAME		aaaabcde	(0,5)(0,5)
-E	([^a]*)*	b		(0,1)(0,1)
-E	SAME		bbbbbb		(0,6)(0,6)
-E	SAME		aaaaaa		(0,0)(0,0)
-E	([^ab]*)*	ccccxx		(0,6)(0,6)
-E	SAME		ababab		(0,0)(0,0)
-
-#E	((z)+|a)*	zabcde		(0,2)(1,2)
-E	((z)+|a)*	zabcde		(0,2)(1,2)(0,1)	Rust
-
-#{E	a+?		aaaaaa		(0,1)	no *? +? mimimal match ops
-#E	(a)		aaa		(0,1)(0,1)
-#E	(a*?)		aaa		(0,0)(0,0)
-#E	(a)*?		aaa		(0,0)
-#E	(a*?)*?		aaa		(0,0)
-#}
-
-B	\(a*\)*\(x\)		x	(0,1)(0,0)(0,1)
-B	\(a*\)*\(x\)		ax	(0,2)(0,1)(1,2)
-B	\(a*\)*\(x\)		axa	(0,2)(0,1)(1,2)
-B	\(a*\)*\(x\)\(\1\)	x	(0,1)(0,0)(0,1)(1,1)
-B	\(a*\)*\(x\)\(\1\)	ax	(0,2)(1,1)(1,2)(2,2)
-B	\(a*\)*\(x\)\(\1\)	axa	(0,3)(0,1)(1,2)(2,3)
-B	\(a*\)*\(x\)\(\1\)\(x\)	axax	(0,4)(0,1)(1,2)(2,3)(3,4)
-B	\(a*\)*\(x\)\(\1\)\(x\)	axxa	(0,3)(1,1)(1,2)(2,2)(2,3)
-
-E	(a*)*(x)		x	(0,1)(0,0)(0,1)
-E	(a*)*(x)		ax	(0,2)(0,1)(1,2)
-E	(a*)*(x)		axa	(0,2)(0,1)(1,2)
-
-E	(a*)+(x)		x	(0,1)(0,0)(0,1)
-E	(a*)+(x)		ax	(0,2)(0,1)(1,2)
-E	(a*)+(x)		axa	(0,2)(0,1)(1,2)
-
-E	(a*){2}(x)		x	(0,1)(0,0)(0,1)
-E	(a*){2}(x)		ax	(0,2)(1,1)(1,2)
-E	(a*){2}(x)		axa	(0,2)(1,1)(1,2)
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/repetition.dat b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/repetition.dat
deleted file mode 100644
index cf0d8382..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/dat/repetition.dat
+++ /dev/null
@@ -1,169 +0,0 @@
-NOTE	implicit vs. explicit repetitions : 2009-02-02
-
-# Glenn Fowler <gsf@research.att.com>
-# conforming matches (column 4) must match one of the following BREs
-#	NOMATCH
-#	(0,.)\((\(.\),\(.\))(?,?)(\2,\3)\)*
-#	(0,.)\((\(.\),\(.\))(\2,\3)(?,?)\)*
-# i.e., each 3-tuple has two identical elements and one (?,?)
-
-E	((..)|(.))				NULL		NOMATCH
-E	((..)|(.))((..)|(.))			NULL		NOMATCH
-E	((..)|(.))((..)|(.))((..)|(.))		NULL		NOMATCH
-
-E	((..)|(.)){1}				NULL		NOMATCH
-E	((..)|(.)){2}				NULL		NOMATCH
-E	((..)|(.)){3}				NULL		NOMATCH
-
-E	((..)|(.))*				NULL		(0,0)
-
-E	((..)|(.))				a		(0,1)(0,1)(?,?)(0,1)
-E	((..)|(.))((..)|(.))			a		NOMATCH
-E	((..)|(.))((..)|(.))((..)|(.))		a		NOMATCH
-
-E	((..)|(.)){1}				a		(0,1)(0,1)(?,?)(0,1)
-E	((..)|(.)){2}				a		NOMATCH
-E	((..)|(.)){3}				a		NOMATCH
-
-E	((..)|(.))*				a		(0,1)(0,1)(?,?)(0,1)
-
-E	((..)|(.))				aa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.))((..)|(.))			aa		(0,2)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2)
-E	((..)|(.))((..)|(.))((..)|(.))		aa		NOMATCH
-
-E	((..)|(.)){1}				aa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.)){2}				aa		(0,2)(1,2)(?,?)(1,2)
-E	((..)|(.)){3}				aa		NOMATCH
-
-E	((..)|(.))*				aa		(0,2)(0,2)(0,2)(?,?)
-
-E	((..)|(.))				aaa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.))((..)|(.))			aaa		(0,3)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3)
-E	((..)|(.))((..)|(.))((..)|(.))		aaa		(0,3)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2)(2,3)(?,?)(2,3)
-
-E	((..)|(.)){1}				aaa		(0,2)(0,2)(0,2)(?,?)
-#E	((..)|(.)){2}				aaa		(0,3)(2,3)(?,?)(2,3)
-E	((..)|(.)){2}				aaa		(0,3)(2,3)(0,2)(2,3)	RE2/Go
-E	((..)|(.)){3}				aaa		(0,3)(2,3)(?,?)(2,3)
-
-#E	((..)|(.))*				aaa		(0,3)(2,3)(?,?)(2,3)
-E	((..)|(.))*				aaa		(0,3)(2,3)(0,2)(2,3)	RE2/Go
-
-E	((..)|(.))				aaaa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.))((..)|(.))			aaaa		(0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)
-E	((..)|(.))((..)|(.))((..)|(.))		aaaa		(0,4)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3)(3,4)(?,?)(3,4)
-
-E	((..)|(.)){1}				aaaa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.)){2}				aaaa		(0,4)(2,4)(2,4)(?,?)
-#E	((..)|(.)){3}				aaaa		(0,4)(3,4)(?,?)(3,4)
-E	((..)|(.)){3}				aaaa		(0,4)(3,4)(0,2)(3,4)	RE2/Go
-
-E	((..)|(.))*				aaaa		(0,4)(2,4)(2,4)(?,?)
-
-E	((..)|(.))				aaaaa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.))((..)|(.))			aaaaa		(0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)
-E	((..)|(.))((..)|(.))((..)|(.))		aaaaa		(0,5)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,5)(?,?)(4,5)
-
-E	((..)|(.)){1}				aaaaa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.)){2}				aaaaa		(0,4)(2,4)(2,4)(?,?)
-#E	((..)|(.)){3}				aaaaa		(0,5)(4,5)(?,?)(4,5)
-E	((..)|(.)){3}				aaaaa		(0,5)(4,5)(2,4)(4,5)	RE2/Go
-
-#E	((..)|(.))*				aaaaa		(0,5)(4,5)(?,?)(4,5)
-E	((..)|(.))*				aaaaa		(0,5)(4,5)(2,4)(4,5)	RE2/Go
-
-E	((..)|(.))				aaaaaa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.))((..)|(.))			aaaaaa		(0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)
-E	((..)|(.))((..)|(.))((..)|(.))		aaaaaa		(0,6)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,6)(4,6)(?,?)
-
-E	((..)|(.)){1}				aaaaaa		(0,2)(0,2)(0,2)(?,?)
-E	((..)|(.)){2}				aaaaaa		(0,4)(2,4)(2,4)(?,?)
-E	((..)|(.)){3}				aaaaaa		(0,6)(4,6)(4,6)(?,?)
-
-E	((..)|(.))*				aaaaaa		(0,6)(4,6)(4,6)(?,?)
-
-NOTE	additional repetition tests graciously provided by Chris Kuklewicz www.haskell.org 2009-02-02
-
-# These test a bug in OS X / FreeBSD / NetBSD, and libtree.
-# Linux/GLIBC gets the {8,} and {8,8} wrong.
-
-:HA#100:E	X(.?){0,}Y	X1234567Y	(0,9)(7,8)
-:HA#101:E	X(.?){1,}Y	X1234567Y	(0,9)(7,8)
-:HA#102:E	X(.?){2,}Y	X1234567Y	(0,9)(7,8)
-:HA#103:E	X(.?){3,}Y	X1234567Y	(0,9)(7,8)
-:HA#104:E	X(.?){4,}Y	X1234567Y	(0,9)(7,8)
-:HA#105:E	X(.?){5,}Y	X1234567Y	(0,9)(7,8)
-:HA#106:E	X(.?){6,}Y	X1234567Y	(0,9)(7,8)
-:HA#107:E	X(.?){7,}Y	X1234567Y	(0,9)(7,8)
-:HA#108:E	X(.?){8,}Y	X1234567Y	(0,9)(8,8)
-#:HA#110:E	X(.?){0,8}Y	X1234567Y	(0,9)(7,8)
-:HA#110:E	X(.?){0,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-#:HA#111:E	X(.?){1,8}Y	X1234567Y	(0,9)(7,8)
-:HA#111:E	X(.?){1,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-#:HA#112:E	X(.?){2,8}Y	X1234567Y	(0,9)(7,8)
-:HA#112:E	X(.?){2,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-#:HA#113:E	X(.?){3,8}Y	X1234567Y	(0,9)(7,8)
-:HA#113:E	X(.?){3,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-#:HA#114:E	X(.?){4,8}Y	X1234567Y	(0,9)(7,8)
-:HA#114:E	X(.?){4,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-#:HA#115:E	X(.?){5,8}Y	X1234567Y	(0,9)(7,8)
-:HA#115:E	X(.?){5,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-#:HA#116:E	X(.?){6,8}Y	X1234567Y	(0,9)(7,8)
-:HA#116:E	X(.?){6,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-#:HA#117:E	X(.?){7,8}Y	X1234567Y	(0,9)(7,8)
-:HA#117:E	X(.?){7,8}Y	X1234567Y	(0,9)(8,8)	RE2/Go
-:HA#118:E	X(.?){8,8}Y	X1234567Y	(0,9)(8,8)
-
-# These test a fixed bug in my regex-tdfa that did not keep the expanded
-# form properly grouped, so right association did the wrong thing with
-# these ambiguous patterns (crafted just to test my code when I became
-# suspicious of my implementation).  The first subexpression should use
-# "ab" then "a" then "bcd".
-
-# OS X / FreeBSD / NetBSD badly fail many of these, with impossible
-# results like (0,6)(4,5)(6,6).
-
-#:HA#260:E	(a|ab|c|bcd){0,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#260:E	(a|ab|c|bcd){0,}(d*)	ababcd	(0,1)(0,1)(1,1)	Rust
-#:HA#261:E	(a|ab|c|bcd){1,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#261:E	(a|ab|c|bcd){1,}(d*)	ababcd	(0,1)(0,1)(1,1)	Rust
-:HA#262:E	(a|ab|c|bcd){2,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#263:E	(a|ab|c|bcd){3,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#264:E	(a|ab|c|bcd){4,}(d*)	ababcd	NOMATCH
-#:HA#265:E	(a|ab|c|bcd){0,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#265:E	(a|ab|c|bcd){0,10}(d*)	ababcd	(0,1)(0,1)(1,1)	Rust
-#:HA#266:E	(a|ab|c|bcd){1,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#266:E	(a|ab|c|bcd){1,10}(d*)	ababcd	(0,1)(0,1)(1,1)	Rust
-:HA#267:E	(a|ab|c|bcd){2,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#268:E	(a|ab|c|bcd){3,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#269:E	(a|ab|c|bcd){4,10}(d*)	ababcd	NOMATCH
-#:HA#270:E	(a|ab|c|bcd)*(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#270:E	(a|ab|c|bcd)*(d*)	ababcd	(0,1)(0,1)(1,1)	Rust
-#:HA#271:E	(a|ab|c|bcd)+(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#271:E	(a|ab|c|bcd)+(d*)	ababcd	(0,1)(0,1)(1,1)	Rust
-
-# The above worked on Linux/GLIBC but the following often fail.
-# They also trip up OS X / FreeBSD / NetBSD:
-
-#:HA#280:E	(ab|a|c|bcd){0,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#280:E	(ab|a|c|bcd){0,}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-#:HA#281:E	(ab|a|c|bcd){1,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#281:E	(ab|a|c|bcd){1,}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-#:HA#282:E	(ab|a|c|bcd){2,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#282:E	(ab|a|c|bcd){2,}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-#:HA#283:E	(ab|a|c|bcd){3,}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#283:E	(ab|a|c|bcd){3,}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-:HA#284:E	(ab|a|c|bcd){4,}(d*)	ababcd	NOMATCH
-#:HA#285:E	(ab|a|c|bcd){0,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#285:E	(ab|a|c|bcd){0,10}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-#:HA#286:E	(ab|a|c|bcd){1,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#286:E	(ab|a|c|bcd){1,10}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-#:HA#287:E	(ab|a|c|bcd){2,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#287:E	(ab|a|c|bcd){2,10}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-#:HA#288:E	(ab|a|c|bcd){3,10}(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#288:E	(ab|a|c|bcd){3,10}(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-:HA#289:E	(ab|a|c|bcd){4,10}(d*)	ababcd	NOMATCH
-#:HA#290:E	(ab|a|c|bcd)*(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#290:E	(ab|a|c|bcd)*(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
-#:HA#291:E	(ab|a|c|bcd)+(d*)	ababcd	(0,6)(3,6)(6,6)
-:HA#291:E	(ab|a|c|bcd)+(d*)	ababcd	(0,6)(4,5)(5,6)	RE2/Go
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/nullsubexpr.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/nullsubexpr.toml
deleted file mode 100644
index 2f1f018..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/nullsubexpr.toml
+++ /dev/null
@@ -1,405 +0,0 @@
-# !!! DO NOT EDIT !!!
-# Automatically generated by 'regex-cli generate fowler'.
-# Numbers in the test names correspond to the line number of the test from
-# the original dat file.
-
-[[test]]
-name = "nullsubexpr3"
-regex = '''(a*)*'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr4"
-regex = '''(a*)*'''
-haystack = '''x'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr5"
-regex = '''(a*)*'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr6"
-regex = '''(a*)*'''
-haystack = '''aaaaaax'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr7"
-regex = '''(a*)+'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr8"
-regex = '''(a*)+'''
-haystack = '''x'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr9"
-regex = '''(a*)+'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr10"
-regex = '''(a*)+'''
-haystack = '''aaaaaax'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr11"
-regex = '''(a+)*'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr12"
-regex = '''(a+)*'''
-haystack = '''x'''
-matches = [[[0, 0], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr13"
-regex = '''(a+)*'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr14"
-regex = '''(a+)*'''
-haystack = '''aaaaaax'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr15"
-regex = '''(a+)+'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr16"
-regex = '''(a+)+'''
-haystack = '''x'''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "nullsubexpr17"
-regex = '''(a+)+'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr18"
-regex = '''(a+)+'''
-haystack = '''aaaaaax'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr20"
-regex = '''([a]*)*'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr21"
-regex = '''([a]*)*'''
-haystack = '''x'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr22"
-regex = '''([a]*)*'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr23"
-regex = '''([a]*)*'''
-haystack = '''aaaaaax'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr24"
-regex = '''([a]*)+'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr25"
-regex = '''([a]*)+'''
-haystack = '''x'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr26"
-regex = '''([a]*)+'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr27"
-regex = '''([a]*)+'''
-haystack = '''aaaaaax'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr28"
-regex = '''([^b]*)*'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr29"
-regex = '''([^b]*)*'''
-haystack = '''b'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr30"
-regex = '''([^b]*)*'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr31"
-regex = '''([^b]*)*'''
-haystack = '''aaaaaab'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr32"
-regex = '''([ab]*)*'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr33"
-regex = '''([ab]*)*'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr34"
-regex = '''([ab]*)*'''
-haystack = '''ababab'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr35"
-regex = '''([ab]*)*'''
-haystack = '''bababa'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr36"
-regex = '''([ab]*)*'''
-haystack = '''b'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr37"
-regex = '''([ab]*)*'''
-haystack = '''bbbbbb'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr38"
-regex = '''([ab]*)*'''
-haystack = '''aaaabcde'''
-matches = [[[0, 5], [0, 5]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr39"
-regex = '''([^a]*)*'''
-haystack = '''b'''
-matches = [[[0, 1], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr40"
-regex = '''([^a]*)*'''
-haystack = '''bbbbbb'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr41"
-regex = '''([^a]*)*'''
-haystack = '''aaaaaa'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr42"
-regex = '''([^ab]*)*'''
-haystack = '''ccccxx'''
-matches = [[[0, 6], [0, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr43"
-regex = '''([^ab]*)*'''
-haystack = '''ababab'''
-matches = [[[0, 0], [0, 0]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "nullsubexpr46"
-regex = '''((z)+|a)*'''
-haystack = '''zabcde'''
-matches = [[[0, 2], [1, 2], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr64"
-regex = '''(a*)*(x)'''
-haystack = '''x'''
-matches = [[[0, 1], [0, 0], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr65"
-regex = '''(a*)*(x)'''
-haystack = '''ax'''
-matches = [[[0, 2], [0, 1], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr66"
-regex = '''(a*)*(x)'''
-haystack = '''axa'''
-matches = [[[0, 2], [0, 1], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr68"
-regex = '''(a*)+(x)'''
-haystack = '''x'''
-matches = [[[0, 1], [0, 0], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr69"
-regex = '''(a*)+(x)'''
-haystack = '''ax'''
-matches = [[[0, 2], [0, 1], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr70"
-regex = '''(a*)+(x)'''
-haystack = '''axa'''
-matches = [[[0, 2], [0, 1], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr72"
-regex = '''(a*){2}(x)'''
-haystack = '''x'''
-matches = [[[0, 1], [0, 0], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr73"
-regex = '''(a*){2}(x)'''
-haystack = '''ax'''
-matches = [[[0, 2], [1, 1], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "nullsubexpr74"
-regex = '''(a*){2}(x)'''
-haystack = '''axa'''
-matches = [[[0, 2], [1, 1], [1, 2]]]
-match-limit = 1
-anchored = true
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/repetition.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/repetition.toml
deleted file mode 100644
index d6a7112..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/fowler/repetition.toml
+++ /dev/null
@@ -1,746 +0,0 @@
-# !!! DO NOT EDIT !!!
-# Automatically generated by 'regex-cli generate fowler'.
-# Numbers in the test names correspond to the line number of the test from
-# the original dat file.
-
-[[test]]
-name = "repetition10"
-regex = '''((..)|(.))'''
-haystack = ''''''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition11"
-regex = '''((..)|(.))((..)|(.))'''
-haystack = ''''''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition12"
-regex = '''((..)|(.))((..)|(.))((..)|(.))'''
-haystack = ''''''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition14"
-regex = '''((..)|(.)){1}'''
-haystack = ''''''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition15"
-regex = '''((..)|(.)){2}'''
-haystack = ''''''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition16"
-regex = '''((..)|(.)){3}'''
-haystack = ''''''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition18"
-regex = '''((..)|(.))*'''
-haystack = ''''''
-matches = [[[0, 0], [], [], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition20"
-regex = '''((..)|(.))'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1], [], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition21"
-regex = '''((..)|(.))((..)|(.))'''
-haystack = '''a'''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition22"
-regex = '''((..)|(.))((..)|(.))((..)|(.))'''
-haystack = '''a'''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition24"
-regex = '''((..)|(.)){1}'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1], [], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition25"
-regex = '''((..)|(.)){2}'''
-haystack = '''a'''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition26"
-regex = '''((..)|(.)){3}'''
-haystack = '''a'''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition28"
-regex = '''((..)|(.))*'''
-haystack = '''a'''
-matches = [[[0, 1], [0, 1], [], [0, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition30"
-regex = '''((..)|(.))'''
-haystack = '''aa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition31"
-regex = '''((..)|(.))((..)|(.))'''
-haystack = '''aa'''
-matches = [[[0, 2], [0, 1], [], [0, 1], [1, 2], [], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition32"
-regex = '''((..)|(.))((..)|(.))((..)|(.))'''
-haystack = '''aa'''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition34"
-regex = '''((..)|(.)){1}'''
-haystack = '''aa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition35"
-regex = '''((..)|(.)){2}'''
-haystack = '''aa'''
-matches = [[[0, 2], [1, 2], [], [1, 2]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition36"
-regex = '''((..)|(.)){3}'''
-haystack = '''aa'''
-matches = []
-match-limit = 1
-
-[[test]]
-name = "repetition38"
-regex = '''((..)|(.))*'''
-haystack = '''aa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition40"
-regex = '''((..)|(.))'''
-haystack = '''aaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition41"
-regex = '''((..)|(.))((..)|(.))'''
-haystack = '''aaa'''
-matches = [[[0, 3], [0, 2], [0, 2], [], [2, 3], [], [2, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition42"
-regex = '''((..)|(.))((..)|(.))((..)|(.))'''
-haystack = '''aaa'''
-matches = [[[0, 3], [0, 1], [], [0, 1], [1, 2], [], [1, 2], [2, 3], [], [2, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition44"
-regex = '''((..)|(.)){1}'''
-haystack = '''aaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition46"
-regex = '''((..)|(.)){2}'''
-haystack = '''aaa'''
-matches = [[[0, 3], [2, 3], [0, 2], [2, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition47"
-regex = '''((..)|(.)){3}'''
-haystack = '''aaa'''
-matches = [[[0, 3], [2, 3], [], [2, 3]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition50"
-regex = '''((..)|(.))*'''
-haystack = '''aaa'''
-matches = [[[0, 3], [2, 3], [0, 2], [2, 3]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition52"
-regex = '''((..)|(.))'''
-haystack = '''aaaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition53"
-regex = '''((..)|(.))((..)|(.))'''
-haystack = '''aaaa'''
-matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition54"
-regex = '''((..)|(.))((..)|(.))((..)|(.))'''
-haystack = '''aaaa'''
-matches = [[[0, 4], [0, 2], [0, 2], [], [2, 3], [], [2, 3], [3, 4], [], [3, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition56"
-regex = '''((..)|(.)){1}'''
-haystack = '''aaaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition57"
-regex = '''((..)|(.)){2}'''
-haystack = '''aaaa'''
-matches = [[[0, 4], [2, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition59"
-regex = '''((..)|(.)){3}'''
-haystack = '''aaaa'''
-matches = [[[0, 4], [3, 4], [0, 2], [3, 4]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition61"
-regex = '''((..)|(.))*'''
-haystack = '''aaaa'''
-matches = [[[0, 4], [2, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition63"
-regex = '''((..)|(.))'''
-haystack = '''aaaaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition64"
-regex = '''((..)|(.))((..)|(.))'''
-haystack = '''aaaaa'''
-matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition65"
-regex = '''((..)|(.))((..)|(.))((..)|(.))'''
-haystack = '''aaaaa'''
-matches = [[[0, 5], [0, 2], [0, 2], [], [2, 4], [2, 4], [], [4, 5], [], [4, 5]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition67"
-regex = '''((..)|(.)){1}'''
-haystack = '''aaaaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition68"
-regex = '''((..)|(.)){2}'''
-haystack = '''aaaaa'''
-matches = [[[0, 4], [2, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition70"
-regex = '''((..)|(.)){3}'''
-haystack = '''aaaaa'''
-matches = [[[0, 5], [4, 5], [2, 4], [4, 5]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition73"
-regex = '''((..)|(.))*'''
-haystack = '''aaaaa'''
-matches = [[[0, 5], [4, 5], [2, 4], [4, 5]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition75"
-regex = '''((..)|(.))'''
-haystack = '''aaaaaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition76"
-regex = '''((..)|(.))((..)|(.))'''
-haystack = '''aaaaaa'''
-matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition77"
-regex = '''((..)|(.))((..)|(.))((..)|(.))'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [0, 2], [0, 2], [], [2, 4], [2, 4], [], [4, 6], [4, 6], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition79"
-regex = '''((..)|(.)){1}'''
-haystack = '''aaaaaa'''
-matches = [[[0, 2], [0, 2], [0, 2], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition80"
-regex = '''((..)|(.)){2}'''
-haystack = '''aaaaaa'''
-matches = [[[0, 4], [2, 4], [2, 4], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition81"
-regex = '''((..)|(.)){3}'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [4, 6], [4, 6], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition83"
-regex = '''((..)|(.))*'''
-haystack = '''aaaaaa'''
-matches = [[[0, 6], [4, 6], [4, 6], []]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive90"
-regex = '''X(.?){0,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive91"
-regex = '''X(.?){1,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive92"
-regex = '''X(.?){2,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive93"
-regex = '''X(.?){3,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive94"
-regex = '''X(.?){4,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive95"
-regex = '''X(.?){5,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive96"
-regex = '''X(.?){6,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive97"
-regex = '''X(.?){7,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [7, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive98"
-regex = '''X(.?){8,}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive100"
-regex = '''X(.?){0,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive102"
-regex = '''X(.?){1,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive104"
-regex = '''X(.?){2,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive106"
-regex = '''X(.?){3,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive108"
-regex = '''X(.?){4,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive110"
-regex = '''X(.?){5,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive112"
-regex = '''X(.?){6,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive114"
-regex = '''X(.?){7,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive115"
-regex = '''X(.?){8,8}Y'''
-haystack = '''X1234567Y'''
-matches = [[[0, 9], [8, 8]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "repetition-expensive127"
-regex = '''(a|ab|c|bcd){0,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 1], [0, 1], [1, 1]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "repetition-expensive129"
-regex = '''(a|ab|c|bcd){1,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 1], [0, 1], [1, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive130"
-regex = '''(a|ab|c|bcd){2,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [3, 6], [6, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive131"
-regex = '''(a|ab|c|bcd){3,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [3, 6], [6, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive132"
-regex = '''(a|ab|c|bcd){4,}(d*)'''
-haystack = '''ababcd'''
-matches = []
-match-limit = 1
-
-# Test added by Rust regex project.
-[[test]]
-name = "repetition-expensive134"
-regex = '''(a|ab|c|bcd){0,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 1], [0, 1], [1, 1]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "repetition-expensive136"
-regex = '''(a|ab|c|bcd){1,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 1], [0, 1], [1, 1]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive137"
-regex = '''(a|ab|c|bcd){2,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [3, 6], [6, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive138"
-regex = '''(a|ab|c|bcd){3,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [3, 6], [6, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive139"
-regex = '''(a|ab|c|bcd){4,10}(d*)'''
-haystack = '''ababcd'''
-matches = []
-match-limit = 1
-
-# Test added by Rust regex project.
-[[test]]
-name = "repetition-expensive141"
-regex = '''(a|ab|c|bcd)*(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 1], [0, 1], [1, 1]]]
-match-limit = 1
-anchored = true
-
-# Test added by Rust regex project.
-[[test]]
-name = "repetition-expensive143"
-regex = '''(a|ab|c|bcd)+(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 1], [0, 1], [1, 1]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive149"
-regex = '''(ab|a|c|bcd){0,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive151"
-regex = '''(ab|a|c|bcd){1,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive153"
-regex = '''(ab|a|c|bcd){2,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive155"
-regex = '''(ab|a|c|bcd){3,}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive156"
-regex = '''(ab|a|c|bcd){4,}(d*)'''
-haystack = '''ababcd'''
-matches = []
-match-limit = 1
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive158"
-regex = '''(ab|a|c|bcd){0,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive160"
-regex = '''(ab|a|c|bcd){1,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive162"
-regex = '''(ab|a|c|bcd){2,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive164"
-regex = '''(ab|a|c|bcd){3,10}(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-[[test]]
-name = "repetition-expensive165"
-regex = '''(ab|a|c|bcd){4,10}(d*)'''
-haystack = '''ababcd'''
-matches = []
-match-limit = 1
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive167"
-regex = '''(ab|a|c|bcd)*(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
-# Test added by RE2/Go project.
-[[test]]
-name = "repetition-expensive169"
-regex = '''(ab|a|c|bcd)+(d*)'''
-haystack = '''ababcd'''
-matches = [[[0, 6], [4, 5], [5, 6]]]
-match-limit = 1
-anchored = true
-
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/iter.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/iter.toml
deleted file mode 100644
index 329b9f03..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/iter.toml
+++ /dev/null
@@ -1,143 +0,0 @@
-[[test]]
-name = "1"
-regex = "a"
-haystack = "aaa"
-matches = [[0, 1], [1, 2], [2, 3]]
-
-[[test]]
-name = "2"
-regex = "a"
-haystack = "aba"
-matches = [[0, 1], [2, 3]]
-
-[[test]]
-name = "empty1"
-regex = ''
-haystack = ''
-matches = [[0, 0]]
-
-[[test]]
-name = "empty2"
-regex = ''
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty3"
-regex = '(?:)'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty4"
-regex = '(?:)*'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty5"
-regex = '(?:)+'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty6"
-regex = '(?:)?'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty7"
-regex = '(?:)(?:)'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty8"
-regex = '(?:)+|z'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty9"
-regex = 'z|(?:)+'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty10"
-regex = '(?:)+|b'
-haystack = 'abc'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-
-[[test]]
-name = "empty11"
-regex = 'b|(?:)+'
-haystack = 'abc'
-matches = [[0, 0], [1, 2], [3, 3]]
-
-[[test]]
-name = "start1"
-regex = "^a"
-haystack = "a"
-matches = [[0, 1]]
-
-[[test]]
-name = "start2"
-regex = "^a"
-haystack = "aa"
-matches = [[0, 1]]
-
-[[test]]
-name = "anchored1"
-regex = "a"
-haystack = "a"
-matches = [[0, 1]]
-anchored = true
-
-# This test is pretty subtle. It demonstrates the crucial difference between
-# '^a' and 'a' compiled in 'anchored' mode. The former regex exclusively
-# matches at the start of a haystack and nowhere else. The latter regex has
-# no such restriction, but its automaton is constructed such that it lacks a
-# `.*?` prefix. So it can actually produce matches at multiple locations.
-# The anchored3 test drives this point home.
-[[test]]
-name = "anchored2"
-regex = "a"
-haystack = "aa"
-matches = [[0, 1], [1, 2]]
-anchored = true
-
-# Unlikely anchored2, this test stops matching anything after it sees `b`
-# since it lacks a `.*?` prefix. Since it is looking for 'a' but sees 'b', it
-# determines that there are no remaining matches.
-[[test]]
-name = "anchored3"
-regex = "a"
-haystack = "aaba"
-matches = [[0, 1], [1, 2]]
-anchored = true
-
-[[test]]
-name = "nonempty-followedby-empty"
-regex = 'abc|.*?'
-haystack = "abczzz"
-matches = [[0, 3], [4, 4], [5, 5], [6, 6]]
-
-[[test]]
-name = "nonempty-followedby-oneempty"
-regex = 'abc|.*?'
-haystack = "abcz"
-matches = [[0, 3], [4, 4]]
-
-[[test]]
-name = "nonempty-followedby-onemixed"
-regex = 'abc|.*?'
-haystack = "abczabc"
-matches = [[0, 3], [4, 7]]
-
-[[test]]
-name = "nonempty-followedby-twomixed"
-regex = 'abc|.*?'
-haystack = "abczzabc"
-matches = [[0, 3], [4, 4], [5, 8]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/leftmost-all.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/leftmost-all.toml
deleted file mode 100644
index e3fd950..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/leftmost-all.toml
+++ /dev/null
@@ -1,25 +0,0 @@
-[[test]]
-name = "alt"
-regex = 'foo|foobar'
-haystack = "foobar"
-matches = [[0, 6]]
-match-kind = "all"
-search-kind = "leftmost"
-
-[[test]]
-name = "multi"
-regex = ['foo', 'foobar']
-haystack = "foobar"
-matches = [
-  { id = 1, span = [0, 6] },
-]
-match-kind = "all"
-search-kind = "leftmost"
-
-[[test]]
-name = "dotall"
-regex = '(?s:.)'
-haystack = "foobar"
-matches = [[5, 6]]
-match-kind = "all"
-search-kind = "leftmost"
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/line-terminator.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/line-terminator.toml
deleted file mode 100644
index a398daf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/line-terminator.toml
+++ /dev/null
@@ -1,109 +0,0 @@
-# This tests that we can switch the line terminator to the NUL byte.
-[[test]]
-name = "nul"
-regex = '(?m)^[a-z]+$'
-haystack = '\x00abc\x00'
-matches = [[1, 4]]
-unescape = true
-line-terminator = '\x00'
-
-# This tests that '.' will not match the configured line terminator, but will
-# match \n.
-[[test]]
-name = "dot-changes-with-line-terminator"
-regex = '.'
-haystack = '\x00\n'
-matches = [[1, 2]]
-unescape = true
-line-terminator = '\x00'
-
-# This tests that when we switch the line terminator, \n is no longer
-# recognized as the terminator.
-[[test]]
-name = "not-line-feed"
-regex = '(?m)^[a-z]+$'
-haystack = '\nabc\n'
-matches = []
-unescape = true
-line-terminator = '\x00'
-
-# This tests that we can set the line terminator to a non-ASCII byte and have
-# it behave as expected.
-[[test]]
-name = "non-ascii"
-regex = '(?m)^[a-z]+$'
-haystack = '\xFFabc\xFF'
-matches = [[1, 4]]
-unescape = true
-line-terminator = '\xFF'
-utf8 = false
-
-# This tests a tricky case where the line terminator is set to \r. This ensures
-# that the StartLF look-behind assertion is tracked when computing the start
-# state.
-[[test]]
-name = "carriage"
-regex = '(?m)^[a-z]+'
-haystack = 'ABC\rabc'
-matches = [[4, 7]]
-bounds = [4, 7]
-unescape = true
-line-terminator = '\r'
-
-# This tests that we can set the line terminator to a byte corresponding to a
-# word character, and things work as expected.
-[[test]]
-name = "word-byte"
-regex = '(?m)^[a-z]+$'
-haystack = 'ZabcZ'
-matches = [[1, 4]]
-unescape = true
-line-terminator = 'Z'
-
-# This tests that we can set the line terminator to a byte corresponding to a
-# non-word character, and things work as expected.
-[[test]]
-name = "non-word-byte"
-regex = '(?m)^[a-z]+$'
-haystack = '%abc%'
-matches = [[1, 4]]
-unescape = true
-line-terminator = '%'
-
-# This combines "set line terminator to a word byte" with a word boundary
-# assertion, which should result in no match even though ^/$ matches.
-[[test]]
-name = "word-boundary"
-regex = '(?m)^\b[a-z]+\b$'
-haystack = 'ZabcZ'
-matches = []
-unescape = true
-line-terminator = 'Z'
-
-# Like 'word-boundary', but does an anchored search at the point where ^
-# matches, but where \b should not.
-[[test]]
-name = "word-boundary-at"
-regex = '(?m)^\b[a-z]+\b$'
-haystack = 'ZabcZ'
-matches = []
-bounds = [1, 4]
-anchored = true
-unescape = true
-line-terminator = 'Z'
-
-# Like 'word-boundary-at', but flips the word boundary to a negation. This
-# in particular tests a tricky case in DFA engines, where they must consider
-# explicitly that a starting configuration from a custom line terminator may
-# also required setting the "is from word byte" flag on a state. Otherwise,
-# it's treated as "not from a word byte," which would result in \B not matching
-# here when it should.
-[[test]]
-name = "not-word-boundary-at"
-regex = '(?m)^\B[a-z]+\B$'
-haystack = 'ZabcZ'
-matches = [[1, 4]]
-bounds = [1, 4]
-anchored = true
-unescape = true
-line-terminator = 'Z'
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/misc.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/misc.toml
deleted file mode 100644
index c65531f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/misc.toml
+++ /dev/null
@@ -1,99 +0,0 @@
-[[test]]
-name = "ascii-literal"
-regex = "a"
-haystack = "a"
-matches = [[0, 1]]
-
-[[test]]
-name = "ascii-literal-not"
-regex = "a"
-haystack = "z"
-matches = []
-
-[[test]]
-name = "ascii-literal-anchored"
-regex = "a"
-haystack = "a"
-matches = [[0, 1]]
-anchored = true
-
-[[test]]
-name = "ascii-literal-anchored-not"
-regex = "a"
-haystack = "z"
-matches = []
-anchored = true
-
-[[test]]
-name = "anchor-start-end-line"
-regex = '(?m)^bar$'
-haystack = "foo\nbar\nbaz"
-matches = [[4, 7]]
-
-[[test]]
-name = "prefix-literal-match"
-regex = '^abc'
-haystack = "abc"
-matches = [[0, 3]]
-
-[[test]]
-name = "prefix-literal-match-ascii"
-regex = '^abc'
-haystack = "abc"
-matches = [[0, 3]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "prefix-literal-no-match"
-regex = '^abc'
-haystack = "zabc"
-matches = []
-
-[[test]]
-name = "one-literal-edge"
-regex = 'abc'
-haystack = "xxxxxab"
-matches = []
-
-[[test]]
-name = "terminates"
-regex = 'a$'
-haystack = "a"
-matches = [[0, 1]]
-
-[[test]]
-name = "suffix-100"
-regex = '.*abcd'
-haystack = "abcd"
-matches = [[0, 4]]
-
-[[test]]
-name = "suffix-200"
-regex = '.*(?:abcd)+'
-haystack = "abcd"
-matches = [[0, 4]]
-
-[[test]]
-name = "suffix-300"
-regex = '.*(?:abcd)+'
-haystack = "abcdabcd"
-matches = [[0, 8]]
-
-[[test]]
-name = "suffix-400"
-regex = '.*(?:abcd)+'
-haystack = "abcdxabcd"
-matches = [[0, 9]]
-
-[[test]]
-name = "suffix-500"
-regex = '.*x(?:abcd)+'
-haystack = "abcdxabcd"
-matches = [[0, 9]]
-
-[[test]]
-name = "suffix-600"
-regex = '[^abcd]*x(?:abcd)+'
-haystack = "abcdxabcd"
-matches = [[4, 9]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/multiline.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/multiline.toml
deleted file mode 100644
index 3acc901..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/multiline.toml
+++ /dev/null
@@ -1,845 +0,0 @@
-[[test]]
-name = "basic1"
-regex = '(?m)^[a-z]+$'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 3], [4, 7], [8, 11]]
-
-[[test]]
-name = "basic1-crlf"
-regex = '(?Rm)^[a-z]+$'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 3], [4, 7], [8, 11]]
-
-[[test]]
-name = "basic1-crlf-cr"
-regex = '(?Rm)^[a-z]+$'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 3], [4, 7], [8, 11]]
-
-[[test]]
-name = "basic2"
-regex = '(?m)^$'
-haystack = "abc\ndef\nxyz"
-matches = []
-
-[[test]]
-name = "basic2-crlf"
-regex = '(?Rm)^$'
-haystack = "abc\ndef\nxyz"
-matches = []
-
-[[test]]
-name = "basic2-crlf-cr"
-regex = '(?Rm)^$'
-haystack = "abc\rdef\rxyz"
-matches = []
-
-[[test]]
-name = "basic3"
-regex = '(?m)^'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 0], [4, 4], [8, 8]]
-
-[[test]]
-name = "basic3-crlf"
-regex = '(?Rm)^'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 0], [4, 4], [8, 8]]
-
-[[test]]
-name = "basic3-crlf-cr"
-regex = '(?Rm)^'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 0], [4, 4], [8, 8]]
-
-[[test]]
-name = "basic4"
-regex = '(?m)$'
-haystack = "abc\ndef\nxyz"
-matches = [[3, 3], [7, 7], [11, 11]]
-
-[[test]]
-name = "basic4-crlf"
-regex = '(?Rm)$'
-haystack = "abc\ndef\nxyz"
-matches = [[3, 3], [7, 7], [11, 11]]
-
-[[test]]
-name = "basic4-crlf-cr"
-regex = '(?Rm)$'
-haystack = "abc\rdef\rxyz"
-matches = [[3, 3], [7, 7], [11, 11]]
-
-[[test]]
-name = "basic5"
-regex = '(?m)^[a-z]'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 1], [4, 5], [8, 9]]
-
-[[test]]
-name = "basic5-crlf"
-regex = '(?Rm)^[a-z]'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 1], [4, 5], [8, 9]]
-
-[[test]]
-name = "basic5-crlf-cr"
-regex = '(?Rm)^[a-z]'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 1], [4, 5], [8, 9]]
-
-[[test]]
-name = "basic6"
-regex = '(?m)[a-z]^'
-haystack = "abc\ndef\nxyz"
-matches = []
-
-[[test]]
-name = "basic6-crlf"
-regex = '(?Rm)[a-z]^'
-haystack = "abc\ndef\nxyz"
-matches = []
-
-[[test]]
-name = "basic6-crlf-cr"
-regex = '(?Rm)[a-z]^'
-haystack = "abc\rdef\rxyz"
-matches = []
-
-[[test]]
-name = "basic7"
-regex = '(?m)[a-z]$'
-haystack = "abc\ndef\nxyz"
-matches = [[2, 3], [6, 7], [10, 11]]
-
-[[test]]
-name = "basic7-crlf"
-regex = '(?Rm)[a-z]$'
-haystack = "abc\ndef\nxyz"
-matches = [[2, 3], [6, 7], [10, 11]]
-
-[[test]]
-name = "basic7-crlf-cr"
-regex = '(?Rm)[a-z]$'
-haystack = "abc\rdef\rxyz"
-matches = [[2, 3], [6, 7], [10, 11]]
-
-[[test]]
-name = "basic8"
-regex = '(?m)$[a-z]'
-haystack = "abc\ndef\nxyz"
-matches = []
-
-[[test]]
-name = "basic8-crlf"
-regex = '(?Rm)$[a-z]'
-haystack = "abc\ndef\nxyz"
-matches = []
-
-[[test]]
-name = "basic8-crlf-cr"
-regex = '(?Rm)$[a-z]'
-haystack = "abc\rdef\rxyz"
-matches = []
-
-[[test]]
-name = "basic9"
-regex = '(?m)^$'
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "basic9-crlf"
-regex = '(?Rm)^$'
-haystack = ""
-matches = [[0, 0]]
-
-[[test]]
-name = "repeat1"
-regex = '(?m)(?:^$)*'
-haystack = "a\nb\nc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "repeat1-crlf"
-regex = '(?Rm)(?:^$)*'
-haystack = "a\nb\nc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "repeat1-crlf-cr"
-regex = '(?Rm)(?:^$)*'
-haystack = "a\rb\rc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "repeat1-no-multi"
-regex = '(?:^$)*'
-haystack = "a\nb\nc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "repeat1-no-multi-crlf"
-regex = '(?R)(?:^$)*'
-haystack = "a\nb\nc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "repeat1-no-multi-crlf-cr"
-regex = '(?R)(?:^$)*'
-haystack = "a\rb\rc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-[[test]]
-name = "repeat2"
-regex = '(?m)(?:^|a)+'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat2-crlf"
-regex = '(?Rm)(?:^|a)+'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat2-crlf-cr"
-regex = '(?Rm)(?:^|a)+'
-haystack = "a\raaa\r"
-matches = [[0, 0], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat2-no-multi"
-regex = '(?:^|a)+'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 5]]
-
-[[test]]
-name = "repeat2-no-multi-crlf"
-regex = '(?R)(?:^|a)+'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 5]]
-
-[[test]]
-name = "repeat2-no-multi-crlf-cr"
-regex = '(?R)(?:^|a)+'
-haystack = "a\raaa\r"
-matches = [[0, 0], [2, 5]]
-
-[[test]]
-name = "repeat3"
-regex = '(?m)(?:^|a)*'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat3-crlf"
-regex = '(?Rm)(?:^|a)*'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat3-crlf-cr"
-regex = '(?Rm)(?:^|a)*'
-haystack = "a\raaa\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat3-no-multi"
-regex = '(?:^|a)*'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 5], [6, 6]]
-
-[[test]]
-name = "repeat3-no-multi-crlf"
-regex = '(?R)(?:^|a)*'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 5], [6, 6]]
-
-[[test]]
-name = "repeat3-no-multi-crlf-cr"
-regex = '(?R)(?:^|a)*'
-haystack = "a\raaa\r"
-matches = [[0, 0], [1, 1], [2, 5], [6, 6]]
-
-[[test]]
-name = "repeat4"
-regex = '(?m)(?:^|a+)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat4-crlf"
-regex = '(?Rm)(?:^|a+)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat4-crlf-cr"
-regex = '(?Rm)(?:^|a+)'
-haystack = "a\raaa\r"
-matches = [[0, 0], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat4-no-multi"
-regex = '(?:^|a+)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 5]]
-
-[[test]]
-name = "repeat4-no-multi-crlf"
-regex = '(?R)(?:^|a+)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [2, 5]]
-
-[[test]]
-name = "repeat4-no-multi-crlf-cr"
-regex = '(?R)(?:^|a+)'
-haystack = "a\raaa\r"
-matches = [[0, 0], [2, 5]]
-
-[[test]]
-name = "repeat5"
-regex = '(?m)(?:^|a*)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat5-crlf"
-regex = '(?Rm)(?:^|a*)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat5-crlf-cr"
-regex = '(?Rm)(?:^|a*)'
-haystack = "a\raaa\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]]
-
-[[test]]
-name = "repeat5-no-multi"
-regex = '(?:^|a*)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 5], [6, 6]]
-
-[[test]]
-name = "repeat5-no-multi-crlf"
-regex = '(?R)(?:^|a*)'
-haystack = "a\naaa\n"
-matches = [[0, 0], [1, 1], [2, 5], [6, 6]]
-
-[[test]]
-name = "repeat5-no-multi-crlf-cr"
-regex = '(?R)(?:^|a*)'
-haystack = "a\raaa\r"
-matches = [[0, 0], [1, 1], [2, 5], [6, 6]]
-
-[[test]]
-name = "repeat6"
-regex = '(?m)(?:^[a-z])+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 1], [4, 5], [8, 9]]
-
-[[test]]
-name = "repeat6-crlf"
-regex = '(?Rm)(?:^[a-z])+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 1], [4, 5], [8, 9]]
-
-[[test]]
-name = "repeat6-crlf-cr"
-regex = '(?Rm)(?:^[a-z])+'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 1], [4, 5], [8, 9]]
-
-[[test]]
-name = "repeat6-no-multi"
-regex = '(?:^[a-z])+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 1]]
-
-[[test]]
-name = "repeat6-no-multi-crlf"
-regex = '(?R)(?:^[a-z])+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 1]]
-
-[[test]]
-name = "repeat6-no-multi-crlf-cr"
-regex = '(?R)(?:^[a-z])+'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 1]]
-
-[[test]]
-name = "repeat7"
-regex = '(?m)(?:^[a-z]{3}\n?)+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat7-crlf"
-regex = '(?Rm)(?:^[a-z]{3}\n?)+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat7-crlf-cr"
-regex = '(?Rm)(?:^[a-z]{3}\r?)+'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat7-no-multi"
-regex = '(?:^[a-z]{3}\n?)+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 4]]
-
-[[test]]
-name = "repeat7-no-multi-crlf"
-regex = '(?R)(?:^[a-z]{3}\n?)+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 4]]
-
-[[test]]
-name = "repeat7-no-multi-crlf-cr"
-regex = '(?R)(?:^[a-z]{3}\r?)+'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 4]]
-
-[[test]]
-name = "repeat8"
-regex = '(?m)(?:^[a-z]{3}\n?)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat8-crlf"
-regex = '(?Rm)(?:^[a-z]{3}\n?)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat8-crlf-cr"
-regex = '(?Rm)(?:^[a-z]{3}\r?)*'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat8-no-multi"
-regex = '(?:^[a-z]{3}\n?)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]]
-
-[[test]]
-name = "repeat8-no-multi-crlf"
-regex = '(?R)(?:^[a-z]{3}\n?)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]]
-
-[[test]]
-name = "repeat8-no-multi-crlf-cr"
-regex = '(?R)(?:^[a-z]{3}\r?)*'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]]
-
-[[test]]
-name = "repeat9"
-regex = '(?m)(?:\n?[a-z]{3}$)+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat9-crlf"
-regex = '(?Rm)(?:\n?[a-z]{3}$)+'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat9-crlf-cr"
-regex = '(?Rm)(?:\r?[a-z]{3}$)+'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat9-no-multi"
-regex = '(?:\n?[a-z]{3}$)+'
-haystack = "abc\ndef\nxyz"
-matches = [[7, 11]]
-
-[[test]]
-name = "repeat9-no-multi-crlf"
-regex = '(?R)(?:\n?[a-z]{3}$)+'
-haystack = "abc\ndef\nxyz"
-matches = [[7, 11]]
-
-[[test]]
-name = "repeat9-no-multi-crlf-cr"
-regex = '(?R)(?:\r?[a-z]{3}$)+'
-haystack = "abc\rdef\rxyz"
-matches = [[7, 11]]
-
-[[test]]
-name = "repeat10"
-regex = '(?m)(?:\n?[a-z]{3}$)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat10-crlf"
-regex = '(?Rm)(?:\n?[a-z]{3}$)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat10-crlf-cr"
-regex = '(?Rm)(?:\r?[a-z]{3}$)*'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 11]]
-
-[[test]]
-name = "repeat10-no-multi"
-regex = '(?:\n?[a-z]{3}$)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]]
-
-[[test]]
-name = "repeat10-no-multi-crlf"
-regex = '(?R)(?:\n?[a-z]{3}$)*'
-haystack = "abc\ndef\nxyz"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]]
-
-[[test]]
-name = "repeat10-no-multi-crlf-cr"
-regex = '(?R)(?:\r?[a-z]{3}$)*'
-haystack = "abc\rdef\rxyz"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]]
-
-[[test]]
-name = "repeat11"
-regex = '(?m)^*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat11-crlf"
-regex = '(?Rm)^*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat11-crlf-cr"
-regex = '(?Rm)^*'
-haystack = "\raa\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat11-no-multi"
-regex = '^*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat11-no-multi-crlf"
-regex = '(?R)^*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat11-no-multi-crlf-cr"
-regex = '(?R)^*'
-haystack = "\raa\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat12"
-regex = '(?m)^+'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [4, 4]]
-
-[[test]]
-name = "repeat12-crlf"
-regex = '(?Rm)^+'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [4, 4]]
-
-[[test]]
-name = "repeat12-crlf-cr"
-regex = '(?Rm)^+'
-haystack = "\raa\r"
-matches = [[0, 0], [1, 1], [4, 4]]
-
-[[test]]
-name = "repeat12-no-multi"
-regex = '^+'
-haystack = "\naa\n"
-matches = [[0, 0]]
-
-[[test]]
-name = "repeat12-no-multi-crlf"
-regex = '(?R)^+'
-haystack = "\naa\n"
-matches = [[0, 0]]
-
-[[test]]
-name = "repeat12-no-multi-crlf-cr"
-regex = '(?R)^+'
-haystack = "\raa\r"
-matches = [[0, 0]]
-
-[[test]]
-name = "repeat13"
-regex = '(?m)$*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat13-crlf"
-regex = '(?Rm)$*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat13-crlf-cr"
-regex = '(?Rm)$*'
-haystack = "\raa\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat13-no-multi"
-regex = '$*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat13-no-multi-crlf"
-regex = '(?R)$*'
-haystack = "\naa\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat13-no-multi-crlf-cr"
-regex = '(?R)$*'
-haystack = "\raa\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat14"
-regex = '(?m)$+'
-haystack = "\naa\n"
-matches = [[0, 0], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat14-crlf"
-regex = '(?Rm)$+'
-haystack = "\naa\n"
-matches = [[0, 0], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat14-crlf-cr"
-regex = '(?Rm)$+'
-haystack = "\raa\r"
-matches = [[0, 0], [3, 3], [4, 4]]
-
-[[test]]
-name = "repeat14-no-multi"
-regex = '$+'
-haystack = "\naa\n"
-matches = [[4, 4]]
-
-[[test]]
-name = "repeat14-no-multi-crlf"
-regex = '(?R)$+'
-haystack = "\naa\n"
-matches = [[4, 4]]
-
-[[test]]
-name = "repeat14-no-multi-crlf-cr"
-regex = '(?R)$+'
-haystack = "\raa\r"
-matches = [[4, 4]]
-
-[[test]]
-name = "repeat15"
-regex = '(?m)(?:$\n)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 2], [5, 7]]
-
-[[test]]
-name = "repeat15-crlf"
-regex = '(?Rm)(?:$\n)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 2], [5, 7]]
-
-[[test]]
-name = "repeat15-crlf-cr"
-regex = '(?Rm)(?:$\r)+'
-haystack = "\r\raaa\r\r"
-matches = [[0, 2], [5, 7]]
-
-[[test]]
-name = "repeat15-no-multi"
-regex = '(?:$\n)+'
-haystack = "\n\naaa\n\n"
-matches = []
-
-[[test]]
-name = "repeat15-no-multi-crlf"
-regex = '(?R)(?:$\n)+'
-haystack = "\n\naaa\n\n"
-matches = []
-
-[[test]]
-name = "repeat15-no-multi-crlf-cr"
-regex = '(?R)(?:$\r)+'
-haystack = "\r\raaa\r\r"
-matches = []
-
-[[test]]
-name = "repeat16"
-regex = '(?m)(?:$\n)*'
-haystack = "\n\naaa\n\n"
-matches = [[0, 2], [3, 3], [4, 4], [5, 7]]
-
-[[test]]
-name = "repeat16-crlf"
-regex = '(?Rm)(?:$\n)*'
-haystack = "\n\naaa\n\n"
-matches = [[0, 2], [3, 3], [4, 4], [5, 7]]
-
-[[test]]
-name = "repeat16-crlf-cr"
-regex = '(?Rm)(?:$\r)*'
-haystack = "\r\raaa\r\r"
-matches = [[0, 2], [3, 3], [4, 4], [5, 7]]
-
-[[test]]
-name = "repeat16-no-multi"
-regex = '(?:$\n)*'
-haystack = "\n\naaa\n\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]]
-
-[[test]]
-name = "repeat16-no-multi-crlf"
-regex = '(?R)(?:$\n)*'
-haystack = "\n\naaa\n\n"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]]
-
-[[test]]
-name = "repeat16-no-multi-crlf-cr"
-regex = '(?R)(?:$\r)*'
-haystack = "\r\raaa\r\r"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]]
-
-[[test]]
-name = "repeat17"
-regex = '(?m)(?:$\n^)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 2], [5, 7]]
-
-[[test]]
-name = "repeat17-crlf"
-regex = '(?Rm)(?:$\n^)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 2], [5, 7]]
-
-[[test]]
-name = "repeat17-crlf-cr"
-regex = '(?Rm)(?:$\r^)+'
-haystack = "\r\raaa\r\r"
-matches = [[0, 2], [5, 7]]
-
-[[test]]
-name = "repeat17-no-multi"
-regex = '(?:$\n^)+'
-haystack = "\n\naaa\n\n"
-matches = []
-
-[[test]]
-name = "repeat17-no-multi-crlf"
-regex = '(?R)(?:$\n^)+'
-haystack = "\n\naaa\n\n"
-matches = []
-
-[[test]]
-name = "repeat17-no-multi-crlf-cr"
-regex = '(?R)(?:$\r^)+'
-haystack = "\r\raaa\r\r"
-matches = []
-
-[[test]]
-name = "repeat18"
-regex = '(?m)(?:^|$)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]]
-
-[[test]]
-name = "repeat18-crlf"
-regex = '(?Rm)(?:^|$)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]]
-
-[[test]]
-name = "repeat18-crlf-cr"
-regex = '(?Rm)(?:^|$)+'
-haystack = "\r\raaa\r\r"
-matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]]
-
-[[test]]
-name = "repeat18-no-multi"
-regex = '(?:^|$)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 0], [7, 7]]
-
-[[test]]
-name = "repeat18-no-multi-crlf"
-regex = '(?R)(?:^|$)+'
-haystack = "\n\naaa\n\n"
-matches = [[0, 0], [7, 7]]
-
-[[test]]
-name = "repeat18-no-multi-crlf-cr"
-regex = '(?R)(?:^|$)+'
-haystack = "\r\raaa\r\r"
-matches = [[0, 0], [7, 7]]
-
-[[test]]
-name = "match-line-100"
-regex = '(?m)^.+$'
-haystack = "aa\naaaaaaaaaaaaaaaaaaa\n"
-matches = [[0, 2], [3, 22]]
-
-[[test]]
-name = "match-line-100-crlf"
-regex = '(?Rm)^.+$'
-haystack = "aa\naaaaaaaaaaaaaaaaaaa\n"
-matches = [[0, 2], [3, 22]]
-
-[[test]]
-name = "match-line-100-crlf-cr"
-regex = '(?Rm)^.+$'
-haystack = "aa\raaaaaaaaaaaaaaaaaaa\r"
-matches = [[0, 2], [3, 22]]
-
-[[test]]
-name = "match-line-200"
-regex = '(?m)^.+$'
-haystack = "aa\naaaaaaaaaaaaaaaaaaa\n"
-matches = [[0, 2], [3, 22]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "match-line-200-crlf"
-regex = '(?Rm)^.+$'
-haystack = "aa\naaaaaaaaaaaaaaaaaaa\n"
-matches = [[0, 2], [3, 22]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "match-line-200-crlf-cr"
-regex = '(?Rm)^.+$'
-haystack = "aa\raaaaaaaaaaaaaaaaaaa\r"
-matches = [[0, 2], [3, 22]]
-unicode = false
-utf8 = false
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/no-unicode.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/no-unicode.toml
deleted file mode 100644
index 0ddac4c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/no-unicode.toml
+++ /dev/null
@@ -1,222 +0,0 @@
-[[test]]
-name = "invalid-utf8-literal1"
-regex = '\xFF'
-haystack = '\xFF'
-matches = [[0, 1]]
-unicode = false
-utf8 = false
-unescape = true
-
-
-[[test]]
-name = "mixed"
-regex = '(?:.+)(?-u)(?:.+)'
-haystack = '\xCE\x93\xCE\x94\xFF'
-matches = [[0, 5]]
-utf8 = false
-unescape = true
-
-
-[[test]]
-name = "case1"
-regex = "a"
-haystack = "A"
-matches = [[0, 1]]
-case-insensitive = true
-unicode = false
-
-[[test]]
-name = "case2"
-regex = "[a-z]+"
-haystack = "AaAaA"
-matches = [[0, 5]]
-case-insensitive = true
-unicode = false
-
-[[test]]
-name = "case3"
-regex = "[a-z]+"
-haystack = "aA\u212AaA"
-matches = [[0, 7]]
-case-insensitive = true
-
-[[test]]
-name = "case4"
-regex = "[a-z]+"
-haystack = "aA\u212AaA"
-matches = [[0, 2], [5, 7]]
-case-insensitive = true
-unicode = false
-
-
-[[test]]
-name = "negate1"
-regex = "[^a]"
-haystack = "δ"
-matches = [[0, 2]]
-
-[[test]]
-name = "negate2"
-regex = "[^a]"
-haystack = "δ"
-matches = [[0, 1], [1, 2]]
-unicode = false
-utf8 = false
-
-
-[[test]]
-name = "dotstar-prefix1"
-regex = "a"
-haystack = '\xFFa'
-matches = [[1, 2]]
-unicode = false
-utf8 = false
-unescape = true
-
-[[test]]
-name = "dotstar-prefix2"
-regex = "a"
-haystack = '\xFFa'
-matches = [[1, 2]]
-utf8 = false
-unescape = true
-
-
-[[test]]
-name = "null-bytes1"
-regex = '[^\x00]+\x00'
-haystack = 'foo\x00'
-matches = [[0, 4]]
-unicode = false
-utf8 = false
-unescape = true
-
-
-[[test]]
-name = "word-ascii"
-regex = '\w+'
-haystack = "aδ"
-matches = [[0, 1]]
-unicode = false
-
-[[test]]
-name = "word-unicode"
-regex = '\w+'
-haystack = "aδ"
-matches = [[0, 3]]
-
-[[test]]
-name = "decimal-ascii"
-regex = '\d+'
-haystack = "1à„šà„©9"
-matches = [[0, 1], [7, 8]]
-unicode = false
-
-[[test]]
-name = "decimal-unicode"
-regex = '\d+'
-haystack = "1à„šà„©9"
-matches = [[0, 8]]
-
-[[test]]
-name = "space-ascii"
-regex = '\s+'
-haystack = " \u1680"
-matches = [[0, 1]]
-unicode = false
-
-[[test]]
-name = "space-unicode"
-regex = '\s+'
-haystack = " \u1680"
-matches = [[0, 4]]
-
-
-[[test]]
-# See: https://github.com/rust-lang/regex/issues/484
-name = "iter1-bytes"
-regex = ''
-haystack = "☃"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-utf8 = false
-
-[[test]]
-# See: https://github.com/rust-lang/regex/issues/484
-name = "iter1-utf8"
-regex = ''
-haystack = "☃"
-matches = [[0, 0], [3, 3]]
-
-[[test]]
-# See: https://github.com/rust-lang/regex/issues/484
-# Note that iter2-utf8 doesn't make sense here, since the input isn't UTF-8.
-name = "iter2-bytes"
-regex = ''
-haystack = 'b\xFFr'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-unescape = true
-utf8 = false
-
-
-# These test that unanchored prefixes can munch through invalid UTF-8 even when
-# utf8 is enabled.
-#
-# This test actually reflects an interesting simplification in how the Thompson
-# NFA is constructed. It used to be that the NFA could be built with an
-# unanchored prefix that either matched any byte or _only_ matched valid UTF-8.
-# But the latter turns out to be pretty precarious when it comes to prefilters,
-# because if you search a haystack that contains invalid UTF-8 but have an
-# unanchored prefix that requires UTF-8, then prefilters are no longer a valid
-# optimization because you actually have to check that everything is valid
-# UTF-8.
-#
-# Originally, I had thought that we needed a valid UTF-8 unanchored prefix in
-# order to guarantee that we only match at valid UTF-8 boundaries. But this
-# isn't actually true! There are really only two things to consider here:
-#
-# 1) Will a regex match split an encoded codepoint? No. Because by construction,
-# we ensure that a MATCH state can only be reached by following valid UTF-8 (assuming
-# all of the UTF-8 modes are enabled).
-#
-# 2) Will a regex match arbitrary bytes that aren't valid UTF-8? Again, no,
-# assuming all of the UTF-8 modes are enabled.
-[[test]]
-name = "unanchored-invalid-utf8-match-100"
-regex = '[a-z]'
-haystack = '\xFFa\xFF'
-matches = [[1, 2]]
-unescape = true
-utf8 = false
-
-# This test shows that we can still prevent a match from occurring by requiring
-# that valid UTF-8 match by inserting our own unanchored prefix. Thus, if the
-# behavior of not munching through invalid UTF-8 anywhere is needed, then it
-# can be achieved thusly.
-[[test]]
-name = "unanchored-invalid-utf8-nomatch"
-regex = '^(?s:.)*?[a-z]'
-haystack = '\xFFa\xFF'
-matches = []
-unescape = true
-utf8 = false
-
-# This is a tricky test that makes sure we don't accidentally do a kind of
-# unanchored search when we've requested that a regex engine not report
-# empty matches that split a codepoint. This test caught a regression during
-# development where the code for skipping over bad empty matches would do so
-# even if the search should have been anchored. This is ultimately what led to
-# making 'anchored' an 'Input' option, so that it was always clear what kind
-# of search was being performed. (Before that, whether a search was anchored
-# or not was a config knob on the regex engine.) This did wind up making DFAs
-# a little more complex to configure (with their 'StartKind' knob), but it
-# generally smoothed out everything else.
-#
-# Great example of a test whose failure motivated a sweeping API refactoring.
-[[test]]
-name = "anchored-iter-empty-utf8"
-regex = ''
-haystack = 'a☃z'
-matches = [[0, 0], [1, 1]]
-unescape = false
-utf8 = true
-anchored = true
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/overlapping.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/overlapping.toml
deleted file mode 100644
index 7bcd45a2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/overlapping.toml
+++ /dev/null
@@ -1,280 +0,0 @@
-# NOTE: We define a number of tests where the *match* kind is 'leftmost-first'
-# but the *search* kind is 'overlapping'. This is a somewhat nonsensical
-# combination and can produce odd results. Nevertheless, those results should
-# be consistent so we test them here. (At the time of writing this note, I
-# hadn't yet decided whether to make 'leftmost-first' with 'overlapping' result
-# in unspecified behavior.)
-
-# This demonstrates how a full overlapping search is obvious quadratic. This
-# regex reports a match for every substring in the haystack.
-[[test]]
-name = "ungreedy-dotstar-matches-everything-100"
-regex = [".*?"]
-haystack = "zzz"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [0, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [1, 2] },
-  { id = 0, span = [0, 2] },
-  { id = 0, span = [3, 3] },
-  { id = 0, span = [2, 3] },
-  { id = 0, span = [1, 3] },
-  { id = 0, span = [0, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "greedy-dotstar-matches-everything-100"
-regex = [".*"]
-haystack = "zzz"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [0, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [1, 2] },
-  { id = 0, span = [0, 2] },
-  { id = 0, span = [3, 3] },
-  { id = 0, span = [2, 3] },
-  { id = 0, span = [1, 3] },
-  { id = 0, span = [0, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-plus-leftmost-first-100"
-regex = 'a+'
-haystack = "aaa"
-matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-plus-leftmost-first-110"
-regex = '☃+'
-haystack = "☃☃☃"
-matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-plus-all-100"
-regex = 'a+'
-haystack = "aaa"
-matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-plus-all-110"
-regex = '☃+'
-haystack = "☃☃☃"
-matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-plus-leftmost-first-200"
-regex = '(abc)+'
-haystack = "zzabcabczzabc"
-matches = [
-  [[2, 5], [2, 5]],
-  [[5, 8], [5, 8]],
-  [[2, 8], [5, 8]],
-]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-plus-all-200"
-regex = '(abc)+'
-haystack = "zzabcabczzabc"
-matches = [
-  [[2, 5], [2, 5]],
-  [[5, 8], [5, 8]],
-  [[2, 8], [5, 8]],
-  [[10, 13], [10, 13]],
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-star-leftmost-first-100"
-regex = 'a*'
-haystack = "aaa"
-matches = [
-  [0, 0],
-  [1, 1],
-  [0, 1],
-  [2, 2],
-  [1, 2],
-  [0, 2],
-  [3, 3],
-  [2, 3],
-  [1, 3],
-  [0, 3],
-]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-star-all-100"
-regex = 'a*'
-haystack = "aaa"
-matches = [
-  [0, 0],
-  [1, 1],
-  [0, 1],
-  [2, 2],
-  [1, 2],
-  [0, 2],
-  [3, 3],
-  [2, 3],
-  [1, 3],
-  [0, 3],
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-star-leftmost-first-200"
-regex = '(abc)*'
-haystack = "zzabcabczzabc"
-matches = [
-  [[0, 0], []],
-]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
-
-[[test]]
-name = "repetition-star-all-200"
-regex = '(abc)*'
-haystack = "zzabcabczzabc"
-matches = [
-  [[0, 0], []],
-  [[1, 1], []],
-  [[2, 2], []],
-  [[3, 3], []],
-  [[4, 4], []],
-  [[5, 5], []],
-  [[2, 5], [2, 5]],
-  [[6, 6], []],
-  [[7, 7], []],
-  [[8, 8], []],
-  [[5, 8], [5, 8]],
-  [[2, 8], [5, 8]],
-  [[9, 9], []],
-  [[10, 10], []],
-  [[11, 11], []],
-  [[12, 12], []],
-  [[13, 13], []],
-  [[10, 13], [10, 13]],
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "start-end-rep-leftmost-first"
-regex = '(^$)*'
-haystack = "abc"
-matches = [
-  [[0, 0], []],
-]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
-
-[[test]]
-name = "start-end-rep-all"
-regex = '(^$)*'
-haystack = "abc"
-matches = [
-  [[0, 0], []],
-  [[1, 1], []],
-  [[2, 2], []],
-  [[3, 3], []],
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "alt-leftmost-first-100"
-regex = 'abc|a'
-haystack = "zzabcazzaabc"
-matches = [[2, 3], [2, 5]]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
-
-[[test]]
-name = "alt-all-100"
-regex = 'abc|a'
-haystack = "zzabcazzaabc"
-matches = [[2, 3], [2, 5], [5, 6], [8, 9], [9, 10], [9, 12]]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty-000"
-regex = ""
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty-alt-000"
-regex = "|b"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty-alt-010"
-regex = "b|"
-haystack = "abc"
-matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-# See: https://github.com/rust-lang/regex/issues/484
-name = "iter1-bytes"
-regex = ''
-haystack = "☃"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-# See: https://github.com/rust-lang/regex/issues/484
-name = "iter1-utf8"
-regex = ''
-haystack = "☃"
-matches = [[0, 0], [3, 3]]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "iter1-incomplete-utf8"
-regex = ''
-haystack = '\xE2\x98'  # incomplete snowman
-matches = [[0, 0], [1, 1], [2, 2]]
-match-kind = "all"
-search-kind = "overlapping"
-unescape = true
-utf8 = false
-
-[[test]]
-name = "scratch"
-regex = ['sam', 'samwise']
-haystack = "samwise"
-matches = [
-  { id = 0, span = [0, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "overlapping"
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/regex-lite.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/regex-lite.toml
deleted file mode 100644
index 1769d80..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/regex-lite.toml
+++ /dev/null
@@ -1,98 +0,0 @@
-# These tests are specifically written to test the regex-lite crate. While it
-# largely has the same semantics as the regex crate, there are some differences
-# around Unicode support and UTF-8.
-#
-# To be clear, regex-lite supports far fewer patterns because of its lack of
-# Unicode support, nested character classes and character class set operations.
-# What we're talking about here are the patterns that both crates support but
-# where the semantics might differ.
-
-# regex-lite uses ASCII definitions for Perl character classes.
-[[test]]
-name = "perl-class-decimal"
-regex = '\d'
-haystack = '᠕'
-matches = []
-unicode = true
-
-# regex-lite uses ASCII definitions for Perl character classes.
-[[test]]
-name = "perl-class-space"
-regex = '\s'
-haystack = "\u2000"
-matches = []
-unicode = true
-
-# regex-lite uses ASCII definitions for Perl character classes.
-[[test]]
-name = "perl-class-word"
-regex = '\w'
-haystack = 'δ'
-matches = []
-unicode = true
-
-# regex-lite uses the ASCII definition of word for word boundary assertions.
-[[test]]
-name = "word-boundary"
-regex = '\b'
-haystack = 'δ'
-matches = []
-unicode = true
-
-# regex-lite uses the ASCII definition of word for negated word boundary
-# assertions. But note that it should still not split codepoints!
-[[test]]
-name = "word-boundary-negated"
-regex = '\B'
-haystack = 'δ'
-matches = [[0, 0], [2, 2]]
-unicode = true
-
-# While we're here, the empty regex---which matches at every
-# position---shouldn't split a codepoint either.
-[[test]]
-name = "empty-no-split-codepoint"
-regex = ''
-haystack = 'đŸ’©'
-matches = [[0, 0], [4, 4]]
-unicode = true
-
-# A dot always matches a full codepoint.
-[[test]]
-name = "dot-always-matches-codepoint"
-regex = '.'
-haystack = 'đŸ’©'
-matches = [[0, 4]]
-unicode = false
-
-# A negated character class also always matches a full codepoint.
-[[test]]
-name = "negated-class-always-matches-codepoint"
-regex = '[^a]'
-haystack = 'đŸ’©'
-matches = [[0, 4]]
-unicode = false
-
-# regex-lite only supports ASCII-aware case insensitive matching.
-[[test]]
-name = "case-insensitive-is-ascii-only"
-regex = 's'
-haystack = 'Ćż'
-matches = []
-unicode = true
-case-insensitive = true
-
-# Negated word boundaries shouldn't split a codepoint, but they will match
-# between invalid UTF-8.
-#
-# This test is only valid for a 'bytes' API, but that doesn't (yet) exist in
-# regex-lite. This can't happen in the main API because &str can't contain
-# invalid UTF-8.
-# [[test]]
-# name = "word-boundary-invalid-utf8"
-# regex = '\B'
-# haystack = '\xFF\xFF\xFF\xFF'
-# unescape = true
-# matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-# unicode = true
-# utf8 = false
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/regression.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/regression.toml
deleted file mode 100644
index 53b0701..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/regression.toml
+++ /dev/null
@@ -1,830 +0,0 @@
-# See: https://github.com/rust-lang/regex/issues/48
-[[test]]
-name = "invalid-regex-no-crash-100"
-regex = '(*)'
-haystack = ""
-matches = []
-compiles = false
-
-# See: https://github.com/rust-lang/regex/issues/48
-[[test]]
-name = "invalid-regex-no-crash-200"
-regex = '(?:?)'
-haystack = ""
-matches = []
-compiles = false
-
-# See: https://github.com/rust-lang/regex/issues/48
-[[test]]
-name = "invalid-regex-no-crash-300"
-regex = '(?)'
-haystack = ""
-matches = []
-compiles = false
-
-# See: https://github.com/rust-lang/regex/issues/48
-[[test]]
-name = "invalid-regex-no-crash-400"
-regex = '*'
-haystack = ""
-matches = []
-compiles = false
-
-# See: https://github.com/rust-lang/regex/issues/75
-[[test]]
-name = "unsorted-binary-search-100"
-regex = '(?i-u)[a_]+'
-haystack = "A_"
-matches = [[0, 2]]
-
-# See: https://github.com/rust-lang/regex/issues/75
-[[test]]
-name = "unsorted-binary-search-200"
-regex = '(?i-u)[A_]+'
-haystack = "a_"
-matches = [[0, 2]]
-
-# See: https://github.com/rust-lang/regex/issues/76
-[[test]]
-name = "unicode-case-lower-nocase-flag"
-regex = '(?i)\p{Ll}+'
-haystack = "ΛΘΓΔα"
-matches = [[0, 10]]
-
-# See: https://github.com/rust-lang/regex/issues/99
-[[test]]
-name = "negated-char-class-100"
-regex = '(?i)[^x]'
-haystack = "x"
-matches = []
-
-# See: https://github.com/rust-lang/regex/issues/99
-[[test]]
-name = "negated-char-class-200"
-regex = '(?i)[^x]'
-haystack = "X"
-matches = []
-
-# See: https://github.com/rust-lang/regex/issues/101
-[[test]]
-name = "ascii-word-underscore"
-regex = '[[:word:]]'
-haystack = "_"
-matches = [[0, 1]]
-
-# See: https://github.com/rust-lang/regex/issues/129
-[[test]]
-name = "captures-repeat"
-regex = '([a-f]){2}(?P<foo>[x-z])'
-haystack = "abx"
-matches = [
-  [[0, 3], [1, 2], [2, 3]],
-]
-
-# See: https://github.com/rust-lang/regex/issues/153
-[[test]]
-name = "alt-in-alt-100"
-regex = 'ab?|$'
-haystack = "az"
-matches = [[0, 1], [2, 2]]
-
-# See: https://github.com/rust-lang/regex/issues/153
-[[test]]
-name = "alt-in-alt-200"
-regex = '^(?:.*?)(?:\n|\r\n?|$)'
-haystack = "ab\rcd"
-matches = [[0, 3]]
-
-# See: https://github.com/rust-lang/regex/issues/169
-[[test]]
-name = "leftmost-first-prefix"
-regex = 'z*azb'
-haystack = "azb"
-matches = [[0, 3]]
-
-# See: https://github.com/rust-lang/regex/issues/191
-[[test]]
-name = "many-alternates"
-regex = '1|2|3|4|5|6|7|8|9|10|int'
-haystack = "int"
-matches = [[0, 3]]
-
-# See: https://github.com/rust-lang/regex/issues/204
-[[test]]
-name = "word-boundary-alone-100"
-regex = '\b'
-haystack = "Should this (work?)"
-matches = [[0, 0], [6, 6], [7, 7], [11, 11], [13, 13], [17, 17]]
-
-# See: https://github.com/rust-lang/regex/issues/204
-[[test]]
-name = "word-boundary-alone-200"
-regex = '\b'
-haystack = "a b c"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-
-# See: https://github.com/rust-lang/regex/issues/264
-[[test]]
-name = "word-boundary-ascii-no-capture"
-regex = '\B'
-haystack = "\U00028F3E"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-unicode = false
-utf8 = false
-
-# See: https://github.com/rust-lang/regex/issues/264
-[[test]]
-name = "word-boundary-ascii-capture"
-regex = '(?:\B)'
-haystack = "\U00028F3E"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-unicode = false
-utf8 = false
-
-# See: https://github.com/rust-lang/regex/issues/268
-[[test]]
-name = "partial-anchor"
-regex = '^a|b'
-haystack = "ba"
-matches = [[0, 1]]
-
-# See: https://github.com/rust-lang/regex/issues/271
-[[test]]
-name = "endl-or-word-boundary"
-regex = '(?m:$)|(?-u:\b)'
-haystack = "\U0006084E"
-matches = [[4, 4]]
-
-# See: https://github.com/rust-lang/regex/issues/271
-[[test]]
-name = "zero-or-end"
-regex = '(?i-u:\x00)|$'
-haystack = "\U000E682F"
-matches = [[4, 4]]
-
-# See: https://github.com/rust-lang/regex/issues/271
-[[test]]
-name = "y-or-endl"
-regex = '(?i-u:y)|(?m:$)'
-haystack = "\U000B4331"
-matches = [[4, 4]]
-
-# See: https://github.com/rust-lang/regex/issues/271
-[[test]]
-name = "word-boundary-start-x"
-regex = '(?u:\b)^(?-u:X)'
-haystack = "X"
-matches = [[0, 1]]
-
-# See: https://github.com/rust-lang/regex/issues/271
-[[test]]
-name = "word-boundary-ascii-start-x"
-regex = '(?-u:\b)^(?-u:X)'
-haystack = "X"
-matches = [[0, 1]]
-
-# See: https://github.com/rust-lang/regex/issues/271
-[[test]]
-name = "end-not-word-boundary"
-regex = '$\B'
-haystack = "\U0005C124\U000B576C"
-matches = [[8, 8]]
-unicode = false
-utf8 = false
-
-# See: https://github.com/rust-lang/regex/issues/280
-[[test]]
-name = "partial-anchor-alternate-begin"
-regex = '^a|z'
-haystack = "yyyyya"
-matches = []
-
-# See: https://github.com/rust-lang/regex/issues/280
-[[test]]
-name = "partial-anchor-alternate-end"
-regex = 'a$|z'
-haystack = "ayyyyy"
-matches = []
-
-# See: https://github.com/rust-lang/regex/issues/289
-[[test]]
-name = "lits-unambiguous-100"
-regex = '(?:ABC|CDA|BC)X'
-haystack = "CDAX"
-matches = [[0, 4]]
-
-# See: https://github.com/rust-lang/regex/issues/291
-[[test]]
-name = "lits-unambiguous-200"
-regex = '((IMG|CAM|MG|MB2)_|(DSCN|CIMG))(?P<n>[0-9]+)$'
-haystack = "CIMG2341"
-matches = [
-  [[0, 8], [0, 4], [], [0, 4], [4, 8]],
-]
-
-# See: https://github.com/rust-lang/regex/issues/303
-#
-# 2022-09-19: This has now been "properly" fixed in that empty character
-# classes are fully supported as something that can never match. This test
-# used to be marked as 'compiles = false', but now it works.
-[[test]]
-name = "negated-full-byte-range"
-regex = '[^\x00-\xFF]'
-haystack = ""
-matches = []
-compiles = true
-unicode = false
-utf8 = false
-
-# See: https://github.com/rust-lang/regex/issues/321
-[[test]]
-name = "strange-anchor-non-complete-prefix"
-regex = 'a^{2}'
-haystack = ""
-matches = []
-
-# See: https://github.com/rust-lang/regex/issues/321
-[[test]]
-name = "strange-anchor-non-complete-suffix"
-regex = '${2}a'
-haystack = ""
-matches = []
-
-# See: https://github.com/rust-lang/regex/issues/334
-# See: https://github.com/rust-lang/regex/issues/557
-[[test]]
-name = "captures-after-dfa-premature-end-100"
-regex = 'a(b*(X|$))?'
-haystack = "abcbX"
-matches = [
-  [[0, 1], [], []],
-]
-
-# See: https://github.com/rust-lang/regex/issues/334
-# See: https://github.com/rust-lang/regex/issues/557
-[[test]]
-name = "captures-after-dfa-premature-end-200"
-regex = 'a(bc*(X|$))?'
-haystack = "abcbX"
-matches = [
-  [[0, 1], [], []],
-]
-
-# See: https://github.com/rust-lang/regex/issues/334
-# See: https://github.com/rust-lang/regex/issues/557
-[[test]]
-name = "captures-after-dfa-premature-end-300"
-regex = '(aa$)?'
-haystack = "aaz"
-matches = [
-  [[0, 0], []],
-  [[1, 1], []],
-  [[2, 2], []],
-  [[3, 3], []],
-]
-
-# Plucked from "Why aren’t regular expressions a lingua franca? an empirical
-# study on the re-use and portability of regular expressions", The ACM Joint
-# European Software Engineering Conference and Symposium on the Foundations of
-# Software Engineering (ESEC/FSE), 2019.
-#
-# Link: https://dl.acm.org/doi/pdf/10.1145/3338906.3338909
-[[test]]
-name = "captures-after-dfa-premature-end-400"
-regex = '(a)\d*\.?\d+\b'
-haystack = "a0.0c"
-matches = [
-  [[0, 2], [0, 1]],
-]
-
-# See: https://github.com/rust-lang/regex/issues/437
-[[test]]
-name = "literal-panic"
-regex = 'typename type\-parameter\-[0-9]+\-[0-9]+::.+'
-haystack = "test"
-matches = []
-
-# See: https://github.com/rust-lang/regex/issues/527
-[[test]]
-name = "empty-flag-expr"
-regex = '(?:(?:(?x)))'
-haystack = ""
-matches = [[0, 0]]
-
-# See: https://github.com/rust-lang/regex/issues/533
-#[[tests]]
-#name = "blank-matches-nothing-between-space-and-tab"
-#regex = '[[:blank:]]'
-#input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F'
-#match = false
-#unescape = true
-
-# See: https://github.com/rust-lang/regex/issues/533
-#[[tests]]
-#name = "blank-matches-nothing-between-space-and-tab-inverted"
-#regex = '^[[:^blank:]]+$'
-#input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F'
-#match = true
-#unescape = true
-
-# See: https://github.com/rust-lang/regex/issues/555
-[[test]]
-name = "invalid-repetition"
-regex = '(?m){1,1}'
-haystack = ""
-matches = []
-compiles = false
-
-# See: https://github.com/rust-lang/regex/issues/640
-[[test]]
-name = "flags-are-unset"
-regex = '(?:(?i)foo)|Bar'
-haystack = "foo Foo bar Bar"
-matches = [[0, 3], [4, 7], [12, 15]]
-
-# Note that 'Ј' is not 'j', but cyrillic Je
-# https://en.wikipedia.org/wiki/Je_(Cyrillic)
-#
-# See: https://github.com/rust-lang/regex/issues/659
-[[test]]
-name = "empty-group-with-unicode"
-regex = '(?:)Ј01'
-haystack = 'zЈ01'
-matches = [[1, 5]]
-
-# See: https://github.com/rust-lang/regex/issues/579
-[[test]]
-name = "word-boundary-weird"
-regex = '\b..\b'
-haystack = "I have 12, he has 2!"
-matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]]
-
-# See: https://github.com/rust-lang/regex/issues/579
-[[test]]
-name = "word-boundary-weird-ascii"
-regex = '\b..\b'
-haystack = "I have 12, he has 2!"
-matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]]
-unicode = false
-utf8 = false
-
-# See: https://github.com/rust-lang/regex/issues/579
-[[test]]
-name = "word-boundary-weird-minimal-ascii"
-regex = '\b..\b'
-haystack = "az,,b"
-matches = [[0, 2], [2, 4]]
-unicode = false
-utf8 = false
-
-# See: https://github.com/BurntSushi/ripgrep/issues/1203
-[[test]]
-name = "reverse-suffix-100"
-regex = '[0-4][0-4][0-4]000'
-haystack = "153.230000"
-matches = [[4, 10]]
-
-# See: https://github.com/BurntSushi/ripgrep/issues/1203
-[[test]]
-name = "reverse-suffix-200"
-regex = '[0-9][0-9][0-9]000'
-haystack = "153.230000\n"
-matches = [[4, 10]]
-
-# This is a tricky case for the reverse suffix optimization, because it
-# finds the 'foobar' match but the reverse scan must fail to find a match by
-# correctly dealing with the word boundary following the 'foobar' literal when
-# computing the start state.
-#
-# This test exists because I tried to break the following assumption that
-# is currently in the code: that if a suffix is found and the reverse scan
-# succeeds, then it's guaranteed that there is an overall match. Namely, the
-# 'is_match' routine does *not* do another forward scan in this case because of
-# this assumption.
-[[test]]
-name = "reverse-suffix-300"
-regex = '\w+foobar\b'
-haystack = "xyzfoobarZ"
-matches = []
-unicode = false
-utf8 = false
-
-# See: https://github.com/BurntSushi/ripgrep/issues/1247
-[[test]]
-name = "stops"
-regex = '\bs(?:[ab])'
-haystack = 's\xE4'
-matches = []
-unescape = true
-utf8 = false
-
-# See: https://github.com/BurntSushi/ripgrep/issues/1247
-[[test]]
-name = "stops-ascii"
-regex = '(?-u:\b)s(?:[ab])'
-haystack = 's\xE4'
-matches = []
-unescape = true
-utf8 = false
-
-# See: https://github.com/rust-lang/regex/issues/850
-[[test]]
-name = "adjacent-line-boundary-100"
-regex = '(?m)^(?:[^ ]+?)$'
-haystack = "line1\nline2"
-matches = [[0, 5], [6, 11]]
-
-# Continued.
-[[test]]
-name = "adjacent-line-boundary-200"
-regex = '(?m)^(?:[^ ]+?)$'
-haystack = "A\nB"
-matches = [[0, 1], [2, 3]]
-
-# There is no issue for this bug.
-[[test]]
-name = "anchored-prefix-100"
-regex = '^a[[:^space:]]'
-haystack = "a "
-matches = []
-
-# There is no issue for this bug.
-[[test]]
-name = "anchored-prefix-200"
-regex = '^a[[:^space:]]'
-haystack = "foo boo a"
-matches = []
-
-# There is no issue for this bug.
-[[test]]
-name = "anchored-prefix-300"
-regex = '^-[a-z]'
-haystack = "r-f"
-matches = []
-
-# Tests that a possible Aho-Corasick optimization works correctly. It only
-# kicks in when we have a lot of literals. By "works correctly," we mean that
-# leftmost-first match semantics are properly respected. That is, samwise
-# should match, not sam.
-#
-# There is no issue for this bug.
-[[test]]
-name = "aho-corasick-100"
-regex = 'samwise|sam|a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p|q|r|s|t|u|v|w|x|y|z|A|B|C|D|E|F|G|H|I|J|K|L|M|N|O|P|Q|R|S|T|U|V|W|X|Y|Z'
-haystack = "samwise"
-matches = [[0, 7]]
-
-# See: https://github.com/rust-lang/regex/issues/921
-[[test]]
-name = "interior-anchor-capture"
-regex = '(a$)b$'
-haystack = 'ab'
-matches = []
-
-# I found this bug in the course of adding some of the regexes that Ruff uses
-# to rebar. It turns out that the lazy DFA was finding a match that was being
-# rejected by the one-pass DFA. Yikes. I then minimized the regex and haystack.
-#
-# Source: https://github.com/charliermarsh/ruff/blob/a919041ddaa64cdf6f216f90dd0480dab69fd3ba/crates/ruff/src/rules/pycodestyle/rules/whitespace_around_keywords.rs#L52
-[[test]]
-name = "ruff-whitespace-around-keywords"
-regex = '^(a|ab)$'
-haystack = "ab"
-anchored = true
-unicode = false
-utf8 = true
-matches = [[[0, 2], [0, 2]]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-0"
-regex = '(?:(?-u:\b)|(?u:h))+'
-haystack = "h"
-unicode = true
-utf8 = false
-matches = [[0, 0], [1, 1]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-1"
-regex = '(?u:\B)'
-haystack = "鋾"
-unicode = true
-utf8 = false
-matches = []
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-2"
-regex = '(?:(?u:\b)|(?s-u:.))+'
-haystack = "oB"
-unicode = true
-utf8 = false
-matches = [[0, 0], [1, 2]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-3"
-regex = '(?:(?-u:\B)|(?su:.))+'
-haystack = "\U000FEF80"
-unicode = true
-utf8 = false
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-3-utf8"
-regex = '(?:(?-u:\B)|(?su:.))+'
-haystack = "\U000FEF80"
-unicode = true
-utf8 = true
-matches = [[0, 0], [4, 4]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-4"
-regex = '(?m:$)(?m:^)(?su:.)'
-haystack = "\n‣"
-unicode = true
-utf8 = false
-matches = [[0, 1]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-5"
-regex = '(?m:$)^(?m:^)'
-haystack = "\n"
-unicode = true
-utf8 = false
-matches = [[0, 0]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-6"
-regex = '(?P<kp>(?iu:do)(?m:$))*'
-haystack = "dodo"
-unicode = true
-utf8 = false
-matches = [
-  [[0, 0], []],
-  [[1, 1], []],
-  [[2, 4], [2, 4]],
-]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-7"
-regex = '(?u:\B)'
-haystack = "䥁"
-unicode = true
-utf8 = false
-matches = []
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-8"
-regex = '(?:(?-u:\b)|(?u:[\u{0}-W]))+'
-haystack = "0"
-unicode = true
-utf8 = false
-matches = [[0, 0], [1, 1]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-9"
-regex = '((?m:$)(?-u:\B)(?s-u:.)(?-u:\B)$)'
-haystack = "\n\n"
-unicode = true
-utf8 = false
-matches = [
-  [[1, 2], [1, 2]],
-]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-10"
-regex = '(?m:$)(?m:$)^(?su:.)'
-haystack = "\n\u0081¨\u200a"
-unicode = true
-utf8 = false
-matches = [[0, 1]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-11"
-regex = '(?-u:\B)(?m:^)'
-haystack = "0\n"
-unicode = true
-utf8 = false
-matches = [[2, 2]]
-
-# From: https://github.com/rust-lang/regex/issues/429
-[[test]]
-name = "i429-12"
-regex = '(?:(?u:\b)|(?-u:.))+'
-haystack = "0"
-unicode = true
-utf8 = false
-matches = [[0, 0], [1, 1]]
-
-# From: https://github.com/rust-lang/regex/issues/969
-[[test]]
-name = "i969"
-regex = 'c.*d\z'
-haystack = "ababcd"
-bounds = [4, 6]
-search-kind = "earliest"
-matches = [[4, 6]]
-
-# I found this during the regex-automata migration. This is the fowler basic
-# 154 test, but without anchored = true and without a match limit.
-#
-# This test caught a subtle bug in the hybrid reverse DFA search, where it
-# would skip over the termination condition if it entered a start state. This
-# was a double bug. Firstly, the reverse DFA shouldn't have had start states
-# specialized in the first place, and thus it shouldn't have possible to detect
-# that the DFA had entered a start state. The second bug was that the start
-# state handling was incorrect by jumping over the termination condition.
-[[test]]
-name = "fowler-basic154-unanchored"
-regex = '''a([bc]*)c*'''
-haystack = '''abc'''
-matches = [[[0, 3], [1, 3]]]
-
-# From: https://github.com/rust-lang/regex/issues/981
-#
-# This was never really a problem in the new architecture because the
-# regex-automata engines are far more principled about how they deal with
-# look-around. (This was one of the many reasons I wanted to re-work the
-# original regex crate engines.)
-[[test]]
-name = "word-boundary-interact-poorly-with-literal-optimizations"
-regex = '(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))'
-haystack = 'ubi-Darwin-x86_64.tar.gz'
-matches = []
-
-# This was found during fuzz testing of regex. It provoked a panic in the meta
-# engine as a result of the reverse suffix optimization. Namely, it hit a case
-# where a suffix match was found, a corresponding reverse match was found, but
-# the forward search turned up no match. The forward search should always match
-# if the suffix and reverse search match.
-#
-# This in turn uncovered an inconsistency between the PikeVM and the DFA (lazy
-# and fully compiled) engines. It was caused by a mishandling of the collection
-# of NFA state IDs in the generic determinization code (which is why both types
-# of DFA were impacted). Namely, when a fail state was encountered (that's the
-# `[^\s\S]` in the pattern below), then it would just stop collecting states.
-# But that's not correct since a later state could lead to a match.
-[[test]]
-name = "impossible-branch"
-regex = '.*[^\s\S]A|B'
-haystack = "B"
-matches = [[0, 1]]
-
-# This was found during fuzz testing in regex-lite. The regex crate never
-# suffered from this bug, but it causes regex-lite to incorrectly compile
-# captures.
-[[test]]
-name = "captures-wrong-order"
-regex = '(a){0}(a)'
-haystack = 'a'
-matches = [[[0, 1], [], [0, 1]]]
-
-# This tests a bug in how quit states are handled in the DFA. At some point
-# during development, the DFAs were tweaked slightly such that if they hit
-# a quit state (which means, they hit a byte that the caller configured should
-# stop the search), then it might not return an error necessarily. Namely, if a
-# match had already been found, then it would be returned instead of an error.
-#
-# But this is actually wrong! Why? Because even though a match had been found,
-# it wouldn't be fully correct to return it once a quit state has been seen
-# because you can't determine whether the match offset returned is the correct
-# greedy/leftmost-first match. Since you can't complete the search as requested
-# by the caller, the DFA should just stop and return an error.
-#
-# Interestingly, this does seem to produce an unavoidable difference between
-# 'try_is_match().unwrap()' and 'try_find().unwrap().is_some()' for the DFAs.
-# The former will stop immediately once a match is known to occur and return
-# 'Ok(true)', where as the latter could find the match but quit with an
-# 'Err(..)' first.
-#
-# Thankfully, I believe this inconsistency between 'is_match()' and 'find()'
-# cannot be observed in the higher level meta regex API because it specifically
-# will try another engine that won't fail in the case of a DFA failing.
-#
-# This regression happened in the regex crate rewrite, but before anything got
-# released.
-[[test]]
-name = "negated-unicode-word-boundary-dfa-fail"
-regex = '\B.*'
-haystack = "!\u02D7"
-matches = [[0, 3]]
-
-# This failure was found in the *old* regex crate (prior to regex 1.9), but
-# I didn't investigate why. My best guess is that it's a literal optimization
-# bug. It didn't occur in the rewrite.
-[[test]]
-name = "missed-match"
-regex = 'e..+e.ee>'
-haystack = 'Zeee.eZZZZZZZZeee>eeeeeee>'
-matches = [[1, 26]]
-
-# This test came from the 'ignore' crate and tripped a bug in how accelerated
-# DFA states were handled in an overlapping search.
-[[test]]
-name = "regex-to-glob"
-regex = ['(?-u)^path1/[^/]*$']
-haystack = "path1/foo"
-matches = [[0, 9]]
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-# See: https://github.com/rust-lang/regex/issues/1060
-[[test]]
-name = "reverse-inner-plus-shorter-than-expected"
-regex = '(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})'
-haystack = '102:12:39'
-matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]]
-
-# Like reverse-inner-plus-shorter-than-expected, but using a far simpler regex
-# to demonstrate the extent of the rot. Sigh.
-#
-# See: https://github.com/rust-lang/regex/issues/1060
-[[test]]
-name = "reverse-inner-short"
-regex = '(?:([0-9][0-9][0-9]):)?([0-9][0-9]):([0-9][0-9])'
-haystack = '102:12:39'
-matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]]
-
-# This regression test was found via the RegexSet APIs. It triggered a
-# particular code path where a regex was compiled with 'All' match semantics
-# (to support overlapping search), but got funneled down into a standard
-# leftmost search when calling 'is_match'. This is fine on its own, but the
-# leftmost search will use a prefilter and that's where this went awry.
-#
-# Namely, since 'All' semantics were used, the aho-corasick prefilter was
-# incorrectly compiled with 'Standard' semantics. This was wrong because
-# 'Standard' immediately attempts to report a match at every position, even if
-# that would mean reporting a match past the leftmost match before reporting
-# the leftmost match. This breaks the prefilter contract of never having false
-# negatives and leads overall to the engine not finding a match.
-#
-# See: https://github.com/rust-lang/regex/issues/1070
-[[test]]
-name = "prefilter-with-aho-corasick-standard-semantics"
-regex = '(?m)^ *v [0-9]'
-haystack = 'v 0'
-matches = [
-  { id = 0, spans = [[0, 3]] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-unicode = true
-utf8 = true
-
-# This tests that the PikeVM and the meta regex agree on a particular regex.
-# This test previously failed when the ad hoc engines inside the meta engine
-# did not handle quit states correctly. Namely, the Unicode word boundary here
-# combined with a non-ASCII codepoint provokes the quit state. The ad hoc
-# engines were previously returning a match even after entering the quit state
-# if a match had been previously detected, but this is incorrect. The reason
-# is that if a quit state is found, then the search must give up *immediately*
-# because it prevents the search from finding the "proper" leftmost-first
-# match. If it instead returns a match that has been found, it risks reporting
-# an improper match, as it did in this case.
-#
-# See: https://github.com/rust-lang/regex/issues/1046
-[[test]]
-name = "non-prefix-literal-quit-state"
-regex = '.+\b\n'
-haystack = "β77\n"
-matches = [[0, 5]]
-
-# This is a regression test for some errant HIR interval set operations that
-# were made in the regex-syntax 0.8.0 release and then reverted in 0.8.1. The
-# issue here is that the HIR produced from the regex had out-of-order ranges.
-#
-# See: https://github.com/rust-lang/regex/issues/1103
-# Ref: https://github.com/rust-lang/regex/pull/1051
-# Ref: https://github.com/rust-lang/regex/pull/1102
-[[test]]
-name = "hir-optimization-out-of-order-class"
-regex = '^[[:alnum:]./-]+$'
-haystack = "a-b"
-matches = [[0, 3]]
-
-# This is a regression test for an improper reverse suffix optimization. This
-# occurred when I "broadened" the applicability of the optimization to include
-# multiple possible literal suffixes instead of only sticking to a non-empty
-# longest common suffix. It turns out that, at least given how the reverse
-# suffix optimization works, we need to stick to the longest common suffix for
-# now.
-#
-# See: https://github.com/rust-lang/regex/issues/1110
-# See also: https://github.com/astral-sh/ruff/pull/7980
-[[test]]
-name = 'improper-reverse-suffix-optimization'
-regex = '(\\N\{[^}]+})|([{}])'
-haystack = 'hiya \N{snowman} bye'
-matches = [[[5, 16], [5, 16], []]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/set.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/set.toml
deleted file mode 100644
index 049e8a8..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/set.toml
+++ /dev/null
@@ -1,641 +0,0 @@
-# Basic multi-regex tests.
-
-[[test]]
-name = "basic10"
-regex = ["a", "a"]
-haystack = "a"
-matches = [
-  { id = 0, span = [0, 1] },
-  { id = 1, span = [0, 1] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic10-leftmost-first"
-regex = ["a", "a"]
-haystack = "a"
-matches = [
-  { id = 0, span = [0, 1] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "basic20"
-regex = ["a", "a"]
-haystack = "ba"
-matches = [
-  { id = 0, span = [1, 2] },
-  { id = 1, span = [1, 2] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic30"
-regex = ["a", "b"]
-haystack = "a"
-matches = [
-  { id = 0, span = [0, 1] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic40"
-regex = ["a", "b"]
-haystack = "b"
-matches = [
-  { id = 1, span = [0, 1] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic50"
-regex = ["a|b", "b|a"]
-haystack = "b"
-matches = [
-  { id = 0, span = [0, 1] },
-  { id = 1, span = [0, 1] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic60"
-regex = ["foo", "oo"]
-haystack = "foo"
-matches = [
-  { id = 0, span = [0, 3] },
-  { id = 1, span = [1, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic60-leftmost-first"
-regex = ["foo", "oo"]
-haystack = "foo"
-matches = [
-  { id = 0, span = [0, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "basic61"
-regex = ["oo", "foo"]
-haystack = "foo"
-matches = [
-  { id = 1, span = [0, 3] },
-  { id = 0, span = [1, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic61-leftmost-first"
-regex = ["oo", "foo"]
-haystack = "foo"
-matches = [
-  { id = 1, span = [0, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "basic70"
-regex = ["abcd", "bcd", "cd", "d"]
-haystack = "abcd"
-matches = [
-  { id = 0, span = [0, 4] },
-  { id = 1, span = [1, 4] },
-  { id = 2, span = [2, 4] },
-  { id = 3, span = [3, 4] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic71"
-regex = ["bcd", "cd", "d", "abcd"]
-haystack = "abcd"
-matches = [
-  { id = 3, span = [0, 4] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "basic80"
-regex = ["^foo", "bar$"]
-haystack = "foo"
-matches = [
-  { id = 0, span = [0, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic81"
-regex = ["^foo", "bar$"]
-haystack = "foo bar"
-matches = [
-  { id = 0, span = [0, 3] },
-  { id = 1, span = [4, 7] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic82"
-regex = ["^foo", "bar$"]
-haystack = "bar"
-matches = [
-  { id = 1, span = [0, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic90"
-regex = ["[a-z]+$", "foo"]
-haystack = "01234 foo"
-matches = [
-  { id = 0, span = [8, 9] },
-  { id = 0, span = [7, 9] },
-  { id = 0, span = [6, 9] },
-  { id = 1, span = [6, 9] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic91"
-regex = ["[a-z]+$", "foo"]
-haystack = "foo 01234"
-matches = [
-  { id = 1, span = [0, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic100"
-regex = [".*?", "a"]
-haystack = "zzza"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [0, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [1, 2] },
-  { id = 0, span = [0, 2] },
-  { id = 0, span = [3, 3] },
-  { id = 0, span = [2, 3] },
-  { id = 0, span = [1, 3] },
-  { id = 0, span = [0, 3] },
-  { id = 0, span = [4, 4] },
-  { id = 0, span = [3, 4] },
-  { id = 0, span = [2, 4] },
-  { id = 0, span = [1, 4] },
-  { id = 0, span = [0, 4] },
-  { id = 1, span = [3, 4] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic101"
-regex = [".*", "a"]
-haystack = "zzza"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [0, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [1, 2] },
-  { id = 0, span = [0, 2] },
-  { id = 0, span = [3, 3] },
-  { id = 0, span = [2, 3] },
-  { id = 0, span = [1, 3] },
-  { id = 0, span = [0, 3] },
-  { id = 0, span = [4, 4] },
-  { id = 0, span = [3, 4] },
-  { id = 0, span = [2, 4] },
-  { id = 0, span = [1, 4] },
-  { id = 0, span = [0, 4] },
-  { id = 1, span = [3, 4] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic102"
-regex = [".*", "a"]
-haystack = "zzz"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [0, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [1, 2] },
-  { id = 0, span = [0, 2] },
-  { id = 0, span = [3, 3] },
-  { id = 0, span = [2, 3] },
-  { id = 0, span = [1, 3] },
-  { id = 0, span = [0, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic110"
-regex = ['\ba\b']
-haystack = "hello a bye"
-matches = [
-  { id = 0, span = [6, 7] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic111"
-regex = ['\ba\b', '\be\b']
-haystack = "hello a bye e"
-matches = [
-  { id = 0, span = [6, 7] },
-  { id = 1, span = [12, 13] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic120"
-regex = ["a"]
-haystack = "a"
-matches = [
-  { id = 0, span = [0, 1] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic121"
-regex = [".*a"]
-haystack = "a"
-matches = [
-  { id = 0, span = [0, 1] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic122"
-regex = [".*a", "β"]
-haystack = "β"
-matches = [
-  { id = 1, span = [0, 2] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "basic130"
-regex = ["ab", "b"]
-haystack = "ba"
-matches = [
-  { id = 1, span = [0, 1] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-# These test cases where one of the regexes matches the empty string.
-
-[[test]]
-name = "empty10"
-regex = ["", "a"]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 1, span = [0, 1] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty10-leftmost-first"
-regex = ["", "a"]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [3, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "empty11"
-regex = ["a", ""]
-haystack = "abc"
-matches = [
-  { id = 1, span = [0, 0] },
-  { id = 0, span = [0, 1] },
-  { id = 1, span = [1, 1] },
-  { id = 1, span = [2, 2] },
-  { id = 1, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty11-leftmost-first"
-regex = ["a", ""]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 1] },
-  { id = 1, span = [2, 2] },
-  { id = 1, span = [3, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "empty20"
-regex = ["", "b"]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 1, span = [1, 2] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty20-leftmost-first"
-regex = ["", "b"]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [3, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "empty21"
-regex = ["b", ""]
-haystack = "abc"
-matches = [
-  { id = 1, span = [0, 0] },
-  { id = 1, span = [1, 1] },
-  { id = 0, span = [1, 2] },
-  { id = 1, span = [2, 2] },
-  { id = 1, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty21-leftmost-first"
-regex = ["b", ""]
-haystack = "abc"
-matches = [
-  { id = 1, span = [0, 0] },
-  { id = 0, span = [1, 2] },
-  { id = 1, span = [3, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "empty22"
-regex = ["(?:)", "b"]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 1, span = [1, 2] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty23"
-regex = ["b", "(?:)"]
-haystack = "abc"
-matches = [
-  { id = 1, span = [0, 0] },
-  { id = 1, span = [1, 1] },
-  { id = 0, span = [1, 2] },
-  { id = 1, span = [2, 2] },
-  { id = 1, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty30"
-regex = ["", "z"]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty30-leftmost-first"
-regex = ["", "z"]
-haystack = "abc"
-matches = [
-  { id = 0, span = [0, 0] },
-  { id = 0, span = [1, 1] },
-  { id = 0, span = [2, 2] },
-  { id = 0, span = [3, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "empty31"
-regex = ["z", ""]
-haystack = "abc"
-matches = [
-  { id = 1, span = [0, 0] },
-  { id = 1, span = [1, 1] },
-  { id = 1, span = [2, 2] },
-  { id = 1, span = [3, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty31-leftmost-first"
-regex = ["z", ""]
-haystack = "abc"
-matches = [
-  { id = 1, span = [0, 0] },
-  { id = 1, span = [1, 1] },
-  { id = 1, span = [2, 2] },
-  { id = 1, span = [3, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-[[test]]
-name = "empty40"
-regex = ["c(?:)", "b"]
-haystack = "abc"
-matches = [
-  { id = 1, span = [1, 2] },
-  { id = 0, span = [2, 3] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "empty40-leftmost-first"
-regex = ["c(?:)", "b"]
-haystack = "abc"
-matches = [
-  { id = 1, span = [1, 2] },
-  { id = 0, span = [2, 3] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-
-# These test cases where there are no matches.
-
-[[test]]
-name = "nomatch10"
-regex = ["a", "a"]
-haystack = "b"
-matches = []
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "nomatch20"
-regex = ["^foo", "bar$"]
-haystack = "bar foo"
-matches = []
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "nomatch30"
-regex = []
-haystack = "a"
-matches = []
-match-kind = "all"
-search-kind = "overlapping"
-
-[[test]]
-name = "nomatch40"
-regex = ["^rooted$", '\.log$']
-haystack = "notrooted"
-matches = []
-match-kind = "all"
-search-kind = "overlapping"
-
-# These test multi-regex searches with capture groups.
-#
-# NOTE: I wrote these tests in the course of developing a first class API for
-# overlapping capturing group matches, but ultimately removed that API because
-# the semantics for overlapping matches aren't totally clear. However, I've
-# left the tests because I believe the semantics for these patterns are clear
-# and because we can still test our "which patterns matched" APIs with them.
-
-[[test]]
-name = "caps-010"
-regex = ['^(\w+) (\w+)$', '^(\S+) (\S+)$']
-haystack = "Bruce Springsteen"
-matches = [
-  { id = 0, spans = [[0, 17], [0, 5], [6, 17]] },
-  { id = 1, spans = [[0, 17], [0, 5], [6, 17]] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-unicode = false
-utf8 = false
-
-[[test]]
-name = "caps-020"
-regex = ['^(\w+) (\w+)$', '^[A-Z](\S+) [A-Z](\S+)$']
-haystack = "Bruce Springsteen"
-matches = [
-  { id = 0, spans = [[0, 17], [0, 5], [6, 17]] },
-  { id = 1, spans = [[0, 17], [1, 5], [7, 17]] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-unicode = false
-utf8 = false
-
-[[test]]
-name = "caps-030"
-regex = ['^(\w+) (\w+)$', '^([A-Z])(\S+) ([A-Z])(\S+)$']
-haystack = "Bruce Springsteen"
-matches = [
-  { id = 0, spans = [[0, 17], [0, 5], [6, 17]] },
-  { id = 1, spans = [[0, 17], [0, 1], [1, 5], [6, 7], [7, 17]] },
-]
-match-kind = "all"
-search-kind = "overlapping"
-unicode = false
-utf8 = false
-
-[[test]]
-name = "caps-110"
-regex = ['(\w+) (\w+)', '(\S+) (\S+)']
-haystack = "Bruce Springsteen"
-matches = [
-  { id = 0, spans = [[0, 17], [0, 5], [6, 17]] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-unicode = false
-utf8 = false
-
-[[test]]
-name = "caps-120"
-regex = ['(\w+) (\w+)', '(\S+) (\S+)']
-haystack = "&ruce $pringsteen"
-matches = [
-  { id = 1, spans = [[0, 17], [0, 5], [6, 17]] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-unicode = false
-utf8 = false
-
-[[test]]
-name = "caps-121"
-regex = ['(\w+) (\w+)', '(\S+) (\S+)']
-haystack = "&ruce $pringsteen Foo Bar"
-matches = [
-  { id = 1, spans = [[0, 17], [0, 5], [6, 17]] },
-  { id = 0, spans = [[18, 25], [18, 21], [22, 25]] },
-]
-match-kind = "leftmost-first"
-search-kind = "leftmost"
-unicode = false
-utf8 = false
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/substring.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/substring.toml
deleted file mode 100644
index 69595ce8..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/substring.toml
+++ /dev/null
@@ -1,36 +0,0 @@
-# These tests check that regex engines perform as expected when the search is
-# instructed to only search a substring of a haystack instead of the entire
-# haystack. This tends to exercise interesting edge cases that are otherwise
-# difficult to provoke. (But not necessarily impossible. Regex search iterators
-# for example, make use of the "search just a substring" APIs by changing the
-# starting position of a search to the end position of the previous match.)
-
-[[test]]
-name = "unicode-word-start"
-regex = '\b[0-9]+\b'
-haystack = "β123"
-bounds = { start = 2, end = 5 }
-matches = []
-
-[[test]]
-name = "unicode-word-end"
-regex = '\b[0-9]+\b'
-haystack = "123β"
-bounds = { start = 0, end = 3 }
-matches = []
-
-[[test]]
-name = "ascii-word-start"
-regex = '\b[0-9]+\b'
-haystack = "β123"
-bounds = { start = 2, end = 5 }
-matches = [[2, 5]]
-unicode = false
-
-[[test]]
-name = "ascii-word-end"
-regex = '\b[0-9]+\b'
-haystack = "123β"
-bounds = { start = 0, end = 3 }
-matches = [[0, 3]]
-unicode = false
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/unicode.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/unicode.toml
deleted file mode 100644
index f4ac76b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/unicode.toml
+++ /dev/null
@@ -1,517 +0,0 @@
-# Basic Unicode literal support.
-[[test]]
-name = "literal1"
-regex = '☃'
-haystack = "☃"
-matches = [[0, 3]]
-
-[[test]]
-name = "literal2"
-regex = '☃+'
-haystack = "☃"
-matches = [[0, 3]]
-
-[[test]]
-name = "literal3"
-regex = '☃+'
-haystack = "☃"
-matches = [[0, 3]]
-case-insensitive = true
-
-[[test]]
-name = "literal4"
-regex = 'Δ'
-haystack = "δ"
-matches = [[0, 2]]
-case-insensitive = true
-
-# Unicode word boundaries.
-[[test]]
-name = "wb-100"
-regex = '\d\b'
-haystack = "6δ"
-matches = []
-
-[[test]]
-name = "wb-200"
-regex = '\d\b'
-haystack = "6 "
-matches = [[0, 1]]
-
-[[test]]
-name = "wb-300"
-regex = '\d\B'
-haystack = "6δ"
-matches = [[0, 1]]
-
-[[test]]
-name = "wb-400"
-regex = '\d\B'
-haystack = "6 "
-matches = []
-
-# Unicode character class support.
-[[test]]
-name = "class1"
-regex = '[☃Ⅰ]+'
-haystack = "☃"
-matches = [[0, 3]]
-
-[[test]]
-name = "class2"
-regex = '\pN'
-haystack = "Ⅰ"
-matches = [[0, 3]]
-
-[[test]]
-name = "class3"
-regex = '\pN+'
-haystack = "Ⅰ1Ⅱ2"
-matches = [[0, 8]]
-
-[[test]]
-name = "class4"
-regex = '\PN+'
-haystack = "abⅠ"
-matches = [[0, 2]]
-
-[[test]]
-name = "class5"
-regex = '[\PN]+'
-haystack = "abⅠ"
-matches = [[0, 2]]
-
-[[test]]
-name = "class6"
-regex = '[^\PN]+'
-haystack = "abⅠ"
-matches = [[2, 5]]
-
-[[test]]
-name = "class7"
-regex = '\p{Lu}+'
-haystack = "ΛΘΓΔα"
-matches = [[0, 8]]
-
-[[test]]
-name = "class8"
-regex = '\p{Lu}+'
-haystack = "ΛΘΓΔα"
-matches = [[0, 10]]
-case-insensitive = true
-
-[[test]]
-name = "class9"
-regex = '\pL+'
-haystack = "ΛΘΓΔα"
-matches = [[0, 10]]
-
-[[test]]
-name = "class10"
-regex = '\p{Ll}+'
-haystack = "ΛΘΓΔα"
-matches = [[8, 10]]
-
-# Unicode aware "Perl" character classes.
-[[test]]
-name = "perl1"
-regex = '\w+'
-haystack = "dδd"
-matches = [[0, 4]]
-
-[[test]]
-name = "perl2"
-regex = '\w+'
-haystack = "℥"
-matches = []
-
-[[test]]
-name = "perl3"
-regex = '\W+'
-haystack = "℥"
-matches = [[0, 3]]
-
-[[test]]
-name = "perl4"
-regex = '\d+'
-haystack = "1à„šà„©9"
-matches = [[0, 8]]
-
-[[test]]
-name = "perl5"
-regex = '\d+'
-haystack = "Ⅱ"
-matches = []
-
-[[test]]
-name = "perl6"
-regex = '\D+'
-haystack = "Ⅱ"
-matches = [[0, 3]]
-
-[[test]]
-name = "perl7"
-regex = '\s+'
-haystack = " "
-matches = [[0, 3]]
-
-[[test]]
-name = "perl8"
-regex = '\s+'
-haystack = "☃"
-matches = []
-
-[[test]]
-name = "perl9"
-regex = '\S+'
-haystack = "☃"
-matches = [[0, 3]]
-
-# Specific tests for Unicode general category classes.
-[[test]]
-name = "class-gencat1"
-regex = '\p{Cased_Letter}'
-haystack = "ïŒĄ"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat2"
-regex = '\p{Close_Punctuation}'
-haystack = "❯"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat3"
-regex = '\p{Connector_Punctuation}'
-haystack = "⁀"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat4"
-regex = '\p{Control}'
-haystack = "\u009F"
-matches = [[0, 2]]
-
-[[test]]
-name = "class-gencat5"
-regex = '\p{Currency_Symbol}'
-haystack = "ïżĄ"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat6"
-regex = '\p{Dash_Punctuation}'
-haystack = "〰"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat7"
-regex = '\p{Decimal_Number}'
-haystack = "𑓙"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat8"
-regex = '\p{Enclosing_Mark}'
-haystack = "\uA672"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat9"
-regex = '\p{Final_Punctuation}'
-haystack = "➥"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat10"
-regex = '\p{Format}'
-haystack = "\U000E007F"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat11"
-regex = '\p{Initial_Punctuation}'
-haystack = "⾜"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat12"
-regex = '\p{Letter}'
-haystack = "Έ"
-matches = [[0, 2]]
-
-[[test]]
-name = "class-gencat13"
-regex = '\p{Letter_Number}'
-haystack = "ↂ"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat14"
-regex = '\p{Line_Separator}'
-haystack = "\u2028"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat15"
-regex = '\p{Lowercase_Letter}'
-haystack = "ϛ"
-matches = [[0, 2]]
-
-[[test]]
-name = "class-gencat16"
-regex = '\p{Mark}'
-haystack = "\U000E01EF"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat17"
-regex = '\p{Math}'
-haystack = "⋿"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat18"
-regex = '\p{Modifier_Letter}'
-haystack = "𖭃"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat19"
-regex = '\p{Modifier_Symbol}'
-haystack = "🏿"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat20"
-regex = '\p{Nonspacing_Mark}'
-haystack = "\U0001E94A"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat21"
-regex = '\p{Number}'
-haystack = "⓿"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat22"
-regex = '\p{Open_Punctuation}'
-haystack = ""
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat23"
-regex = '\p{Other}'
-haystack = "\u0BC9"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat24"
-regex = '\p{Other_Letter}'
-haystack = "ꓷ"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat25"
-regex = '\p{Other_Number}'
-haystack = "㉏"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat26"
-regex = '\p{Other_Punctuation}'
-haystack = "𞄞"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat27"
-regex = '\p{Other_Symbol}'
-haystack = "⅌"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat28"
-regex = '\p{Paragraph_Separator}'
-haystack = "\u2029"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat29"
-regex = '\p{Private_Use}'
-haystack = "\U0010FFFD"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat30"
-regex = '\p{Punctuation}'
-haystack = "𑁍"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat31"
-regex = '\p{Separator}'
-haystack = "\u3000"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat32"
-regex = '\p{Space_Separator}'
-haystack = "\u205F"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat33"
-regex = '\p{Spacing_Mark}'
-haystack = "\U00016F7E"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat34"
-regex = '\p{Symbol}'
-haystack = "⯈"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat35"
-regex = '\p{Titlecase_Letter}'
-haystack = "áżŒ"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gencat36"
-regex = '\p{Unassigned}'
-haystack = "\U0010FFFF"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gencat37"
-regex = '\p{Uppercase_Letter}'
-haystack = "Ꝋ"
-matches = [[0, 3]]
-
-
-# Tests for Unicode emoji properties.
-[[test]]
-name = "class-emoji1"
-regex = '\p{Emoji}'
-haystack = "\u23E9"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-emoji2"
-regex = '\p{emoji}'
-haystack = "\U0001F21A"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-emoji3"
-regex = '\p{extendedpictographic}'
-haystack = "\U0001FA6E"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-emoji4"
-regex = '\p{extendedpictographic}'
-haystack = "\U0001FFFD"
-matches = [[0, 4]]
-
-
-# Tests for Unicode grapheme cluster properties.
-[[test]]
-name = "class-gcb1"
-regex = '\p{grapheme_cluster_break=prepend}'
-haystack = "\U00011D46"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gcb2"
-regex = '\p{gcb=regional_indicator}'
-haystack = "\U0001F1E6"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gcb3"
-regex = '\p{gcb=ri}'
-haystack = "\U0001F1E7"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gcb4"
-regex = '\p{regionalindicator}'
-haystack = "\U0001F1FF"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-gcb5"
-regex = '\p{gcb=lvt}'
-haystack = "\uC989"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-gcb6"
-regex = '\p{gcb=zwj}'
-haystack = "\u200D"
-matches = [[0, 3]]
-
-# Tests for Unicode word boundary properties.
-[[test]]
-name = "class-word-break1"
-regex = '\p{word_break=Hebrew_Letter}'
-haystack = "\uFB46"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-word-break2"
-regex = '\p{wb=hebrewletter}'
-haystack = "\uFB46"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-word-break3"
-regex = '\p{wb=ExtendNumLet}'
-haystack = "\uFF3F"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-word-break4"
-regex = '\p{wb=WSegSpace}'
-haystack = "\u3000"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-word-break5"
-regex = '\p{wb=numeric}'
-haystack = "\U0001E950"
-matches = [[0, 4]]
-
-# Tests for Unicode sentence boundary properties.
-[[test]]
-name = "class-sentence-break1"
-regex = '\p{sentence_break=Lower}'
-haystack = "\u0469"
-matches = [[0, 2]]
-
-[[test]]
-name = "class-sentence-break2"
-regex = '\p{sb=lower}'
-haystack = "\u0469"
-matches = [[0, 2]]
-
-[[test]]
-name = "class-sentence-break3"
-regex = '\p{sb=Close}'
-haystack = "\uFF60"
-matches = [[0, 3]]
-
-[[test]]
-name = "class-sentence-break4"
-regex = '\p{sb=Close}'
-haystack = "\U0001F677"
-matches = [[0, 4]]
-
-[[test]]
-name = "class-sentence-break5"
-regex = '\p{sb=SContinue}'
-haystack = "\uFF64"
-matches = [[0, 3]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/utf8.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/utf8.toml
deleted file mode 100644
index 39e284b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/utf8.toml
+++ /dev/null
@@ -1,399 +0,0 @@
-# These test the UTF-8 modes expose by regex-automata. Namely, when utf8 is
-# true, then we promise that the haystack is valid UTF-8. (Otherwise behavior
-# is unspecified.) This also corresponds to building the regex engine with the
-# following two guarantees:
-#
-# 1) For any non-empty match reported, its span is guaranteed to correspond to
-# valid UTF-8.
-# 2) All empty or zero-width matches reported must never split a UTF-8
-# encoded codepoint. If the haystack has invalid UTF-8, then this results in
-# unspecified behavior.
-#
-# The (2) is in particular what we focus our testing on since (1) is generally
-# guaranteed by regex-syntax's AST-to-HIR translator and is well tested there.
-# The thing with (2) is that it can't be described in the HIR, so the regex
-# engines have to handle that case. Thus, we test it here.
-#
-# Note that it is possible to build a regex that has property (1) but not
-# (2), and vice versa. This is done by building the HIR with 'utf8=true' but
-# building the Thompson NFA with 'utf8=false'. We don't test that here because
-# the harness doesn't expose a way to enable or disable UTF-8 mode with that
-# granularity. Instead, those combinations are lightly tested via doc examples.
-# That's not to say that (1) without (2) is uncommon. Indeed, ripgrep uses it
-# because it cannot guarantee that its haystack is valid UTF-8.
-
-# This tests that an empty regex doesn't split a codepoint.
-[[test]]
-name = "empty-utf8yes"
-regex = ''
-haystack = '☃'
-matches = [[0, 0], [3, 3]]
-unicode = true
-utf8 = true
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8yes-overlapping"
-regex = ''
-haystack = '☃'
-matches = [[0, 0], [3, 3]]
-unicode = true
-utf8 = true
-match-kind = "all"
-search-kind = "overlapping"
-
-# This tests that an empty regex DOES split a codepoint when utf=false.
-[[test]]
-name = "empty-utf8no"
-regex = ''
-haystack = '☃'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-unicode = true
-utf8 = false
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8no-overlapping"
-regex = ''
-haystack = '☃'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3]]
-unicode = true
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-# This tests that an empty regex doesn't split a codepoint, even if we give
-# it bounds entirely within the codepoint.
-#
-# This is one of the trickier cases and is what motivated the current UTF-8
-# mode design. In particular, at one point, this test failed the 'is_match'
-# variant of the test but not 'find'. This is because the 'is_match' code path
-# is specifically optimized for "was a match found" rather than "where is the
-# match." In the former case, you don't really care about the empty-vs-non-empty
-# matches, and thus, the codepoint splitting filtering logic wasn't getting
-# applied. (In multiple ways across multiple regex engines.) In this way, you
-# can wind up with a situation where 'is_match' says "yes," but 'find' says,
-# "I didn't find anything." Which is... not great.
-#
-# I could have decided to say that providing boundaries that themselves split
-# a codepoint would have unspecified behavior. But I couldn't quite convince
-# myself that such boundaries were the only way to get an inconsistency between
-# 'is_match' and 'find'.
-#
-# Note that I also tried to come up with a test like this that fails without
-# using `bounds`. Specifically, a test where 'is_match' and 'find' disagree.
-# But I couldn't do it, and I'm tempted to conclude it is impossible. The
-# fundamental problem is that you need to simultaneously produce an empty match
-# that splits a codepoint while *not* matching before or after the codepoint.
-[[test]]
-name = "empty-utf8yes-bounds"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 3]
-matches = []
-unicode = true
-utf8 = true
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8yes-bounds-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 3]
-matches = []
-unicode = true
-utf8 = true
-match-kind = "all"
-search-kind = "overlapping"
-
-# This tests that an empty regex splits a codepoint when the bounds are
-# entirely within the codepoint.
-[[test]]
-name = "empty-utf8no-bounds"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 3]
-matches = [[1, 1], [2, 2], [3, 3]]
-unicode = true
-utf8 = false
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8no-bounds-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 3]
-matches = [[1, 1], [2, 2], [3, 3]]
-unicode = true
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-# In this test, we anchor the search. Since the start position is also a UTF-8
-# boundary, we get a match.
-[[test]]
-name = "empty-utf8yes-anchored"
-regex = ''
-haystack = '𝛃'
-matches = [[0, 0]]
-anchored = true
-unicode = true
-utf8 = true
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8yes-anchored-overlapping"
-regex = ''
-haystack = '𝛃'
-matches = [[0, 0]]
-anchored = true
-unicode = true
-utf8 = true
-match-kind = "all"
-search-kind = "overlapping"
-
-# Same as above, except with UTF-8 mode disabled. It almost doesn't change the
-# result, except for the fact that since this is an anchored search and we
-# always find all matches, the test harness will keep reporting matches until
-# none are found. Because it's anchored, matches will be reported so long as
-# they are directly adjacent. Since with UTF-8 mode the next anchored search
-# after the match at [0, 0] fails, iteration stops (and doesn't find the last
-# match at [4, 4]).
-[[test]]
-name = "empty-utf8no-anchored"
-regex = ''
-haystack = '𝛃'
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-anchored = true
-unicode = true
-utf8 = false
-
-# Tests the overlapping case of the above.
-#
-# Note that overlapping anchored searches are a little weird, and it's not
-# totally clear what their semantics ought to be. For now, we just test the
-# current behavior of our test shim that implements overlapping search. (This
-# is one of the reasons why we don't really expose regex-level overlapping
-# searches.)
-[[test]]
-name = "empty-utf8no-anchored-overlapping"
-regex = ''
-haystack = '𝛃'
-matches = [[0, 0]]
-anchored = true
-unicode = true
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-# In this test, we anchor the search, but also set bounds. The bounds start the
-# search in the middle of a codepoint, so there should never be a match.
-[[test]]
-name = "empty-utf8yes-anchored-bounds"
-regex = ''
-haystack = '𝛃'
-matches = []
-bounds = [1, 3]
-anchored = true
-unicode = true
-utf8 = true
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8yes-anchored-bounds-overlapping"
-regex = ''
-haystack = '𝛃'
-matches = []
-bounds = [1, 3]
-anchored = true
-unicode = true
-utf8 = true
-match-kind = "all"
-search-kind = "overlapping"
-
-# Same as above, except with UTF-8 mode disabled. Without UTF-8 mode enabled,
-# matching within a codepoint is allowed. And remember, as in the anchored test
-# above with UTF-8 mode disabled, iteration will report all adjacent matches.
-# The matches at [0, 0] and [4, 4] are not included because of the bounds of
-# the search.
-[[test]]
-name = "empty-utf8no-anchored-bounds"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 3]
-matches = [[1, 1], [2, 2], [3, 3]]
-anchored = true
-unicode = true
-utf8 = false
-
-# Tests the overlapping case of the above.
-#
-# Note that overlapping anchored searches are a little weird, and it's not
-# totally clear what their semantics ought to be. For now, we just test the
-# current behavior of our test shim that implements overlapping search. (This
-# is one of the reasons why we don't really expose regex-level overlapping
-# searches.)
-[[test]]
-name = "empty-utf8no-anchored-bounds-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 3]
-matches = [[1, 1]]
-anchored = true
-unicode = true
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-# This tests that we find the match at the end of the string when the bounds
-# exclude the first match.
-[[test]]
-name = "empty-utf8yes-startbound"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = [[4, 4]]
-unicode = true
-utf8 = true
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8yes-startbound-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = [[4, 4]]
-unicode = true
-utf8 = true
-match-kind = "all"
-search-kind = "overlapping"
-
-# Same as above, except since UTF-8 mode is disabled, we also find the matches
-# inbetween that split the codepoint.
-[[test]]
-name = "empty-utf8no-startbound"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = [[1, 1], [2, 2], [3, 3], [4, 4]]
-unicode = true
-utf8 = false
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8no-startbound-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = [[1, 1], [2, 2], [3, 3], [4, 4]]
-unicode = true
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-# This tests that we don't find any matches in an anchored search, even when
-# the bounds include a match (at the end).
-[[test]]
-name = "empty-utf8yes-anchored-startbound"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = []
-anchored = true
-unicode = true
-utf8 = true
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8yes-anchored-startbound-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = []
-anchored = true
-unicode = true
-utf8 = true
-match-kind = "all"
-search-kind = "overlapping"
-
-# Same as above, except since UTF-8 mode is disabled, we also find the matches
-# inbetween that split the codepoint. Even though this is an anchored search,
-# since the matches are adjacent, we find all of them.
-[[test]]
-name = "empty-utf8no-anchored-startbound"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = [[1, 1], [2, 2], [3, 3], [4, 4]]
-anchored = true
-unicode = true
-utf8 = false
-
-# Tests the overlapping case of the above.
-#
-# Note that overlapping anchored searches are a little weird, and it's not
-# totally clear what their semantics ought to be. For now, we just test the
-# current behavior of our test shim that implements overlapping search. (This
-# is one of the reasons why we don't really expose regex-level overlapping
-# searches.)
-[[test]]
-name = "empty-utf8no-anchored-startbound-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [1, 4]
-matches = [[1, 1]]
-anchored = true
-unicode = true
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
-
-# This tests that we find the match at the end of the haystack in UTF-8 mode
-# when our bounds only include the empty string at the end of the haystack.
-[[test]]
-name = "empty-utf8yes-anchored-endbound"
-regex = ''
-haystack = '𝛃'
-bounds = [4, 4]
-matches = [[4, 4]]
-anchored = true
-unicode = true
-utf8 = true
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8yes-anchored-endbound-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [4, 4]
-matches = [[4, 4]]
-anchored = true
-unicode = true
-utf8 = true
-match-kind = "all"
-search-kind = "overlapping"
-
-# Same as above, but with UTF-8 mode disabled. Results remain the same since
-# the only possible match does not split a codepoint.
-[[test]]
-name = "empty-utf8no-anchored-endbound"
-regex = ''
-haystack = '𝛃'
-bounds = [4, 4]
-matches = [[4, 4]]
-anchored = true
-unicode = true
-utf8 = false
-
-# Tests the overlapping case of the above.
-[[test]]
-name = "empty-utf8no-anchored-endbound-overlapping"
-regex = ''
-haystack = '𝛃'
-bounds = [4, 4]
-matches = [[4, 4]]
-anchored = true
-unicode = true
-utf8 = false
-match-kind = "all"
-search-kind = "overlapping"
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/word-boundary-special.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/word-boundary-special.toml
deleted file mode 100644
index 2b5a2a0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/word-boundary-special.toml
+++ /dev/null
@@ -1,687 +0,0 @@
-# These tests are for the "special" word boundary assertions. That is,
-# \b{start}, \b{end}, \b{start-half}, \b{end-half}. These are specialty
-# assertions for more niche use cases, but hitting those cases without these
-# assertions is difficult. For example, \b{start-half} and \b{end-half} are
-# used to implement the -w/--word-regexp flag in a grep program.
-
-# Tests for (?-u:\b{start})
-
-[[test]]
-name = "word-start-ascii-010"
-regex = '\b{start}'
-haystack = "a"
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "word-start-ascii-020"
-regex = '\b{start}'
-haystack = "a "
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "word-start-ascii-030"
-regex = '\b{start}'
-haystack = " a "
-matches = [[1, 1]]
-unicode = false
-
-[[test]]
-name = "word-start-ascii-040"
-regex = '\b{start}'
-haystack = ""
-matches = []
-unicode = false
-
-[[test]]
-name = "word-start-ascii-050"
-regex = '\b{start}'
-haystack = "ab"
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "word-start-ascii-060"
-regex = '\b{start}'
-haystack = "𝛃"
-matches = []
-unicode = false
-
-[[test]]
-name = "word-start-ascii-060-bounds"
-regex = '\b{start}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = false
-
-[[test]]
-name = "word-start-ascii-070"
-regex = '\b{start}'
-haystack = " 𝛃 "
-matches = []
-unicode = false
-
-[[test]]
-name = "word-start-ascii-080"
-regex = '\b{start}'
-haystack = "𝛃𐆀"
-matches = []
-unicode = false
-
-[[test]]
-name = "word-start-ascii-090"
-regex = '\b{start}'
-haystack = "𝛃b"
-matches = [[4, 4]]
-unicode = false
-
-[[test]]
-name = "word-start-ascii-110"
-regex = '\b{start}'
-haystack = "b𝛃"
-matches = [[0, 0]]
-unicode = false
-
-# Tests for (?-u:\b{end})
-
-[[test]]
-name = "word-end-ascii-010"
-regex = '\b{end}'
-haystack = "a"
-matches = [[1, 1]]
-unicode = false
-
-[[test]]
-name = "word-end-ascii-020"
-regex = '\b{end}'
-haystack = "a "
-matches = [[1, 1]]
-unicode = false
-
-[[test]]
-name = "word-end-ascii-030"
-regex = '\b{end}'
-haystack = " a "
-matches = [[2, 2]]
-unicode = false
-
-[[test]]
-name = "word-end-ascii-040"
-regex = '\b{end}'
-haystack = ""
-matches = []
-unicode = false
-
-[[test]]
-name = "word-end-ascii-050"
-regex = '\b{end}'
-haystack = "ab"
-matches = [[2, 2]]
-unicode = false
-
-[[test]]
-name = "word-end-ascii-060"
-regex = '\b{end}'
-haystack = "𝛃"
-matches = []
-unicode = false
-
-[[test]]
-name = "word-end-ascii-060-bounds"
-regex = '\b{end}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = false
-
-[[test]]
-name = "word-end-ascii-070"
-regex = '\b{end}'
-haystack = " 𝛃 "
-matches = []
-unicode = false
-
-[[test]]
-name = "word-end-ascii-080"
-regex = '\b{end}'
-haystack = "𝛃𐆀"
-matches = []
-unicode = false
-
-[[test]]
-name = "word-end-ascii-090"
-regex = '\b{end}'
-haystack = "𝛃b"
-matches = [[5, 5]]
-unicode = false
-
-[[test]]
-name = "word-end-ascii-110"
-regex = '\b{end}'
-haystack = "b𝛃"
-matches = [[1, 1]]
-unicode = false
-
-# Tests for \b{start}
-
-[[test]]
-name = "word-start-unicode-010"
-regex = '\b{start}'
-haystack = "a"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-020"
-regex = '\b{start}'
-haystack = "a "
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-030"
-regex = '\b{start}'
-haystack = " a "
-matches = [[1, 1]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-040"
-regex = '\b{start}'
-haystack = ""
-matches = []
-unicode = true
-
-[[test]]
-name = "word-start-unicode-050"
-regex = '\b{start}'
-haystack = "ab"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-060"
-regex = '\b{start}'
-haystack = "𝛃"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-060-bounds"
-regex = '\b{start}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = true
-
-[[test]]
-name = "word-start-unicode-070"
-regex = '\b{start}'
-haystack = " 𝛃 "
-matches = [[1, 1]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-080"
-regex = '\b{start}'
-haystack = "𝛃𐆀"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-090"
-regex = '\b{start}'
-haystack = "𝛃b"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-unicode-110"
-regex = '\b{start}'
-haystack = "b𝛃"
-matches = [[0, 0]]
-unicode = true
-
-# Tests for \b{end}
-
-[[test]]
-name = "word-end-unicode-010"
-regex = '\b{end}'
-haystack = "a"
-matches = [[1, 1]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-020"
-regex = '\b{end}'
-haystack = "a "
-matches = [[1, 1]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-030"
-regex = '\b{end}'
-haystack = " a "
-matches = [[2, 2]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-040"
-regex = '\b{end}'
-haystack = ""
-matches = []
-unicode = true
-
-[[test]]
-name = "word-end-unicode-050"
-regex = '\b{end}'
-haystack = "ab"
-matches = [[2, 2]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-060"
-regex = '\b{end}'
-haystack = "𝛃"
-matches = [[4, 4]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-060-bounds"
-regex = '\b{end}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = true
-
-[[test]]
-name = "word-end-unicode-070"
-regex = '\b{end}'
-haystack = " 𝛃 "
-matches = [[5, 5]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-080"
-regex = '\b{end}'
-haystack = "𝛃𐆀"
-matches = [[4, 4]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-090"
-regex = '\b{end}'
-haystack = "𝛃b"
-matches = [[5, 5]]
-unicode = true
-
-[[test]]
-name = "word-end-unicode-110"
-regex = '\b{end}'
-haystack = "b𝛃"
-matches = [[5, 5]]
-unicode = true
-
-# Tests for (?-u:\b{start-half})
-
-[[test]]
-name = "word-start-half-ascii-010"
-regex = '\b{start-half}'
-haystack = "a"
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-020"
-regex = '\b{start-half}'
-haystack = "a "
-matches = [[0, 0], [2, 2]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-030"
-regex = '\b{start-half}'
-haystack = " a "
-matches = [[0, 0], [1, 1], [3, 3]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-040"
-regex = '\b{start-half}'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-050"
-regex = '\b{start-half}'
-haystack = "ab"
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-060"
-regex = '\b{start-half}'
-haystack = "𝛃"
-matches = [[0, 0], [4, 4]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-060-noutf8"
-regex = '\b{start-half}'
-haystack = "𝛃"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "word-start-half-ascii-060-bounds"
-regex = '\b{start-half}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-070"
-regex = '\b{start-half}'
-haystack = " 𝛃 "
-matches = [[0, 0], [1, 1], [5, 5], [6, 6]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-080"
-regex = '\b{start-half}'
-haystack = "𝛃𐆀"
-matches = [[0, 0], [4, 4], [8, 8]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-090"
-regex = '\b{start-half}'
-haystack = "𝛃b"
-matches = [[0, 0], [4, 4]]
-unicode = false
-
-[[test]]
-name = "word-start-half-ascii-110"
-regex = '\b{start-half}'
-haystack = "b𝛃"
-matches = [[0, 0], [5, 5]]
-unicode = false
-
-# Tests for (?-u:\b{end-half})
-
-[[test]]
-name = "word-end-half-ascii-010"
-regex = '\b{end-half}'
-haystack = "a"
-matches = [[1, 1]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-020"
-regex = '\b{end-half}'
-haystack = "a "
-matches = [[1, 1], [2, 2]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-030"
-regex = '\b{end-half}'
-haystack = " a "
-matches = [[0, 0], [2, 2], [3, 3]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-040"
-regex = '\b{end-half}'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-050"
-regex = '\b{end-half}'
-haystack = "ab"
-matches = [[2, 2]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-060"
-regex = '\b{end-half}'
-haystack = "𝛃"
-matches = [[0, 0], [4, 4]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-060-bounds"
-regex = '\b{end-half}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-070"
-regex = '\b{end-half}'
-haystack = " 𝛃 "
-matches = [[0, 0], [1, 1], [5, 5], [6, 6]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-080"
-regex = '\b{end-half}'
-haystack = "𝛃𐆀"
-matches = [[0, 0], [4, 4], [8, 8]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-090"
-regex = '\b{end-half}'
-haystack = "𝛃b"
-matches = [[0, 0], [5, 5]]
-unicode = false
-
-[[test]]
-name = "word-end-half-ascii-110"
-regex = '\b{end-half}'
-haystack = "b𝛃"
-matches = [[1, 1], [5, 5]]
-unicode = false
-
-# Tests for \b{start-half}
-
-[[test]]
-name = "word-start-half-unicode-010"
-regex = '\b{start-half}'
-haystack = "a"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-020"
-regex = '\b{start-half}'
-haystack = "a "
-matches = [[0, 0], [2, 2]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-030"
-regex = '\b{start-half}'
-haystack = " a "
-matches = [[0, 0], [1, 1], [3, 3]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-040"
-regex = '\b{start-half}'
-haystack = ""
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-050"
-regex = '\b{start-half}'
-haystack = "ab"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-060"
-regex = '\b{start-half}'
-haystack = "𝛃"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-060-bounds"
-regex = '\b{start-half}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-070"
-regex = '\b{start-half}'
-haystack = " 𝛃 "
-matches = [[0, 0], [1, 1], [6, 6]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-080"
-regex = '\b{start-half}'
-haystack = "𝛃𐆀"
-matches = [[0, 0], [8, 8]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-090"
-regex = '\b{start-half}'
-haystack = "𝛃b"
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-start-half-unicode-110"
-regex = '\b{start-half}'
-haystack = "b𝛃"
-matches = [[0, 0]]
-unicode = true
-
-# Tests for \b{end-half}
-
-[[test]]
-name = "word-end-half-unicode-010"
-regex = '\b{end-half}'
-haystack = "a"
-matches = [[1, 1]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-020"
-regex = '\b{end-half}'
-haystack = "a "
-matches = [[1, 1], [2, 2]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-030"
-regex = '\b{end-half}'
-haystack = " a "
-matches = [[0, 0], [2, 2], [3, 3]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-040"
-regex = '\b{end-half}'
-haystack = ""
-matches = [[0, 0]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-050"
-regex = '\b{end-half}'
-haystack = "ab"
-matches = [[2, 2]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-060"
-regex = '\b{end-half}'
-haystack = "𝛃"
-matches = [[4, 4]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-060-bounds"
-regex = '\b{end-half}'
-haystack = "𝛃"
-bounds = [2, 3]
-matches = []
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-070"
-regex = '\b{end-half}'
-haystack = " 𝛃 "
-matches = [[0, 0], [5, 5], [6, 6]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-080"
-regex = '\b{end-half}'
-haystack = "𝛃𐆀"
-matches = [[4, 4], [8, 8]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-090"
-regex = '\b{end-half}'
-haystack = "𝛃b"
-matches = [[5, 5]]
-unicode = true
-
-[[test]]
-name = "word-end-half-unicode-110"
-regex = '\b{end-half}'
-haystack = "b𝛃"
-matches = [[5, 5]]
-unicode = true
-
-# Specialty tests.
-
-# Since \r is special cased in the start state computation (to deal with CRLF
-# mode), this test ensures that the correct start state is computed when the
-# pattern starts with a half word boundary assertion.
-[[test]]
-name = "word-start-half-ascii-carriage"
-regex = '\b{start-half}[a-z]+'
-haystack = 'ABC\rabc'
-matches = [[4, 7]]
-bounds = [4, 7]
-unescape = true
-
-# Since \n is also special cased in the start state computation, this test
-# ensures that the correct start state is computed when the pattern starts with
-# a half word boundary assertion.
-[[test]]
-name = "word-start-half-ascii-linefeed"
-regex = '\b{start-half}[a-z]+'
-haystack = 'ABC\nabc'
-matches = [[4, 7]]
-bounds = [4, 7]
-unescape = true
-
-# Like the carriage return test above, but with a custom line terminator.
-[[test]]
-name = "word-start-half-ascii-customlineterm"
-regex = '\b{start-half}[a-z]+'
-haystack = 'ABC!abc'
-matches = [[4, 7]]
-bounds = [4, 7]
-unescape = true
-line-terminator = '!'
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/word-boundary.toml b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/word-boundary.toml
deleted file mode 100644
index 1d86fc9b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/testdata/word-boundary.toml
+++ /dev/null
@@ -1,781 +0,0 @@
-# Some of these are cribbed from RE2's test suite.
-
-# These test \b. Below are tests for \B.
-[[test]]
-name = "wb1"
-regex = '\b'
-haystack = ""
-matches = []
-unicode = false
-
-[[test]]
-name = "wb2"
-regex = '\b'
-haystack = "a"
-matches = [[0, 0], [1, 1]]
-unicode = false
-
-[[test]]
-name = "wb3"
-regex = '\b'
-haystack = "ab"
-matches = [[0, 0], [2, 2]]
-unicode = false
-
-[[test]]
-name = "wb4"
-regex = '^\b'
-haystack = "ab"
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "wb5"
-regex = '\b$'
-haystack = "ab"
-matches = [[2, 2]]
-unicode = false
-
-[[test]]
-name = "wb6"
-regex = '^\b$'
-haystack = "ab"
-matches = []
-unicode = false
-
-[[test]]
-name = "wb7"
-regex = '\bbar\b'
-haystack = "nobar bar foo bar"
-matches = [[6, 9], [14, 17]]
-unicode = false
-
-[[test]]
-name = "wb8"
-regex = 'a\b'
-haystack = "faoa x"
-matches = [[3, 4]]
-unicode = false
-
-[[test]]
-name = "wb9"
-regex = '\bbar'
-haystack = "bar x"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb10"
-regex = '\bbar'
-haystack = "foo\nbar x"
-matches = [[4, 7]]
-unicode = false
-
-[[test]]
-name = "wb11"
-regex = 'bar\b'
-haystack = "foobar"
-matches = [[3, 6]]
-unicode = false
-
-[[test]]
-name = "wb12"
-regex = 'bar\b'
-haystack = "foobar\nxxx"
-matches = [[3, 6]]
-unicode = false
-
-[[test]]
-name = "wb13"
-regex = '(?:foo|bar|[A-Z])\b'
-haystack = "foo"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb14"
-regex = '(?:foo|bar|[A-Z])\b'
-haystack = "foo\n"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb15"
-regex = '\b(?:foo|bar|[A-Z])'
-haystack = "foo"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb16"
-regex = '\b(?:foo|bar|[A-Z])\b'
-haystack = "X"
-matches = [[0, 1]]
-unicode = false
-
-[[test]]
-name = "wb17"
-regex = '\b(?:foo|bar|[A-Z])\b'
-haystack = "XY"
-matches = []
-unicode = false
-
-[[test]]
-name = "wb18"
-regex = '\b(?:foo|bar|[A-Z])\b'
-haystack = "bar"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb19"
-regex = '\b(?:foo|bar|[A-Z])\b'
-haystack = "foo"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb20"
-regex = '\b(?:foo|bar|[A-Z])\b'
-haystack = "foo\n"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb21"
-regex = '\b(?:foo|bar|[A-Z])\b'
-haystack = "ffoo bbar N x"
-matches = [[10, 11]]
-unicode = false
-
-[[test]]
-name = "wb22"
-regex = '\b(?:fo|foo)\b'
-haystack = "fo"
-matches = [[0, 2]]
-unicode = false
-
-[[test]]
-name = "wb23"
-regex = '\b(?:fo|foo)\b'
-haystack = "foo"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb24"
-regex = '\b\b'
-haystack = ""
-matches = []
-unicode = false
-
-[[test]]
-name = "wb25"
-regex = '\b\b'
-haystack = "a"
-matches = [[0, 0], [1, 1]]
-unicode = false
-
-[[test]]
-name = "wb26"
-regex = '\b$'
-haystack = ""
-matches = []
-unicode = false
-
-[[test]]
-name = "wb27"
-regex = '\b$'
-haystack = "x"
-matches = [[1, 1]]
-unicode = false
-
-[[test]]
-name = "wb28"
-regex = '\b$'
-haystack = "y x"
-matches = [[3, 3]]
-unicode = false
-
-[[test]]
-name = "wb29"
-regex = '(?-u:\b).$'
-haystack = "x"
-matches = [[0, 1]]
-
-[[test]]
-name = "wb30"
-regex = '^\b(?:fo|foo)\b'
-haystack = "fo"
-matches = [[0, 2]]
-unicode = false
-
-[[test]]
-name = "wb31"
-regex = '^\b(?:fo|foo)\b'
-haystack = "foo"
-matches = [[0, 3]]
-unicode = false
-
-[[test]]
-name = "wb32"
-regex = '^\b$'
-haystack = ""
-matches = []
-unicode = false
-
-[[test]]
-name = "wb33"
-regex = '^\b$'
-haystack = "x"
-matches = []
-unicode = false
-
-[[test]]
-name = "wb34"
-regex = '^(?-u:\b).$'
-haystack = "x"
-matches = [[0, 1]]
-
-[[test]]
-name = "wb35"
-regex = '^(?-u:\b).(?-u:\b)$'
-haystack = "x"
-matches = [[0, 1]]
-
-[[test]]
-name = "wb36"
-regex = '^^^^^\b$$$$$'
-haystack = ""
-matches = []
-unicode = false
-
-[[test]]
-name = "wb37"
-regex = '^^^^^(?-u:\b).$$$$$'
-haystack = "x"
-matches = [[0, 1]]
-
-[[test]]
-name = "wb38"
-regex = '^^^^^\b$$$$$'
-haystack = "x"
-matches = []
-unicode = false
-
-[[test]]
-name = "wb39"
-regex = '^^^^^(?-u:\b\b\b).(?-u:\b\b\b)$$$$$'
-haystack = "x"
-matches = [[0, 1]]
-
-[[test]]
-name = "wb40"
-regex = '(?-u:\b).+(?-u:\b)'
-haystack = "$$abc$$"
-matches = [[2, 5]]
-
-[[test]]
-name = "wb41"
-regex = '\b'
-haystack = "a b c"
-matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
-unicode = false
-
-[[test]]
-name = "wb42"
-regex = '\bfoo\b'
-haystack = "zzz foo zzz"
-matches = [[4, 7]]
-unicode = false
-
-[[test]]
-name = "wb43"
-regex = '\b^'
-haystack = "ab"
-matches = [[0, 0]]
-unicode = false
-
-[[test]]
-name = "wb44"
-regex = '$\b'
-haystack = "ab"
-matches = [[2, 2]]
-unicode = false
-
-
-# Tests for \B. Note that \B is not allowed if UTF-8 mode is enabled, so we
-# have to disable it for most of these tests. This is because \B can match at
-# non-UTF-8 boundaries.
-[[test]]
-name = "nb1"
-regex = '\Bfoo\B'
-haystack = "n foo xfoox that"
-matches = [[7, 10]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb2"
-regex = 'a\B'
-haystack = "faoa x"
-matches = [[1, 2]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb3"
-regex = '\Bbar'
-haystack = "bar x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb4"
-regex = '\Bbar'
-haystack = "foo\nbar x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb5"
-regex = 'bar\B'
-haystack = "foobar"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb6"
-regex = 'bar\B'
-haystack = "foobar\nxxx"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb7"
-regex = '(?:foo|bar|[A-Z])\B'
-haystack = "foox"
-matches = [[0, 3]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb8"
-regex = '(?:foo|bar|[A-Z])\B'
-haystack = "foo\n"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb9"
-regex = '\B'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb10"
-regex = '\B'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb11"
-regex = '\B(?:foo|bar|[A-Z])'
-haystack = "foo"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb12"
-regex = '\B(?:foo|bar|[A-Z])\B'
-haystack = "xXy"
-matches = [[1, 2]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb13"
-regex = '\B(?:foo|bar|[A-Z])\B'
-haystack = "XY"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb14"
-regex = '\B(?:foo|bar|[A-Z])\B'
-haystack = "XYZ"
-matches = [[1, 2]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb15"
-regex = '\B(?:foo|bar|[A-Z])\B'
-haystack = "abara"
-matches = [[1, 4]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb16"
-regex = '\B(?:foo|bar|[A-Z])\B'
-haystack = "xfoo_"
-matches = [[1, 4]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb17"
-regex = '\B(?:foo|bar|[A-Z])\B'
-haystack = "xfoo\n"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb18"
-regex = '\B(?:foo|bar|[A-Z])\B'
-haystack = "foo bar vNX"
-matches = [[9, 10]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb19"
-regex = '\B(?:fo|foo)\B'
-haystack = "xfoo"
-matches = [[1, 3]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb20"
-regex = '\B(?:foo|fo)\B'
-haystack = "xfooo"
-matches = [[1, 4]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb21"
-regex = '\B\B'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb22"
-regex = '\B\B'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb23"
-regex = '\B$'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb24"
-regex = '\B$'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb25"
-regex = '\B$'
-haystack = "y x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb26"
-regex = '\B.$'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb27"
-regex = '^\B(?:fo|foo)\B'
-haystack = "fo"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb28"
-regex = '^\B(?:fo|foo)\B'
-haystack = "fo"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb29"
-regex = '^\B'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb30"
-regex = '^\B'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb31"
-regex = '^\B\B'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb32"
-regex = '^\B\B'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb33"
-regex = '^\B$'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb34"
-regex = '^\B$'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb35"
-regex = '^\B.$'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb36"
-regex = '^\B.\B$'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb37"
-regex = '^^^^^\B$$$$$'
-haystack = ""
-matches = [[0, 0]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb38"
-regex = '^^^^^\B.$$$$$'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-[[test]]
-name = "nb39"
-regex = '^^^^^\B$$$$$'
-haystack = "x"
-matches = []
-unicode = false
-utf8 = false
-
-
-# unicode1* and unicode2* work for both Unicode and ASCII because all matches
-# are reported as byte offsets, and « and » do not correspond to word
-# boundaries at either the character or byte level.
-[[test]]
-name = "unicode1"
-regex = '\bx\b'
-haystack = "«x"
-matches = [[2, 3]]
-
-[[test]]
-name = "unicode1-only-ascii"
-regex = '\bx\b'
-haystack = "«x"
-matches = [[2, 3]]
-unicode = false
-
-[[test]]
-name = "unicode2"
-regex = '\bx\b'
-haystack = "x»"
-matches = [[0, 1]]
-
-[[test]]
-name = "unicode2-only-ascii"
-regex = '\bx\b'
-haystack = "x»"
-matches = [[0, 1]]
-unicode = false
-
-# ASCII word boundaries are completely oblivious to Unicode characters, so
-# even though β is a character, an ASCII \b treats it as a word boundary
-# when it is adjacent to another ASCII character. (The ASCII \b only looks
-# at the leading byte of β.) For Unicode \b, the tests are precisely inverted.
-[[test]]
-name = "unicode3"
-regex = '\bx\b'
-haystack = 'áxβ'
-matches = []
-
-[[test]]
-name = "unicode3-only-ascii"
-regex = '\bx\b'
-haystack = 'áxβ'
-matches = [[2, 3]]
-unicode = false
-
-[[test]]
-name = "unicode4"
-regex = '\Bx\B'
-haystack = 'áxβ'
-matches = [[2, 3]]
-
-[[test]]
-name = "unicode4-only-ascii"
-regex = '\Bx\B'
-haystack = 'áxβ'
-matches = []
-unicode = false
-utf8 = false
-
-# The same as above, but with \b instead of \B as a sanity check.
-[[test]]
-name = "unicode5"
-regex = '\b'
-haystack = "0\U0007EF5E"
-matches = [[0, 0], [1, 1]]
-
-[[test]]
-name = "unicode5-only-ascii"
-regex = '\b'
-haystack = "0\U0007EF5E"
-matches = [[0, 0], [1, 1]]
-unicode = false
-utf8 = false
-
-[[test]]
-name = "unicode5-noutf8"
-regex = '\b'
-haystack = '0\xFF\xFF\xFF\xFF'
-matches = [[0, 0], [1, 1]]
-unescape = true
-utf8 = false
-
-[[test]]
-name = "unicode5-noutf8-only-ascii"
-regex = '\b'
-haystack = '0\xFF\xFF\xFF\xFF'
-matches = [[0, 0], [1, 1]]
-unescape = true
-unicode = false
-utf8 = false
-
-# Weird special case to ensure that ASCII \B treats each individual code unit
-# as a non-word byte. (The specific codepoint is irrelevant. It's an arbitrary
-# codepoint that uses 4 bytes in its UTF-8 encoding and is not a member of the
-# \w character class.)
-[[test]]
-name = "unicode5-not"
-regex = '\B'
-haystack = "0\U0007EF5E"
-matches = [[5, 5]]
-
-[[test]]
-name = "unicode5-not-only-ascii"
-regex = '\B'
-haystack = "0\U0007EF5E"
-matches = [[2, 2], [3, 3], [4, 4], [5, 5]]
-unicode = false
-utf8 = false
-
-# This gets no matches since \B only matches in the presence of valid UTF-8
-# when Unicode is enabled, even when UTF-8 mode is disabled.
-[[test]]
-name = "unicode5-not-noutf8"
-regex = '\B'
-haystack = '0\xFF\xFF\xFF\xFF'
-matches = []
-unescape = true
-utf8 = false
-
-# But this DOES get matches since \B in ASCII mode only looks at individual
-# bytes.
-[[test]]
-name = "unicode5-not-noutf8-only-ascii"
-regex = '\B'
-haystack = '0\xFF\xFF\xFF\xFF'
-matches = [[2, 2], [3, 3], [4, 4], [5, 5]]
-unescape = true
-unicode = false
-utf8 = false
-
-# Some tests of no particular significance.
-[[test]]
-name = "unicode6"
-regex = '\b[0-9]+\b'
-haystack = "foo 123 bar 456 quux 789"
-matches = [[4, 7], [12, 15], [21, 24]]
-
-[[test]]
-name = "unicode7"
-regex = '\b[0-9]+\b'
-haystack = "foo 123 bar a456 quux 789"
-matches = [[4, 7], [22, 25]]
-
-[[test]]
-name = "unicode8"
-regex = '\b[0-9]+\b'
-haystack = "foo 123 bar 456a quux 789"
-matches = [[4, 7], [22, 25]]
-
-# A variant of the problem described here:
-# https://github.com/google/re2/blob/89567f5de5b23bb5ad0c26cbafc10bdc7389d1fa/re2/dfa.cc#L658-L667
-[[test]]
-name = "alt-with-assertion-repetition"
-regex = '(?:\b|%)+'
-haystack = "z%"
-bounds = [1, 2]
-anchored = true
-matches = [[1, 1]]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/mod.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/mod.rs
deleted file mode 100644
index 88c196a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/mod.rs
+++ /dev/null
@@ -1,166 +0,0 @@
-// This set of tests is different from regression_fuzz in that the tests start
-// from the fuzzer data directly. The test essentially duplicates the fuzz
-// target. I wonder if there's a better way to set this up... Hmmm. I bet
-// `cargo fuzz` has something where it can run a target against crash files and
-// verify that they pass.
-
-// This case found by the fuzzer causes the meta engine to use the "reverse
-// inner" literal strategy. That in turn uses a specialized search routine
-// for the lazy DFA in order to avoid worst case quadratic behavior. That
-// specialized search routine had a bug where it assumed that start state
-// specialization was disabled. But this is indeed not the case, since it
-// reuses the "general" lazy DFA for the full regex created as part of the core
-// strategy, which might very well have start states specialized due to the
-// existence of a prefilter.
-//
-// This is a somewhat weird case because if the core engine has a prefilter,
-// then it's usually the case that the "reverse inner" optimization won't be
-// pursued in that case. But there are some heuristics that try to detect
-// whether a prefilter is "fast" or not. If it's not, then the meta engine will
-// attempt the reverse inner optimization. And indeed, that's what happens
-// here. So the reverse inner optimization ends up with a lazy DFA that has
-// start states specialized. Ideally this wouldn't happen because specializing
-// start states without a prefilter inside the DFA can be disastrous for
-// performance by causing the DFA to ping-pong in and out of the special state
-// handling. In this case, it's probably not a huge deal because the lazy
-// DFA is only used for part of the matching where as the work horse is the
-// prefilter found by the reverse inner optimization.
-//
-// We could maybe fix this by refactoring the meta engine to be a little more
-// careful. For example, by attempting the optimizations before building the
-// core engine. But this is perhaps a little tricky.
-#[test]
-fn meta_stopat_specialize_start_states() {
-    let data = include_bytes!(
-        "testdata/crash-8760b19b25d74e3603d4c643e9c7404fdd3631f9",
-    );
-    let _ = run(data);
-}
-
-// Same bug as meta_stopat_specialize_start_states, but minimized by the
-// fuzzer.
-#[test]
-fn meta_stopat_specialize_start_states_min() {
-    let data = include_bytes!(
-        "testdata/minimized-from-8760b19b25d74e3603d4c643e9c7404fdd3631f9",
-    );
-    let _ = run(data);
-}
-
-// This input generated a pattern with a fail state (e.g., \P{any}, [^\s\S]
-// or [a&&b]). But the fail state was in a branch, where a subsequent branch
-// should have led to an overall match, but handling of the fail state
-// prevented it from doing so. A hand-minimized version of this is '[^\s\S]A|B'
-// on the haystack 'B'. That should yield a match of 'B'.
-//
-// The underlying cause was an issue in how DFA determinization handled fail
-// states. The bug didn't impact the PikeVM or the bounded backtracker.
-#[test]
-fn fail_branch_prevents_match() {
-    let data = include_bytes!(
-        "testdata/crash-cd33b13df59ea9d74503986f9d32a270dd43cc04",
-    );
-    let _ = run(data);
-}
-
-// This input generated a pattern that contained a sub-expression like this:
-//
-//     a{0}{50000}
-//
-// This turned out to provoke quadratic behavior in the NFA compiler.
-// Basically, the NFA compiler works in two phases. The first phase builds
-// a more complicated-but-simpler-to-construct sequence of NFA states that
-// includes unconditional epsilon transitions. As part of converting this
-// sequence to the "final" NFA, we remove those unconditional espilon
-// transition. The code responsible for doing this follows every chain of
-// these transitions and remaps the state IDs. The way we were doing this
-// before resulted in re-following every subsequent part of the chain for each
-// state in the chain, which ended up being quadratic behavior. We effectively
-// memoized this, which fixed the performance bug.
-#[test]
-fn slow_big_empty_chain() {
-    let data = include_bytes!(
-        "testdata/slow-unit-9ca9cc9929fee1fcbb847a78384effb8b98ea18a",
-    );
-    let _ = run(data);
-}
-
-// A different case of slow_big_empty_chain.
-#[test]
-fn slow_big_empty_chain2() {
-    let data = include_bytes!(
-        "testdata/slow-unit-3ab758ea520027fefd3f00e1384d9aeef155739e",
-    );
-    let _ = run(data);
-}
-
-// A different case of slow_big_empty_chain.
-#[test]
-fn slow_big_empty_chain3() {
-    let data = include_bytes!(
-        "testdata/slow-unit-b8a052f4254802edbe5f569b6ce6e9b6c927e9d6",
-    );
-    let _ = run(data);
-}
-
-// A different case of slow_big_empty_chain.
-#[test]
-fn slow_big_empty_chain4() {
-    let data = include_bytes!(
-        "testdata/slow-unit-93c73a43581f205f9aaffd9c17e52b34b17becd0",
-    );
-    let _ = run(data);
-}
-
-// A different case of slow_big_empty_chain.
-#[test]
-fn slow_big_empty_chain5() {
-    let data = include_bytes!(
-        "testdata/slow-unit-5345fccadf3812c53c3ccc7af5aa2741b7b2106c",
-    );
-    let _ = run(data);
-}
-
-// A different case of slow_big_empty_chain.
-#[test]
-fn slow_big_empty_chain6() {
-    let data = include_bytes!(
-        "testdata/slow-unit-6bd643eec330166e4ada91da2d3f284268481085",
-    );
-    let _ = run(data);
-}
-
-// This fuzz input generated a pattern with a large repetition that would fail
-// NFA compilation, but its HIR was small. (HIR doesn't expand repetitions.)
-// But, the bounds were high enough that the minimum length calculation
-// overflowed. We fixed this by using saturating arithmetic (and also checked
-// arithmetic for the maximum length calculation).
-//
-// Incidentally, this was the only unguarded arithmetic operation performed in
-// the HIR smart constructors. And the fuzzer found it. Hah. Nice.
-#[test]
-fn minimum_len_overflow() {
-    let data = include_bytes!(
-        "testdata/crash-7eb3351f0965e5d6c1cb98aa8585949ef96531ff",
-    );
-    let _ = run(data);
-}
-
-// This is the fuzz target function. We duplicate it here since this is the
-// thing we use to interpret the data. It is ultimately what we want to
-// succeed.
-fn run(data: &[u8]) -> Option<()> {
-    if data.len() < 2 {
-        return None;
-    }
-    let mut split_at = usize::from(data[0]);
-    let data = std::str::from_utf8(&data[1..]).ok()?;
-    // Split data into a regex and haystack to search.
-    let len = usize::try_from(data.chars().count()).ok()?;
-    split_at = std::cmp::max(split_at, 1) % len;
-    let char_index = data.char_indices().nth(split_at)?.0;
-    let (pattern, input) = data.split_at(char_index);
-    let re = regex::Regex::new(pattern).ok()?;
-    re.is_match(input);
-    Some(())
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-7eb3351f0965e5d6c1cb98aa8585949ef96531ff b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-7eb3351f0965e5d6c1cb98aa8585949ef96531ff
deleted file mode 100644
index f7ffbc97..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-7eb3351f0965e5d6c1cb98aa8585949ef96531ff
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-8760b19b25d74e3603d4c643e9c7404fdd3631f9 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-8760b19b25d74e3603d4c643e9c7404fdd3631f9
deleted file mode 100644
index 86748199..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-8760b19b25d74e3603d4c643e9c7404fdd3631f9
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-cd33b13df59ea9d74503986f9d32a270dd43cc04 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-cd33b13df59ea9d74503986f9d32a270dd43cc04
deleted file mode 100644
index 152769d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/crash-cd33b13df59ea9d74503986f9d32a270dd43cc04
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/minimized-from-8760b19b25d74e3603d4c643e9c7404fdd3631f9 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/minimized-from-8760b19b25d74e3603d4c643e9c7404fdd3631f9
deleted file mode 100644
index 69663d5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/minimized-from-8760b19b25d74e3603d4c643e9c7404fdd3631f9
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-3ab758ea520027fefd3f00e1384d9aeef155739e b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-3ab758ea520027fefd3f00e1384d9aeef155739e
deleted file mode 100644
index 6c22803..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-3ab758ea520027fefd3f00e1384d9aeef155739e
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-5345fccadf3812c53c3ccc7af5aa2741b7b2106c b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-5345fccadf3812c53c3ccc7af5aa2741b7b2106c
deleted file mode 100644
index 0570f328..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-5345fccadf3812c53c3ccc7af5aa2741b7b2106c
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-6bd643eec330166e4ada91da2d3f284268481085 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-6bd643eec330166e4ada91da2d3f284268481085
deleted file mode 100644
index 182bc7f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-6bd643eec330166e4ada91da2d3f284268481085
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-93c73a43581f205f9aaffd9c17e52b34b17becd0 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-93c73a43581f205f9aaffd9c17e52b34b17becd0
deleted file mode 100644
index f939c33..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-93c73a43581f205f9aaffd9c17e52b34b17becd0
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-9ca9cc9929fee1fcbb847a78384effb8b98ea18a b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-9ca9cc9929fee1fcbb847a78384effb8b98ea18a
deleted file mode 100644
index a87de230..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-9ca9cc9929fee1fcbb847a78384effb8b98ea18a
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-b8a052f4254802edbe5f569b6ce6e9b6c927e9d6 b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-b8a052f4254802edbe5f569b6ce6e9b6c927e9d6
deleted file mode 100644
index dc332933..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/fuzz/testdata/slow-unit-b8a052f4254802edbe5f569b6ce6e9b6c927e9d6
+++ /dev/null
Binary files differ
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/lib.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/lib.rs
deleted file mode 100644
index b3f6942..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/lib.rs
+++ /dev/null
@@ -1,58 +0,0 @@
-#![cfg_attr(feature = "pattern", feature(pattern))]
-
-mod fuzz;
-mod misc;
-mod regression;
-mod regression_fuzz;
-mod replace;
-#[cfg(feature = "pattern")]
-mod searcher;
-mod suite_bytes;
-mod suite_bytes_set;
-mod suite_string;
-mod suite_string_set;
-
-const BLACKLIST: &[&str] = &[
-    // Nothing to blacklist yet!
-];
-
-fn suite() -> anyhow::Result<regex_test::RegexTests> {
-    let _ = env_logger::try_init();
-
-    let mut tests = regex_test::RegexTests::new();
-    macro_rules! load {
-        ($name:expr) => {{
-            const DATA: &[u8] =
-                include_bytes!(concat!("../testdata/", $name, ".toml"));
-            tests.load_slice($name, DATA)?;
-        }};
-    }
-
-    load!("anchored");
-    load!("bytes");
-    load!("crazy");
-    load!("crlf");
-    load!("earliest");
-    load!("empty");
-    load!("expensive");
-    load!("flags");
-    load!("iter");
-    load!("leftmost-all");
-    load!("line-terminator");
-    load!("misc");
-    load!("multiline");
-    load!("no-unicode");
-    load!("overlapping");
-    load!("regression");
-    load!("set");
-    load!("substring");
-    load!("unicode");
-    load!("utf8");
-    load!("word-boundary");
-    load!("word-boundary-special");
-    load!("fowler/basic");
-    load!("fowler/nullsubexpr");
-    load!("fowler/repetition");
-
-    Ok(tests)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/misc.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/misc.rs
deleted file mode 100644
index 91e7d28..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/misc.rs
+++ /dev/null
@@ -1,143 +0,0 @@
-use regex::Regex;
-
-macro_rules! regex {
-    ($pattern:expr) => {
-        regex::Regex::new($pattern).unwrap()
-    };
-}
-
-#[test]
-fn unclosed_group_error() {
-    let err = Regex::new(r"(").unwrap_err();
-    let msg = err.to_string();
-    assert!(msg.contains("unclosed group"), "error message: {:?}", msg);
-}
-
-#[test]
-fn regex_string() {
-    assert_eq!(r"[a-zA-Z0-9]+", regex!(r"[a-zA-Z0-9]+").as_str());
-    assert_eq!(r"[a-zA-Z0-9]+", &format!("{}", regex!(r"[a-zA-Z0-9]+")));
-    assert_eq!(
-        r#"Regex("[a-zA-Z0-9]+")"#,
-        &format!("{:?}", regex!(r"[a-zA-Z0-9]+"))
-    );
-}
-
-#[test]
-fn capture_names() {
-    let re = regex!(r"(.)(?P<a>.)");
-    assert_eq!(3, re.captures_len());
-    assert_eq!((3, Some(3)), re.capture_names().size_hint());
-    assert_eq!(
-        vec![None, None, Some("a")],
-        re.capture_names().collect::<Vec<_>>()
-    );
-}
-
-#[test]
-fn capture_index() {
-    let re = regex!(r"^(?P<name>.+)$");
-    let cap = re.captures("abc").unwrap();
-    assert_eq!(&cap[0], "abc");
-    assert_eq!(&cap[1], "abc");
-    assert_eq!(&cap["name"], "abc");
-}
-
-#[test]
-#[should_panic]
-fn capture_index_panic_usize() {
-    let re = regex!(r"^(?P<name>.+)$");
-    let cap = re.captures("abc").unwrap();
-    let _ = cap[2];
-}
-
-#[test]
-#[should_panic]
-fn capture_index_panic_name() {
-    let re = regex!(r"^(?P<name>.+)$");
-    let cap = re.captures("abc").unwrap();
-    let _ = cap["bad name"];
-}
-
-#[test]
-fn capture_index_lifetime() {
-    // This is a test of whether the types on `caps["..."]` are general
-    // enough. If not, this will fail to typecheck.
-    fn inner(s: &str) -> usize {
-        let re = regex!(r"(?P<number>[0-9]+)");
-        let caps = re.captures(s).unwrap();
-        caps["number"].len()
-    }
-    assert_eq!(3, inner("123"));
-}
-
-#[test]
-fn capture_misc() {
-    let re = regex!(r"(.)(?P<a>a)?(.)(?P<b>.)");
-    let cap = re.captures("abc").unwrap();
-
-    assert_eq!(5, cap.len());
-
-    assert_eq!((0, 3), {
-        let m = cap.get(0).unwrap();
-        (m.start(), m.end())
-    });
-    assert_eq!(None, cap.get(2));
-    assert_eq!((2, 3), {
-        let m = cap.get(4).unwrap();
-        (m.start(), m.end())
-    });
-
-    assert_eq!("abc", cap.get(0).unwrap().as_str());
-    assert_eq!(None, cap.get(2));
-    assert_eq!("c", cap.get(4).unwrap().as_str());
-
-    assert_eq!(None, cap.name("a"));
-    assert_eq!("c", cap.name("b").unwrap().as_str());
-}
-
-#[test]
-fn sub_capture_matches() {
-    let re = regex!(r"([a-z])(([a-z])|([0-9]))");
-    let cap = re.captures("a5").unwrap();
-    let subs: Vec<_> = cap.iter().collect();
-
-    assert_eq!(5, subs.len());
-    assert!(subs[0].is_some());
-    assert!(subs[1].is_some());
-    assert!(subs[2].is_some());
-    assert!(subs[3].is_none());
-    assert!(subs[4].is_some());
-
-    assert_eq!("a5", subs[0].unwrap().as_str());
-    assert_eq!("a", subs[1].unwrap().as_str());
-    assert_eq!("5", subs[2].unwrap().as_str());
-    assert_eq!("5", subs[4].unwrap().as_str());
-}
-
-// Test that the DFA can handle pathological cases. (This should result in the
-// DFA's cache being flushed too frequently, which should cause it to quit and
-// fall back to the NFA algorithm.)
-#[test]
-fn dfa_handles_pathological_case() {
-    fn ones_and_zeroes(count: usize) -> String {
-        let mut s = String::new();
-        for i in 0..count {
-            if i % 3 == 0 {
-                s.push('1');
-            } else {
-                s.push('0');
-            }
-        }
-        s
-    }
-
-    let re = regex!(r"[01]*1[01]{20}$");
-    let text = {
-        let mut pieces = ones_and_zeroes(100_000);
-        pieces.push('1');
-        pieces.push_str(&ones_and_zeroes(20));
-        pieces
-    };
-    assert!(re.is_match(&text));
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/regression.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/regression.rs
deleted file mode 100644
index a5867016..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/regression.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-use regex::Regex;
-
-macro_rules! regex {
-    ($pattern:expr) => {
-        regex::Regex::new($pattern).unwrap()
-    };
-}
-
-// See: https://github.com/rust-lang/regex/issues/48
-#[test]
-fn invalid_regexes_no_crash() {
-    assert!(Regex::new("(*)").is_err());
-    assert!(Regex::new("(?:?)").is_err());
-    assert!(Regex::new("(?)").is_err());
-    assert!(Regex::new("*").is_err());
-}
-
-// See: https://github.com/rust-lang/regex/issues/98
-#[test]
-fn regression_many_repeat_stack_overflow() {
-    let re = regex!("^.{1,2500}");
-    assert_eq!(
-        vec![0..1],
-        re.find_iter("a").map(|m| m.range()).collect::<Vec<_>>()
-    );
-}
-
-// See: https://github.com/rust-lang/regex/issues/555
-#[test]
-fn regression_invalid_repetition_expr() {
-    assert!(Regex::new("(?m){1,1}").is_err());
-}
-
-// See: https://github.com/rust-lang/regex/issues/527
-#[test]
-fn regression_invalid_flags_expression() {
-    assert!(Regex::new("(((?x)))").is_ok());
-}
-
-// See: https://github.com/rust-lang/regex/issues/129
-#[test]
-fn regression_captures_rep() {
-    let re = regex!(r"([a-f]){2}(?P<foo>[x-z])");
-    let caps = re.captures("abx").unwrap();
-    assert_eq!(&caps["foo"], "x");
-}
-
-// See: https://github.com/BurntSushi/ripgrep/issues/1247
-#[cfg(feature = "unicode-perl")]
-#[test]
-fn regression_nfa_stops1() {
-    let re = regex::bytes::Regex::new(r"\bs(?:[ab])").unwrap();
-    assert_eq!(0, re.find_iter(b"s\xE4").count());
-}
-
-// See: https://github.com/rust-lang/regex/issues/981
-#[cfg(feature = "unicode")]
-#[test]
-fn regression_bad_word_boundary() {
-    let re = regex!(r#"(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"#);
-    let hay = "ubi-Darwin-x86_64.tar.gz";
-    assert!(!re.is_match(hay));
-    let hay = "ubi-Windows-x86_64.zip";
-    assert!(re.is_match(hay));
-}
-
-// See: https://github.com/rust-lang/regex/issues/982
-#[cfg(feature = "unicode-perl")]
-#[test]
-fn regression_unicode_perl_not_enabled() {
-    let pat = r"(\d+\s?(years|year|y))?\s?(\d+\s?(months|month|m))?\s?(\d+\s?(weeks|week|w))?\s?(\d+\s?(days|day|d))?\s?(\d+\s?(hours|hour|h))?";
-    assert!(Regex::new(pat).is_ok());
-}
-
-// See: https://github.com/rust-lang/regex/issues/995
-#[test]
-fn regression_big_regex_overflow() {
-    let pat = r" {2147483516}{2147483416}{5}";
-    assert!(Regex::new(pat).is_err());
-}
-
-// See: https://github.com/rust-lang/regex/issues/999
-#[test]
-fn regression_complete_literals_suffix_incorrect() {
-    let needles = vec![
-        "aA", "bA", "cA", "dA", "eA", "fA", "gA", "hA", "iA", "jA", "kA",
-        "lA", "mA", "nA", "oA", "pA", "qA", "rA", "sA", "tA", "uA", "vA",
-        "wA", "xA", "yA", "zA",
-    ];
-    let pattern = needles.join("|");
-    let re = regex!(&pattern);
-    let hay = "FUBAR";
-    assert_eq!(0, re.find_iter(hay).count());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/regression_fuzz.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/regression_fuzz.rs
deleted file mode 100644
index f90ad4cb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/regression_fuzz.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-// These tests are only run for the "default" test target because some of them
-// can take quite a long time. Some of them take long enough that it's not
-// practical to run them in debug mode. :-/
-
-use regex::Regex;
-
-macro_rules! regex {
-    ($pattern:expr) => {
-        regex::Regex::new($pattern).unwrap()
-    };
-}
-
-// See: https://oss-fuzz.com/testcase-detail/5673225499181056
-//
-// Ignored by default since it takes too long in debug mode (almost a minute).
-#[test]
-#[ignore]
-fn fuzz1() {
-    regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**");
-}
-
-// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505
-// See: https://github.com/rust-lang/regex/issues/722
-#[test]
-#[cfg(feature = "unicode")]
-fn empty_any_errors_no_panic() {
-    assert!(Regex::new(r"\P{any}").is_ok());
-}
-
-// This tests that a very large regex errors during compilation instead of
-// using gratuitous amounts of memory. The specific problem is that the
-// compiler wasn't accounting for the memory used by Unicode character classes
-// correctly.
-//
-// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579
-#[test]
-fn big_regex_fails_to_compile() {
-    let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}";
-    assert!(Regex::new(pat).is_err());
-}
-
-// This was caught while on master but before a release went out(!).
-//
-// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=58173
-#[test]
-fn todo() {
-    let pat = "(?:z|xx)@|xx";
-    assert!(Regex::new(pat).is_ok());
-}
-
-// This was caused by the fuzzer, and then minimized by hand.
-//
-// This was caused by a bug in DFA determinization that mishandled NFA fail
-// states.
-#[test]
-fn fail_branch_prevents_match() {
-    let pat = r".*[a&&b]A|B";
-    let hay = "B";
-    let re = Regex::new(pat).unwrap();
-    assert!(re.is_match(hay));
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/replace.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/replace.rs
deleted file mode 100644
index f26ae460..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/replace.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-macro_rules! replace(
-    ($name:ident, $which:ident, $re:expr,
-     $search:expr, $replace:expr, $result:expr) => (
-        #[test]
-        fn $name() {
-            let re = regex::Regex::new($re).unwrap();
-            assert_eq!(re.$which($search, $replace), $result);
-        }
-    );
-);
-
-replace!(first, replace, r"[0-9]", "age: 26", "Z", "age: Z6");
-replace!(plus, replace, r"[0-9]+", "age: 26", "Z", "age: Z");
-replace!(all, replace_all, r"[0-9]", "age: 26", "Z", "age: ZZ");
-replace!(groups, replace, r"([^ ]+)[ ]+([^ ]+)", "w1 w2", "$2 $1", "w2 w1");
-replace!(
-    double_dollar,
-    replace,
-    r"([^ ]+)[ ]+([^ ]+)",
-    "w1 w2",
-    "$2 $$1",
-    "w2 $1"
-);
-// replace!(adjacent_index, replace,
-// r"([^aeiouy])ies$", "skies", "$1y", "sky");
-replace!(
-    named,
-    replace_all,
-    r"(?P<first>[^ ]+)[ ]+(?P<last>[^ ]+)(?P<space>[ ]*)",
-    "w1 w2 w3 w4",
-    "$last $first$space",
-    "w2 w1 w4 w3"
-);
-replace!(
-    trim,
-    replace_all,
-    "^[ \t]+|[ \t]+$",
-    " \t  trim me\t   \t",
-    "",
-    "trim me"
-);
-replace!(number_hyphen, replace, r"(.)(.)", "ab", "$1-$2", "a-b");
-// replace!(number_underscore, replace, r"(.)(.)", "ab", "$1_$2", "a_b");
-replace!(
-    simple_expand,
-    replace_all,
-    r"([a-z]) ([a-z])",
-    "a b",
-    "$2 $1",
-    "b a"
-);
-replace!(
-    literal_dollar1,
-    replace_all,
-    r"([a-z]+) ([a-z]+)",
-    "a b",
-    "$$1",
-    "$1"
-);
-replace!(
-    literal_dollar2,
-    replace_all,
-    r"([a-z]+) ([a-z]+)",
-    "a b",
-    "$2 $$c $1",
-    "b $c a"
-);
-replace!(
-    no_expand1,
-    replace,
-    r"([^ ]+)[ ]+([^ ]+)",
-    "w1 w2",
-    regex::NoExpand("$2 $1"),
-    "$2 $1"
-);
-replace!(
-    no_expand2,
-    replace,
-    r"([^ ]+)[ ]+([^ ]+)",
-    "w1 w2",
-    regex::NoExpand("$$1"),
-    "$$1"
-);
-replace!(
-    closure_returning_reference,
-    replace,
-    r"([0-9]+)",
-    "age: 26",
-    |captures: &regex::Captures<'_>| { captures[1][0..1].to_owned() },
-    "age: 2"
-);
-replace!(
-    closure_returning_value,
-    replace,
-    r"[0-9]+",
-    "age: 26",
-    |_captures: &regex::Captures<'_>| "Z".to_owned(),
-    "age: Z"
-);
-
-// See https://github.com/rust-lang/regex/issues/314
-replace!(
-    match_at_start_replace_with_empty,
-    replace_all,
-    r"foo",
-    "foobar",
-    "",
-    "bar"
-);
-
-// See https://github.com/rust-lang/regex/issues/393
-replace!(single_empty_match, replace, r"^", "bar", "foo", "foobar");
-
-// See https://github.com/rust-lang/regex/issues/399
-replace!(
-    capture_longest_possible_name,
-    replace_all,
-    r"(.)",
-    "b",
-    "${1}a $1a",
-    "ba "
-);
-
-replace!(
-    impl_string,
-    replace,
-    r"[0-9]",
-    "age: 26",
-    "Z".to_string(),
-    "age: Z6"
-);
-replace!(
-    impl_string_ref,
-    replace,
-    r"[0-9]",
-    "age: 26",
-    &"Z".to_string(),
-    "age: Z6"
-);
-replace!(
-    impl_cow_str_borrowed,
-    replace,
-    r"[0-9]",
-    "age: 26",
-    std::borrow::Cow::<'_, str>::Borrowed("Z"),
-    "age: Z6"
-);
-replace!(
-    impl_cow_str_borrowed_ref,
-    replace,
-    r"[0-9]",
-    "age: 26",
-    &std::borrow::Cow::<'_, str>::Borrowed("Z"),
-    "age: Z6"
-);
-replace!(
-    impl_cow_str_owned,
-    replace,
-    r"[0-9]",
-    "age: 26",
-    std::borrow::Cow::<'_, str>::Owned("Z".to_string()),
-    "age: Z6"
-);
-replace!(
-    impl_cow_str_owned_ref,
-    replace,
-    r"[0-9]",
-    "age: 26",
-    &std::borrow::Cow::<'_, str>::Owned("Z".to_string()),
-    "age: Z6"
-);
-
-#[test]
-fn replacen_no_captures() {
-    let re = regex::Regex::new(r"[0-9]").unwrap();
-    assert_eq!(re.replacen("age: 1234", 2, "Z"), "age: ZZ34");
-}
-
-#[test]
-fn replacen_with_captures() {
-    let re = regex::Regex::new(r"([0-9])").unwrap();
-    assert_eq!(re.replacen("age: 1234", 2, "${1}Z"), "age: 1Z2Z34");
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/searcher.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/searcher.rs
deleted file mode 100644
index f6dae131..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/searcher.rs
+++ /dev/null
@@ -1,93 +0,0 @@
-macro_rules! searcher {
-    ($name:ident, $re:expr, $haystack:expr) => (
-        searcher!($name, $re, $haystack, vec vec![]);
-    );
-    ($name:ident, $re:expr, $haystack:expr, $($steps:expr,)*) => (
-        searcher!($name, $re, $haystack, vec vec![$($steps),*]);
-    );
-    ($name:ident, $re:expr, $haystack:expr, $($steps:expr),*) => (
-        searcher!($name, $re, $haystack, vec vec![$($steps),*]);
-    );
-    ($name:ident, $re:expr, $haystack:expr, vec $expect_steps:expr) => (
-        #[test]
-        #[allow(unused_imports)]
-        fn $name() {
-            use std::str::pattern::{Pattern, Searcher};
-            use std::str::pattern::SearchStep::{Match, Reject, Done};
-            let re = regex::Regex::new($re).unwrap();
-            let mut se = re.into_searcher($haystack);
-            let mut got_steps = vec![];
-            loop {
-                match se.next() {
-                    Done => break,
-                    step => { got_steps.push(step); }
-                }
-            }
-            assert_eq!(got_steps, $expect_steps);
-        }
-    );
-}
-
-searcher!(searcher_empty_regex_empty_haystack, r"", "", Match(0, 0));
-searcher!(
-    searcher_empty_regex,
-    r"",
-    "ab",
-    Match(0, 0),
-    Reject(0, 1),
-    Match(1, 1),
-    Reject(1, 2),
-    Match(2, 2)
-);
-searcher!(searcher_empty_haystack, r"\d", "");
-searcher!(searcher_one_match, r"\d", "5", Match(0, 1));
-searcher!(searcher_no_match, r"\d", "a", Reject(0, 1));
-searcher!(
-    searcher_two_adjacent_matches,
-    r"\d",
-    "56",
-    Match(0, 1),
-    Match(1, 2)
-);
-searcher!(
-    searcher_two_non_adjacent_matches,
-    r"\d",
-    "5a6",
-    Match(0, 1),
-    Reject(1, 2),
-    Match(2, 3)
-);
-searcher!(searcher_reject_first, r"\d", "a6", Reject(0, 1), Match(1, 2));
-searcher!(
-    searcher_one_zero_length_matches,
-    r"\d*",
-    "a1b2",
-    Match(0, 0),  // ^
-    Reject(0, 1), // a
-    Match(1, 2),  // a1
-    Reject(2, 3), // a1b
-    Match(3, 4),  // a1b2
-);
-searcher!(
-    searcher_many_zero_length_matches,
-    r"\d*",
-    "a1bbb2",
-    Match(0, 0),  // ^
-    Reject(0, 1), // a
-    Match(1, 2),  // a1
-    Reject(2, 3), // a1b
-    Match(3, 3),  // a1bb
-    Reject(3, 4), // a1bb
-    Match(4, 4),  // a1bbb
-    Reject(4, 5), // a1bbb
-    Match(5, 6),  // a1bbba
-);
-searcher!(
-    searcher_unicode,
-    r".+?",
-    "Ⅰ1Ⅱ2",
-    Match(0, 3),
-    Match(3, 4),
-    Match(4, 7),
-    Match(7, 8)
-);
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_bytes.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_bytes.rs
deleted file mode 100644
index 106d998..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_bytes.rs
+++ /dev/null
@@ -1,108 +0,0 @@
-use {
-    anyhow::Result,
-    regex::bytes::{Regex, RegexBuilder},
-    regex_test::{
-        CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner,
-    },
-};
-
-/// Tests the default configuration of the hybrid NFA/DFA.
-#[test]
-fn default() -> Result<()> {
-    let mut runner = TestRunner::new()?;
-    runner
-        .expand(&["is_match", "find", "captures"], |test| test.compiles())
-        .blacklist_iter(super::BLACKLIST)
-        .test_iter(crate::suite()?.iter(), compiler)
-        .assert();
-    Ok(())
-}
-
-fn run_test(re: &Regex, test: &RegexTest) -> TestResult {
-    match test.additional_name() {
-        "is_match" => TestResult::matched(re.is_match(test.haystack())),
-        "find" => TestResult::matches(
-            re.find_iter(test.haystack())
-                .take(test.match_limit().unwrap_or(std::usize::MAX))
-                .map(|m| Match {
-                    id: 0,
-                    span: Span { start: m.start(), end: m.end() },
-                }),
-        ),
-        "captures" => {
-            let it = re
-                .captures_iter(test.haystack())
-                .take(test.match_limit().unwrap_or(std::usize::MAX))
-                .map(|caps| testify_captures(&caps));
-            TestResult::captures(it)
-        }
-        name => TestResult::fail(&format!("unrecognized test name: {}", name)),
-    }
-}
-
-/// Converts the given regex test to a closure that searches with a
-/// `bytes::Regex`. If the test configuration is unsupported, then a
-/// `CompiledRegex` that skips the test is returned.
-fn compiler(
-    test: &RegexTest,
-    _patterns: &[String],
-) -> anyhow::Result<CompiledRegex> {
-    let skip = Ok(CompiledRegex::skip());
-
-    // We're only testing bytes::Regex here, which supports one pattern only.
-    let pattern = match test.regexes().len() {
-        1 => &test.regexes()[0],
-        _ => return skip,
-    };
-    // We only test is_match, find_iter and captures_iter. All of those are
-    // leftmost searches.
-    if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) {
-        return skip;
-    }
-    // The top-level single-pattern regex API always uses leftmost-first.
-    if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) {
-        return skip;
-    }
-    // The top-level regex API always runs unanchored searches. ... But we can
-    // handle tests that are anchored but have only one match.
-    if test.anchored() && test.match_limit() != Some(1) {
-        return skip;
-    }
-    // We don't support tests with explicit search bounds. We could probably
-    // support this by using the 'find_at' (and such) APIs.
-    let bounds = test.bounds();
-    if !(bounds.start == 0 && bounds.end == test.haystack().len()) {
-        return skip;
-    }
-    // The bytes::Regex API specifically does not support enabling UTF-8 mode.
-    // It could I suppose, but currently it does not. That is, it permits
-    // matches to have offsets that split codepoints.
-    if test.utf8() {
-        return skip;
-    }
-    // If the test requires Unicode but the Unicode feature isn't enabled,
-    // skip it. This is a little aggressive, but the test suite doesn't
-    // have any easy way of communicating which Unicode features are needed.
-    if test.unicode() && !cfg!(feature = "unicode") {
-        return skip;
-    }
-    let re = RegexBuilder::new(pattern)
-        .case_insensitive(test.case_insensitive())
-        .unicode(test.unicode())
-        .line_terminator(test.line_terminator())
-        .build()?;
-    Ok(CompiledRegex::compiled(move |test| run_test(&re, test)))
-}
-
-/// Convert `Captures` into the test suite's capture values.
-fn testify_captures(
-    caps: &regex::bytes::Captures<'_>,
-) -> regex_test::Captures {
-    let spans = caps.iter().map(|group| {
-        group.map(|m| regex_test::Span { start: m.start(), end: m.end() })
-    });
-    // This unwrap is OK because we assume our 'caps' represents a match, and
-    // a match always gives a non-zero number of groups with the first group
-    // being non-None.
-    regex_test::Captures::new(0, spans).unwrap()
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_bytes_set.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_bytes_set.rs
deleted file mode 100644
index 899d24c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_bytes_set.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-use {
-    anyhow::Result,
-    regex::bytes::{RegexSet, RegexSetBuilder},
-    regex_test::{CompiledRegex, RegexTest, TestResult, TestRunner},
-};
-
-/// Tests the default configuration of the hybrid NFA/DFA.
-#[test]
-fn default() -> Result<()> {
-    let mut runner = TestRunner::new()?;
-    runner
-        .expand(&["is_match", "which"], |test| test.compiles())
-        .blacklist_iter(super::BLACKLIST)
-        .test_iter(crate::suite()?.iter(), compiler)
-        .assert();
-    Ok(())
-}
-
-fn run_test(re: &RegexSet, test: &RegexTest) -> TestResult {
-    match test.additional_name() {
-        "is_match" => TestResult::matched(re.is_match(test.haystack())),
-        "which" => TestResult::which(re.matches(test.haystack()).iter()),
-        name => TestResult::fail(&format!("unrecognized test name: {}", name)),
-    }
-}
-
-/// Converts the given regex test to a closure that searches with a
-/// `bytes::Regex`. If the test configuration is unsupported, then a
-/// `CompiledRegex` that skips the test is returned.
-fn compiler(
-    test: &RegexTest,
-    _patterns: &[String],
-) -> anyhow::Result<CompiledRegex> {
-    let skip = Ok(CompiledRegex::skip());
-
-    // The top-level RegexSet API only supports "overlapping" semantics.
-    if !matches!(test.search_kind(), regex_test::SearchKind::Overlapping) {
-        return skip;
-    }
-    // The top-level RegexSet API only supports "all" semantics.
-    if !matches!(test.match_kind(), regex_test::MatchKind::All) {
-        return skip;
-    }
-    // The top-level RegexSet API always runs unanchored searches.
-    if test.anchored() {
-        return skip;
-    }
-    // We don't support tests with explicit search bounds.
-    let bounds = test.bounds();
-    if !(bounds.start == 0 && bounds.end == test.haystack().len()) {
-        return skip;
-    }
-    // The bytes::Regex API specifically does not support enabling UTF-8 mode.
-    // It could I suppose, but currently it does not. That is, it permits
-    // matches to have offsets that split codepoints.
-    if test.utf8() {
-        return skip;
-    }
-    // If the test requires Unicode but the Unicode feature isn't enabled,
-    // skip it. This is a little aggressive, but the test suite doesn't
-    // have any easy way of communicating which Unicode features are needed.
-    if test.unicode() && !cfg!(feature = "unicode") {
-        return skip;
-    }
-    let re = RegexSetBuilder::new(test.regexes())
-        .case_insensitive(test.case_insensitive())
-        .unicode(test.unicode())
-        .line_terminator(test.line_terminator())
-        .build()?;
-    Ok(CompiledRegex::compiled(move |test| run_test(&re, test)))
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_string.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_string.rs
deleted file mode 100644
index 1e5bf0bb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_string.rs
+++ /dev/null
@@ -1,114 +0,0 @@
-use {
-    anyhow::Result,
-    regex::{Regex, RegexBuilder},
-    regex_test::{
-        CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner,
-    },
-};
-
-/// Tests the default configuration of the hybrid NFA/DFA.
-#[test]
-fn default() -> Result<()> {
-    let mut runner = TestRunner::new()?;
-    runner
-        .expand(&["is_match", "find", "captures"], |test| test.compiles())
-        .blacklist_iter(super::BLACKLIST)
-        .test_iter(crate::suite()?.iter(), compiler)
-        .assert();
-    Ok(())
-}
-
-fn run_test(re: &Regex, test: &RegexTest) -> TestResult {
-    let hay = match std::str::from_utf8(test.haystack()) {
-        Ok(hay) => hay,
-        Err(err) => {
-            return TestResult::fail(&format!(
-                "haystack is not valid UTF-8: {}",
-                err
-            ));
-        }
-    };
-    match test.additional_name() {
-        "is_match" => TestResult::matched(re.is_match(hay)),
-        "find" => TestResult::matches(
-            re.find_iter(hay)
-                .take(test.match_limit().unwrap_or(std::usize::MAX))
-                .map(|m| Match {
-                    id: 0,
-                    span: Span { start: m.start(), end: m.end() },
-                }),
-        ),
-        "captures" => {
-            let it = re
-                .captures_iter(hay)
-                .take(test.match_limit().unwrap_or(std::usize::MAX))
-                .map(|caps| testify_captures(&caps));
-            TestResult::captures(it)
-        }
-        name => TestResult::fail(&format!("unrecognized test name: {}", name)),
-    }
-}
-
-/// Converts the given regex test to a closure that searches with a
-/// `bytes::Regex`. If the test configuration is unsupported, then a
-/// `CompiledRegex` that skips the test is returned.
-fn compiler(
-    test: &RegexTest,
-    _patterns: &[String],
-) -> anyhow::Result<CompiledRegex> {
-    let skip = Ok(CompiledRegex::skip());
-
-    // We're only testing bytes::Regex here, which supports one pattern only.
-    let pattern = match test.regexes().len() {
-        1 => &test.regexes()[0],
-        _ => return skip,
-    };
-    // We only test is_match, find_iter and captures_iter. All of those are
-    // leftmost searches.
-    if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) {
-        return skip;
-    }
-    // The top-level single-pattern regex API always uses leftmost-first.
-    if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) {
-        return skip;
-    }
-    // The top-level regex API always runs unanchored searches. ... But we can
-    // handle tests that are anchored but have only one match.
-    if test.anchored() && test.match_limit() != Some(1) {
-        return skip;
-    }
-    // We don't support tests with explicit search bounds. We could probably
-    // support this by using the 'find_at' (and such) APIs.
-    let bounds = test.bounds();
-    if !(bounds.start == 0 && bounds.end == test.haystack().len()) {
-        return skip;
-    }
-    // The Regex API specifically does not support disabling UTF-8 mode because
-    // it can only search &str which is always valid UTF-8.
-    if !test.utf8() {
-        return skip;
-    }
-    // If the test requires Unicode but the Unicode feature isn't enabled,
-    // skip it. This is a little aggressive, but the test suite doesn't
-    // have any easy way of communicating which Unicode features are needed.
-    if test.unicode() && !cfg!(feature = "unicode") {
-        return skip;
-    }
-    let re = RegexBuilder::new(pattern)
-        .case_insensitive(test.case_insensitive())
-        .unicode(test.unicode())
-        .line_terminator(test.line_terminator())
-        .build()?;
-    Ok(CompiledRegex::compiled(move |test| run_test(&re, test)))
-}
-
-/// Convert `Captures` into the test suite's capture values.
-fn testify_captures(caps: &regex::Captures<'_>) -> regex_test::Captures {
-    let spans = caps.iter().map(|group| {
-        group.map(|m| regex_test::Span { start: m.start(), end: m.end() })
-    });
-    // This unwrap is OK because we assume our 'caps' represents a match, and
-    // a match always gives a non-zero number of groups with the first group
-    // being non-None.
-    regex_test::Captures::new(0, spans).unwrap()
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_string_set.rs b/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_string_set.rs
deleted file mode 100644
index dffdc708..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-1.11.1/tests/suite_string_set.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-use {
-    anyhow::Result,
-    regex::{RegexSet, RegexSetBuilder},
-    regex_test::{CompiledRegex, RegexTest, TestResult, TestRunner},
-};
-
-/// Tests the default configuration of the hybrid NFA/DFA.
-#[test]
-fn default() -> Result<()> {
-    let mut runner = TestRunner::new()?;
-    runner
-        .expand(&["is_match", "which"], |test| test.compiles())
-        .blacklist_iter(super::BLACKLIST)
-        .test_iter(crate::suite()?.iter(), compiler)
-        .assert();
-    Ok(())
-}
-
-fn run_test(re: &RegexSet, test: &RegexTest) -> TestResult {
-    let hay = match std::str::from_utf8(test.haystack()) {
-        Ok(hay) => hay,
-        Err(err) => {
-            return TestResult::fail(&format!(
-                "haystack is not valid UTF-8: {}",
-                err
-            ));
-        }
-    };
-    match test.additional_name() {
-        "is_match" => TestResult::matched(re.is_match(hay)),
-        "which" => TestResult::which(re.matches(hay).iter()),
-        name => TestResult::fail(&format!("unrecognized test name: {}", name)),
-    }
-}
-
-/// Converts the given regex test to a closure that searches with a
-/// `bytes::Regex`. If the test configuration is unsupported, then a
-/// `CompiledRegex` that skips the test is returned.
-fn compiler(
-    test: &RegexTest,
-    _patterns: &[String],
-) -> anyhow::Result<CompiledRegex> {
-    let skip = Ok(CompiledRegex::skip());
-
-    // The top-level RegexSet API only supports "overlapping" semantics.
-    if !matches!(test.search_kind(), regex_test::SearchKind::Overlapping) {
-        return skip;
-    }
-    // The top-level RegexSet API only supports "all" semantics.
-    if !matches!(test.match_kind(), regex_test::MatchKind::All) {
-        return skip;
-    }
-    // The top-level RegexSet API always runs unanchored searches.
-    if test.anchored() {
-        return skip;
-    }
-    // We don't support tests with explicit search bounds.
-    let bounds = test.bounds();
-    if !(bounds.start == 0 && bounds.end == test.haystack().len()) {
-        return skip;
-    }
-    // The Regex API specifically does not support disabling UTF-8 mode because
-    // it can only search &str which is always valid UTF-8.
-    if !test.utf8() {
-        return skip;
-    }
-    // If the test requires Unicode but the Unicode feature isn't enabled,
-    // skip it. This is a little aggressive, but the test suite doesn't
-    // have any easy way of communicating which Unicode features are needed.
-    if test.unicode() && !cfg!(feature = "unicode") {
-        return skip;
-    }
-    let re = RegexSetBuilder::new(test.regexes())
-        .case_insensitive(test.case_insensitive())
-        .unicode(test.unicode())
-        .line_terminator(test.line_terminator())
-        .build()?;
-    Ok(CompiledRegex::compiled(move |test| run_test(&re, test)))
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/.cargo_vcs_info.json
deleted file mode 100644
index cd74d4a7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "cba0fbc0194456f644040d7558ae6ed261d57cc2"
-  },
-  "path_in_vcs": "regex-syntax"
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/Cargo.toml
deleted file mode 100644
index 6b93357..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/Cargo.toml
+++ /dev/null
@@ -1,74 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.65"
-name = "regex-syntax"
-version = "0.8.5"
-authors = [
-    "The Rust Project Developers",
-    "Andrew Gallant <jamslam@gmail.com>",
-]
-build = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = "A regular expression parser."
-documentation = "https://docs.rs/regex-syntax"
-readme = "README.md"
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/rust-lang/regex/tree/master/regex-syntax"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = [
-    "--cfg",
-    "docsrs",
-]
-
-[lib]
-name = "regex_syntax"
-path = "src/lib.rs"
-
-[[bench]]
-name = "bench"
-path = "benches/bench.rs"
-
-[dependencies.arbitrary]
-version = "1.3.0"
-features = ["derive"]
-optional = true
-
-[features]
-arbitrary = ["dep:arbitrary"]
-default = [
-    "std",
-    "unicode",
-]
-std = []
-unicode = [
-    "unicode-age",
-    "unicode-bool",
-    "unicode-case",
-    "unicode-gencat",
-    "unicode-perl",
-    "unicode-script",
-    "unicode-segment",
-]
-unicode-age = []
-unicode-bool = []
-unicode-case = []
-unicode-gencat = []
-unicode-perl = []
-unicode-script = []
-unicode-segment = []
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/Cargo.toml.orig
deleted file mode 100644
index 0cbcde5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/Cargo.toml.orig
+++ /dev/null
@@ -1,51 +0,0 @@
-[package]
-name = "regex-syntax"
-version = "0.8.5"  #:version
-authors = ["The Rust Project Developers", "Andrew Gallant <jamslam@gmail.com>"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/rust-lang/regex/tree/master/regex-syntax"
-documentation = "https://docs.rs/regex-syntax"
-description = "A regular expression parser."
-workspace = ".."
-edition = "2021"
-rust-version = "1.65"
-
-# Features are documented in the "Crate features" section of the crate docs:
-# https://docs.rs/regex-syntax/*/#crate-features
-[features]
-default = ["std", "unicode"]
-std = []
-arbitrary = ["dep:arbitrary"]
-
-unicode = [
-  "unicode-age",
-  "unicode-bool",
-  "unicode-case",
-  "unicode-gencat",
-  "unicode-perl",
-  "unicode-script",
-  "unicode-segment",
-]
-unicode-age = []
-unicode-bool = []
-unicode-case = []
-unicode-gencat = []
-unicode-perl = []
-unicode-script = []
-unicode-segment = []
-
-[dependencies]
-arbitrary = { version = "1.3.0", features = ["derive"], optional = true }
-
-[package.metadata.docs.rs]
-# We want to document all features.
-all-features = true
-# Since this crate's feature setup is pretty complicated, it is worth opting
-# into a nightly unstable option to show the features that need to be enabled
-# for public API items. To do that, we set 'docsrs', and when that's enabled,
-# we enable the 'doc_auto_cfg' feature.
-#
-# To test this locally, run:
-#
-#     RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features
-rustdoc-args = ["--cfg", "docsrs"]
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/LICENSE-APACHE
deleted file mode 100644
index 16fe87b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-	http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/LICENSE-MIT
deleted file mode 100644
index 39d4bdb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/LICENSE-MIT
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2014 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/README.md b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/README.md
deleted file mode 100644
index 529513b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-regex-syntax
-============
-This crate provides a robust regular expression parser.
-
-[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions)
-[![Crates.io](https://img.shields.io/crates/v/regex-syntax.svg)](https://crates.io/crates/regex-syntax)
-
-
-### Documentation
-
-https://docs.rs/regex-syntax
-
-
-### Overview
-
-There are two primary types exported by this crate: `Ast` and `Hir`. The former
-is a faithful abstract syntax of a regular expression, and can convert regular
-expressions back to their concrete syntax while mostly preserving its original
-form. The latter type is a high level intermediate representation of a regular
-expression that is amenable to analysis and compilation into byte codes or
-automata. An `Hir` achieves this by drastically simplifying the syntactic
-structure of the regular expression. While an `Hir` can be converted back to
-its equivalent concrete syntax, the result is unlikely to resemble the original
-concrete syntax that produced the `Hir`.
-
-
-### Example
-
-This example shows how to parse a pattern string into its HIR:
-
-```rust
-use regex_syntax::{hir::Hir, parse};
-
-let hir = parse("a|b").unwrap();
-assert_eq!(hir, Hir::alternation(vec![
-    Hir::literal("a".as_bytes()),
-    Hir::literal("b".as_bytes()),
-]));
-```
-
-
-### Safety
-
-This crate has no `unsafe` code and sets `forbid(unsafe_code)`. While it's
-possible this crate could use `unsafe` code in the future, the standard
-for doing so is extremely high. In general, most code in this crate is not
-performance critical, since it tends to be dwarfed by the time it takes to
-compile a regular expression into an automaton. Therefore, there is little need
-for extreme optimization, and therefore, use of `unsafe`.
-
-The standard for using `unsafe` in this crate is extremely high because this
-crate is intended to be reasonably safe to use with user supplied regular
-expressions. Therefore, while there may be bugs in the regex parser itself,
-they should _never_ result in memory unsafety unless there is either a bug
-in the compiler or the standard library. (Since `regex-syntax` has zero
-dependencies.)
-
-
-### Crate features
-
-By default, this crate bundles a fairly large amount of Unicode data tables
-(a source size of ~750KB). Because of their large size, one can disable some
-or all of these data tables. If a regular expression attempts to use Unicode
-data that is not available, then an error will occur when translating the `Ast`
-to the `Hir`.
-
-The full set of features one can disable are
-[in the "Crate features" section of the documentation](https://docs.rs/regex-syntax/*/#crate-features).
-
-
-### Testing
-
-Simply running `cargo test` will give you very good coverage. However, because
-of the large number of features exposed by this crate, a `test` script is
-included in this directory which will test several feature combinations. This
-is the same script that is run in CI.
-
-
-### Motivation
-
-The primary purpose of this crate is to provide the parser used by `regex`.
-Specifically, this crate is treated as an implementation detail of the `regex`,
-and is primarily developed for the needs of `regex`.
-
-Since this crate is an implementation detail of `regex`, it may experience
-breaking change releases at a different cadence from `regex`. This is only
-possible because this crate is _not_ a public dependency of `regex`.
-
-Another consequence of this de-coupling is that there is no direct way to
-compile a `regex::Regex` from a `regex_syntax::hir::Hir`. Instead, one must
-first convert the `Hir` to a string (via its `std::fmt::Display`) and then
-compile that via `Regex::new`. While this does repeat some work, compilation
-typically takes much longer than parsing.
-
-Stated differently, the coupling between `regex` and `regex-syntax` exists only
-at the level of the concrete syntax.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/benches/bench.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/benches/bench.rs
deleted file mode 100644
index d4703d4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/benches/bench.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-#![feature(test)]
-
-extern crate test;
-
-use regex_syntax::Parser;
-use test::Bencher;
-
-#[bench]
-fn parse_simple1(b: &mut Bencher) {
-    b.iter(|| {
-        let re = r"^bc(d|e)*$";
-        Parser::new().parse(re).unwrap()
-    });
-}
-
-#[bench]
-fn parse_simple2(b: &mut Bencher) {
-    b.iter(|| {
-        let re = r"'[a-zA-Z_][a-zA-Z0-9_]*(')\b";
-        Parser::new().parse(re).unwrap()
-    });
-}
-
-#[bench]
-fn parse_small1(b: &mut Bencher) {
-    b.iter(|| {
-        let re = r"\p{L}|\p{N}|\s|.|\d";
-        Parser::new().parse(re).unwrap()
-    });
-}
-
-#[bench]
-fn parse_medium1(b: &mut Bencher) {
-    b.iter(|| {
-        let re = r"\pL\p{Greek}\p{Hiragana}\p{Alphabetic}\p{Hebrew}\p{Arabic}";
-        Parser::new().parse(re).unwrap()
-    });
-}
-
-#[bench]
-fn parse_medium2(b: &mut Bencher) {
-    b.iter(|| {
-        let re = r"\s\S\w\W\d\D";
-        Parser::new().parse(re).unwrap()
-    });
-}
-
-#[bench]
-fn parse_medium3(b: &mut Bencher) {
-    b.iter(|| {
-        let re =
-            r"\p{age:3.2}\p{hira}\p{scx:hira}\p{alphabetic}\p{sc:Greek}\pL";
-        Parser::new().parse(re).unwrap()
-    });
-}
-
-#[bench]
-fn parse_huge(b: &mut Bencher) {
-    b.iter(|| {
-        let re = r"\p{L}{100}";
-        Parser::new().parse(re).unwrap()
-    });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/mod.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/mod.rs
deleted file mode 100644
index ce79a89..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/mod.rs
+++ /dev/null
@@ -1,1809 +0,0 @@
-/*!
-Defines an abstract syntax for regular expressions.
-*/
-
-use core::cmp::Ordering;
-
-use alloc::{boxed::Box, string::String, vec, vec::Vec};
-
-pub use crate::ast::visitor::{visit, Visitor};
-
-pub mod parse;
-pub mod print;
-mod visitor;
-
-/// An error that occurred while parsing a regular expression into an abstract
-/// syntax tree.
-///
-/// Note that not all ASTs represents a valid regular expression. For example,
-/// an AST is constructed without error for `\p{Quux}`, but `Quux` is not a
-/// valid Unicode property name. That particular error is reported when
-/// translating an AST to the high-level intermediate representation (`HIR`).
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Error {
-    /// The kind of error.
-    kind: ErrorKind,
-    /// The original pattern that the parser generated the error from. Every
-    /// span in an error is a valid range into this string.
-    pattern: String,
-    /// The span of this error.
-    span: Span,
-}
-
-impl Error {
-    /// Return the type of this error.
-    pub fn kind(&self) -> &ErrorKind {
-        &self.kind
-    }
-
-    /// The original pattern string in which this error occurred.
-    ///
-    /// Every span reported by this error is reported in terms of this string.
-    pub fn pattern(&self) -> &str {
-        &self.pattern
-    }
-
-    /// Return the span at which this error occurred.
-    pub fn span(&self) -> &Span {
-        &self.span
-    }
-
-    /// Return an auxiliary span. This span exists only for some errors that
-    /// benefit from being able to point to two locations in the original
-    /// regular expression. For example, "duplicate" errors will have the
-    /// main error position set to the duplicate occurrence while its
-    /// auxiliary span will be set to the initial occurrence.
-    pub fn auxiliary_span(&self) -> Option<&Span> {
-        use self::ErrorKind::*;
-        match self.kind {
-            FlagDuplicate { ref original } => Some(original),
-            FlagRepeatedNegation { ref original, .. } => Some(original),
-            GroupNameDuplicate { ref original, .. } => Some(original),
-            _ => None,
-        }
-    }
-}
-
-/// The type of an error that occurred while building an AST.
-///
-/// This error type is marked as `non_exhaustive`. This means that adding a
-/// new variant is not considered a breaking change.
-#[non_exhaustive]
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum ErrorKind {
-    /// The capturing group limit was exceeded.
-    ///
-    /// Note that this represents a limit on the total number of capturing
-    /// groups in a regex and not necessarily the number of nested capturing
-    /// groups. That is, the nest limit can be low and it is still possible for
-    /// this error to occur.
-    CaptureLimitExceeded,
-    /// An invalid escape sequence was found in a character class set.
-    ClassEscapeInvalid,
-    /// An invalid character class range was found. An invalid range is any
-    /// range where the start is greater than the end.
-    ClassRangeInvalid,
-    /// An invalid range boundary was found in a character class. Range
-    /// boundaries must be a single literal codepoint, but this error indicates
-    /// that something else was found, such as a nested class.
-    ClassRangeLiteral,
-    /// An opening `[` was found with no corresponding closing `]`.
-    ClassUnclosed,
-    /// Note that this error variant is no longer used. Namely, a decimal
-    /// number can only appear as a repetition quantifier. When the number
-    /// in a repetition quantifier is empty, then it gets its own specialized
-    /// error, `RepetitionCountDecimalEmpty`.
-    DecimalEmpty,
-    /// An invalid decimal number was given where one was expected.
-    DecimalInvalid,
-    /// A bracketed hex literal was empty.
-    EscapeHexEmpty,
-    /// A bracketed hex literal did not correspond to a Unicode scalar value.
-    EscapeHexInvalid,
-    /// An invalid hexadecimal digit was found.
-    EscapeHexInvalidDigit,
-    /// EOF was found before an escape sequence was completed.
-    EscapeUnexpectedEof,
-    /// An unrecognized escape sequence.
-    EscapeUnrecognized,
-    /// A dangling negation was used when setting flags, e.g., `i-`.
-    FlagDanglingNegation,
-    /// A flag was used twice, e.g., `i-i`.
-    FlagDuplicate {
-        /// The position of the original flag. The error position
-        /// points to the duplicate flag.
-        original: Span,
-    },
-    /// The negation operator was used twice, e.g., `-i-s`.
-    FlagRepeatedNegation {
-        /// The position of the original negation operator. The error position
-        /// points to the duplicate negation operator.
-        original: Span,
-    },
-    /// Expected a flag but got EOF, e.g., `(?`.
-    FlagUnexpectedEof,
-    /// Unrecognized flag, e.g., `a`.
-    FlagUnrecognized,
-    /// A duplicate capture name was found.
-    GroupNameDuplicate {
-        /// The position of the initial occurrence of the capture name. The
-        /// error position itself points to the duplicate occurrence.
-        original: Span,
-    },
-    /// A capture group name is empty, e.g., `(?P<>abc)`.
-    GroupNameEmpty,
-    /// An invalid character was seen for a capture group name. This includes
-    /// errors where the first character is a digit (even though subsequent
-    /// characters are allowed to be digits).
-    GroupNameInvalid,
-    /// A closing `>` could not be found for a capture group name.
-    GroupNameUnexpectedEof,
-    /// An unclosed group, e.g., `(ab`.
-    ///
-    /// The span of this error corresponds to the unclosed parenthesis.
-    GroupUnclosed,
-    /// An unopened group, e.g., `ab)`.
-    GroupUnopened,
-    /// The nest limit was exceeded. The limit stored here is the limit
-    /// configured in the parser.
-    NestLimitExceeded(u32),
-    /// The range provided in a counted repetition operator is invalid. The
-    /// range is invalid if the start is greater than the end.
-    RepetitionCountInvalid,
-    /// An opening `{` was not followed by a valid decimal value.
-    /// For example, `x{}` or `x{]}` would fail.
-    RepetitionCountDecimalEmpty,
-    /// An opening `{` was found with no corresponding closing `}`.
-    RepetitionCountUnclosed,
-    /// A repetition operator was applied to a missing sub-expression. This
-    /// occurs, for example, in the regex consisting of just a `*` or even
-    /// `(?i)*`. It is, however, possible to create a repetition operating on
-    /// an empty sub-expression. For example, `()*` is still considered valid.
-    RepetitionMissing,
-    /// The special word boundary syntax, `\b{something}`, was used, but
-    /// either EOF without `}` was seen, or an invalid character in the
-    /// braces was seen.
-    SpecialWordBoundaryUnclosed,
-    /// The special word boundary syntax, `\b{something}`, was used, but
-    /// `something` was not recognized as a valid word boundary kind.
-    SpecialWordBoundaryUnrecognized,
-    /// The syntax `\b{` was observed, but afterwards the end of the pattern
-    /// was observed without being able to tell whether it was meant to be a
-    /// bounded repetition on the `\b` or the beginning of a special word
-    /// boundary assertion.
-    SpecialWordOrRepetitionUnexpectedEof,
-    /// The Unicode class is not valid. This typically occurs when a `\p` is
-    /// followed by something other than a `{`.
-    UnicodeClassInvalid,
-    /// When octal support is disabled, this error is produced when an octal
-    /// escape is used. The octal escape is assumed to be an invocation of
-    /// a backreference, which is the common case.
-    UnsupportedBackreference,
-    /// When syntax similar to PCRE's look-around is used, this error is
-    /// returned. Some example syntaxes that are rejected include, but are
-    /// not necessarily limited to, `(?=re)`, `(?!re)`, `(?<=re)` and
-    /// `(?<!re)`. Note that all of these syntaxes are otherwise invalid; this
-    /// error is used to improve the user experience.
-    UnsupportedLookAround,
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for Error {}
-
-impl core::fmt::Display for Error {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        crate::error::Formatter::from(self).fmt(f)
-    }
-}
-
-impl core::fmt::Display for ErrorKind {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        use self::ErrorKind::*;
-        match *self {
-            CaptureLimitExceeded => write!(
-                f,
-                "exceeded the maximum number of \
-                 capturing groups ({})",
-                u32::MAX
-            ),
-            ClassEscapeInvalid => {
-                write!(f, "invalid escape sequence found in character class")
-            }
-            ClassRangeInvalid => write!(
-                f,
-                "invalid character class range, \
-                 the start must be <= the end"
-            ),
-            ClassRangeLiteral => {
-                write!(f, "invalid range boundary, must be a literal")
-            }
-            ClassUnclosed => write!(f, "unclosed character class"),
-            DecimalEmpty => write!(f, "decimal literal empty"),
-            DecimalInvalid => write!(f, "decimal literal invalid"),
-            EscapeHexEmpty => write!(f, "hexadecimal literal empty"),
-            EscapeHexInvalid => {
-                write!(f, "hexadecimal literal is not a Unicode scalar value")
-            }
-            EscapeHexInvalidDigit => write!(f, "invalid hexadecimal digit"),
-            EscapeUnexpectedEof => write!(
-                f,
-                "incomplete escape sequence, \
-                 reached end of pattern prematurely"
-            ),
-            EscapeUnrecognized => write!(f, "unrecognized escape sequence"),
-            FlagDanglingNegation => {
-                write!(f, "dangling flag negation operator")
-            }
-            FlagDuplicate { .. } => write!(f, "duplicate flag"),
-            FlagRepeatedNegation { .. } => {
-                write!(f, "flag negation operator repeated")
-            }
-            FlagUnexpectedEof => {
-                write!(f, "expected flag but got end of regex")
-            }
-            FlagUnrecognized => write!(f, "unrecognized flag"),
-            GroupNameDuplicate { .. } => {
-                write!(f, "duplicate capture group name")
-            }
-            GroupNameEmpty => write!(f, "empty capture group name"),
-            GroupNameInvalid => write!(f, "invalid capture group character"),
-            GroupNameUnexpectedEof => write!(f, "unclosed capture group name"),
-            GroupUnclosed => write!(f, "unclosed group"),
-            GroupUnopened => write!(f, "unopened group"),
-            NestLimitExceeded(limit) => write!(
-                f,
-                "exceed the maximum number of \
-                 nested parentheses/brackets ({})",
-                limit
-            ),
-            RepetitionCountInvalid => write!(
-                f,
-                "invalid repetition count range, \
-                 the start must be <= the end"
-            ),
-            RepetitionCountDecimalEmpty => {
-                write!(f, "repetition quantifier expects a valid decimal")
-            }
-            RepetitionCountUnclosed => {
-                write!(f, "unclosed counted repetition")
-            }
-            RepetitionMissing => {
-                write!(f, "repetition operator missing expression")
-            }
-            SpecialWordBoundaryUnclosed => {
-                write!(
-                    f,
-                    "special word boundary assertion is either \
-                     unclosed or contains an invalid character",
-                )
-            }
-            SpecialWordBoundaryUnrecognized => {
-                write!(
-                    f,
-                    "unrecognized special word boundary assertion, \
-                     valid choices are: start, end, start-half \
-                     or end-half",
-                )
-            }
-            SpecialWordOrRepetitionUnexpectedEof => {
-                write!(
-                    f,
-                    "found either the beginning of a special word \
-                     boundary or a bounded repetition on a \\b with \
-                     an opening brace, but no closing brace",
-                )
-            }
-            UnicodeClassInvalid => {
-                write!(f, "invalid Unicode character class")
-            }
-            UnsupportedBackreference => {
-                write!(f, "backreferences are not supported")
-            }
-            UnsupportedLookAround => write!(
-                f,
-                "look-around, including look-ahead and look-behind, \
-                 is not supported"
-            ),
-        }
-    }
-}
-
-/// Span represents the position information of a single AST item.
-///
-/// All span positions are absolute byte offsets that can be used on the
-/// original regular expression that was parsed.
-#[derive(Clone, Copy, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Span {
-    /// The start byte offset.
-    pub start: Position,
-    /// The end byte offset.
-    pub end: Position,
-}
-
-impl core::fmt::Debug for Span {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(f, "Span({:?}, {:?})", self.start, self.end)
-    }
-}
-
-impl Ord for Span {
-    fn cmp(&self, other: &Span) -> Ordering {
-        (&self.start, &self.end).cmp(&(&other.start, &other.end))
-    }
-}
-
-impl PartialOrd for Span {
-    fn partial_cmp(&self, other: &Span) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-/// A single position in a regular expression.
-///
-/// A position encodes one half of a span, and include the byte offset, line
-/// number and column number.
-#[derive(Clone, Copy, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Position {
-    /// The absolute offset of this position, starting at `0` from the
-    /// beginning of the regular expression pattern string.
-    pub offset: usize,
-    /// The line number, starting at `1`.
-    pub line: usize,
-    /// The approximate column number, starting at `1`.
-    pub column: usize,
-}
-
-impl core::fmt::Debug for Position {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(
-            f,
-            "Position(o: {:?}, l: {:?}, c: {:?})",
-            self.offset, self.line, self.column
-        )
-    }
-}
-
-impl Ord for Position {
-    fn cmp(&self, other: &Position) -> Ordering {
-        self.offset.cmp(&other.offset)
-    }
-}
-
-impl PartialOrd for Position {
-    fn partial_cmp(&self, other: &Position) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl Span {
-    /// Create a new span with the given positions.
-    pub fn new(start: Position, end: Position) -> Span {
-        Span { start, end }
-    }
-
-    /// Create a new span using the given position as the start and end.
-    pub fn splat(pos: Position) -> Span {
-        Span::new(pos, pos)
-    }
-
-    /// Create a new span by replacing the starting the position with the one
-    /// given.
-    pub fn with_start(self, pos: Position) -> Span {
-        Span { start: pos, ..self }
-    }
-
-    /// Create a new span by replacing the ending the position with the one
-    /// given.
-    pub fn with_end(self, pos: Position) -> Span {
-        Span { end: pos, ..self }
-    }
-
-    /// Returns true if and only if this span occurs on a single line.
-    pub fn is_one_line(&self) -> bool {
-        self.start.line == self.end.line
-    }
-
-    /// Returns true if and only if this span is empty. That is, it points to
-    /// a single position in the concrete syntax of a regular expression.
-    pub fn is_empty(&self) -> bool {
-        self.start.offset == self.end.offset
-    }
-}
-
-impl Position {
-    /// Create a new position with the given information.
-    ///
-    /// `offset` is the absolute offset of the position, starting at `0` from
-    /// the beginning of the regular expression pattern string.
-    ///
-    /// `line` is the line number, starting at `1`.
-    ///
-    /// `column` is the approximate column number, starting at `1`.
-    pub fn new(offset: usize, line: usize, column: usize) -> Position {
-        Position { offset, line, column }
-    }
-}
-
-/// An abstract syntax tree for a singular expression along with comments
-/// found.
-///
-/// Comments are not stored in the tree itself to avoid complexity. Each
-/// comment contains a span of precisely where it occurred in the original
-/// regular expression.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct WithComments {
-    /// The actual ast.
-    pub ast: Ast,
-    /// All comments found in the original regular expression.
-    pub comments: Vec<Comment>,
-}
-
-/// A comment from a regular expression with an associated span.
-///
-/// A regular expression can only contain comments when the `x` flag is
-/// enabled.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Comment {
-    /// The span of this comment, including the beginning `#` and ending `\n`.
-    pub span: Span,
-    /// The comment text, starting with the first character following the `#`
-    /// and ending with the last character preceding the `\n`.
-    pub comment: String,
-}
-
-/// An abstract syntax tree for a single regular expression.
-///
-/// An `Ast`'s `fmt::Display` implementation uses constant stack space and heap
-/// space proportional to the size of the `Ast`.
-///
-/// This type defines its own destructor that uses constant stack space and
-/// heap space proportional to the size of the `Ast`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum Ast {
-    /// An empty regex that matches everything.
-    Empty(Box<Span>),
-    /// A set of flags, e.g., `(?is)`.
-    Flags(Box<SetFlags>),
-    /// A single character literal, which includes escape sequences.
-    Literal(Box<Literal>),
-    /// The "any character" class.
-    Dot(Box<Span>),
-    /// A single zero-width assertion.
-    Assertion(Box<Assertion>),
-    /// A single Unicode character class, e.g., `\pL` or `\p{Greek}`.
-    ClassUnicode(Box<ClassUnicode>),
-    /// A single perl character class, e.g., `\d` or `\W`.
-    ClassPerl(Box<ClassPerl>),
-    /// A single bracketed character class set, which may contain zero or more
-    /// character ranges and/or zero or more nested classes. e.g.,
-    /// `[a-zA-Z\pL]`.
-    ClassBracketed(Box<ClassBracketed>),
-    /// A repetition operator applied to an arbitrary regular expression.
-    Repetition(Box<Repetition>),
-    /// A grouped regular expression.
-    Group(Box<Group>),
-    /// An alternation of regular expressions.
-    Alternation(Box<Alternation>),
-    /// A concatenation of regular expressions.
-    Concat(Box<Concat>),
-}
-
-impl Ast {
-    /// Create an "empty" AST item.
-    pub fn empty(span: Span) -> Ast {
-        Ast::Empty(Box::new(span))
-    }
-
-    /// Create a "flags" AST item.
-    pub fn flags(e: SetFlags) -> Ast {
-        Ast::Flags(Box::new(e))
-    }
-
-    /// Create a "literal" AST item.
-    pub fn literal(e: Literal) -> Ast {
-        Ast::Literal(Box::new(e))
-    }
-
-    /// Create a "dot" AST item.
-    pub fn dot(span: Span) -> Ast {
-        Ast::Dot(Box::new(span))
-    }
-
-    /// Create a "assertion" AST item.
-    pub fn assertion(e: Assertion) -> Ast {
-        Ast::Assertion(Box::new(e))
-    }
-
-    /// Create a "Unicode class" AST item.
-    pub fn class_unicode(e: ClassUnicode) -> Ast {
-        Ast::ClassUnicode(Box::new(e))
-    }
-
-    /// Create a "Perl class" AST item.
-    pub fn class_perl(e: ClassPerl) -> Ast {
-        Ast::ClassPerl(Box::new(e))
-    }
-
-    /// Create a "bracketed class" AST item.
-    pub fn class_bracketed(e: ClassBracketed) -> Ast {
-        Ast::ClassBracketed(Box::new(e))
-    }
-
-    /// Create a "repetition" AST item.
-    pub fn repetition(e: Repetition) -> Ast {
-        Ast::Repetition(Box::new(e))
-    }
-
-    /// Create a "group" AST item.
-    pub fn group(e: Group) -> Ast {
-        Ast::Group(Box::new(e))
-    }
-
-    /// Create a "alternation" AST item.
-    pub fn alternation(e: Alternation) -> Ast {
-        Ast::Alternation(Box::new(e))
-    }
-
-    /// Create a "concat" AST item.
-    pub fn concat(e: Concat) -> Ast {
-        Ast::Concat(Box::new(e))
-    }
-
-    /// Return the span of this abstract syntax tree.
-    pub fn span(&self) -> &Span {
-        match *self {
-            Ast::Empty(ref span) => span,
-            Ast::Flags(ref x) => &x.span,
-            Ast::Literal(ref x) => &x.span,
-            Ast::Dot(ref span) => span,
-            Ast::Assertion(ref x) => &x.span,
-            Ast::ClassUnicode(ref x) => &x.span,
-            Ast::ClassPerl(ref x) => &x.span,
-            Ast::ClassBracketed(ref x) => &x.span,
-            Ast::Repetition(ref x) => &x.span,
-            Ast::Group(ref x) => &x.span,
-            Ast::Alternation(ref x) => &x.span,
-            Ast::Concat(ref x) => &x.span,
-        }
-    }
-
-    /// Return true if and only if this Ast is empty.
-    pub fn is_empty(&self) -> bool {
-        match *self {
-            Ast::Empty(_) => true,
-            _ => false,
-        }
-    }
-
-    /// Returns true if and only if this AST has any (including possibly empty)
-    /// subexpressions.
-    fn has_subexprs(&self) -> bool {
-        match *self {
-            Ast::Empty(_)
-            | Ast::Flags(_)
-            | Ast::Literal(_)
-            | Ast::Dot(_)
-            | Ast::Assertion(_)
-            | Ast::ClassUnicode(_)
-            | Ast::ClassPerl(_) => false,
-            Ast::ClassBracketed(_)
-            | Ast::Repetition(_)
-            | Ast::Group(_)
-            | Ast::Alternation(_)
-            | Ast::Concat(_) => true,
-        }
-    }
-}
-
-/// Print a display representation of this Ast.
-///
-/// This does not preserve any of the original whitespace formatting that may
-/// have originally been present in the concrete syntax from which this Ast
-/// was generated.
-///
-/// This implementation uses constant stack space and heap space proportional
-/// to the size of the `Ast`.
-impl core::fmt::Display for Ast {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        use crate::ast::print::Printer;
-        Printer::new().print(self, f)
-    }
-}
-
-/// An alternation of regular expressions.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Alternation {
-    /// The span of this alternation.
-    pub span: Span,
-    /// The alternate regular expressions.
-    pub asts: Vec<Ast>,
-}
-
-impl Alternation {
-    /// Return this alternation as an AST.
-    ///
-    /// If this alternation contains zero ASTs, then `Ast::empty` is returned.
-    /// If this alternation contains exactly 1 AST, then the corresponding AST
-    /// is returned. Otherwise, `Ast::alternation` is returned.
-    pub fn into_ast(mut self) -> Ast {
-        match self.asts.len() {
-            0 => Ast::empty(self.span),
-            1 => self.asts.pop().unwrap(),
-            _ => Ast::alternation(self),
-        }
-    }
-}
-
-/// A concatenation of regular expressions.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Concat {
-    /// The span of this concatenation.
-    pub span: Span,
-    /// The concatenation regular expressions.
-    pub asts: Vec<Ast>,
-}
-
-impl Concat {
-    /// Return this concatenation as an AST.
-    ///
-    /// If this alternation contains zero ASTs, then `Ast::empty` is returned.
-    /// If this alternation contains exactly 1 AST, then the corresponding AST
-    /// is returned. Otherwise, `Ast::concat` is returned.
-    pub fn into_ast(mut self) -> Ast {
-        match self.asts.len() {
-            0 => Ast::empty(self.span),
-            1 => self.asts.pop().unwrap(),
-            _ => Ast::concat(self),
-        }
-    }
-}
-
-/// A single literal expression.
-///
-/// A literal corresponds to a single Unicode scalar value. Literals may be
-/// represented in their literal form, e.g., `a` or in their escaped form,
-/// e.g., `\x61`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Literal {
-    /// The span of this literal.
-    pub span: Span,
-    /// The kind of this literal.
-    pub kind: LiteralKind,
-    /// The Unicode scalar value corresponding to this literal.
-    pub c: char,
-}
-
-impl Literal {
-    /// If this literal was written as a `\x` hex escape, then this returns
-    /// the corresponding byte value. Otherwise, this returns `None`.
-    pub fn byte(&self) -> Option<u8> {
-        match self.kind {
-            LiteralKind::HexFixed(HexLiteralKind::X) => {
-                u8::try_from(self.c).ok()
-            }
-            _ => None,
-        }
-    }
-}
-
-/// The kind of a single literal expression.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum LiteralKind {
-    /// The literal is written verbatim, e.g., `a` or `☃`.
-    Verbatim,
-    /// The literal is written as an escape because it is otherwise a special
-    /// regex meta character, e.g., `\*` or `\[`.
-    Meta,
-    /// The literal is written as an escape despite the fact that the escape is
-    /// unnecessary, e.g., `\%` or `\/`.
-    Superfluous,
-    /// The literal is written as an octal escape, e.g., `\141`.
-    Octal,
-    /// The literal is written as a hex code with a fixed number of digits
-    /// depending on the type of the escape, e.g., `\x61` or `\u0061` or
-    /// `\U00000061`.
-    HexFixed(HexLiteralKind),
-    /// The literal is written as a hex code with a bracketed number of
-    /// digits. The only restriction is that the bracketed hex code must refer
-    /// to a valid Unicode scalar value.
-    HexBrace(HexLiteralKind),
-    /// The literal is written as a specially recognized escape, e.g., `\f`
-    /// or `\n`.
-    Special(SpecialLiteralKind),
-}
-
-/// The type of a special literal.
-///
-/// A special literal is a special escape sequence recognized by the regex
-/// parser, e.g., `\f` or `\n`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum SpecialLiteralKind {
-    /// Bell, spelled `\a` (`\x07`).
-    Bell,
-    /// Form feed, spelled `\f` (`\x0C`).
-    FormFeed,
-    /// Tab, spelled `\t` (`\x09`).
-    Tab,
-    /// Line feed, spelled `\n` (`\x0A`).
-    LineFeed,
-    /// Carriage return, spelled `\r` (`\x0D`).
-    CarriageReturn,
-    /// Vertical tab, spelled `\v` (`\x0B`).
-    VerticalTab,
-    /// Space, spelled `\ ` (`\x20`). Note that this can only appear when
-    /// parsing in verbose mode.
-    Space,
-}
-
-/// The type of a Unicode hex literal.
-///
-/// Note that all variants behave the same when used with brackets. They only
-/// differ when used without brackets in the number of hex digits that must
-/// follow.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum HexLiteralKind {
-    /// A `\x` prefix. When used without brackets, this form is limited to
-    /// two digits.
-    X,
-    /// A `\u` prefix. When used without brackets, this form is limited to
-    /// four digits.
-    UnicodeShort,
-    /// A `\U` prefix. When used without brackets, this form is limited to
-    /// eight digits.
-    UnicodeLong,
-}
-
-impl HexLiteralKind {
-    /// The number of digits that must be used with this literal form when
-    /// used without brackets. When used with brackets, there is no
-    /// restriction on the number of digits.
-    pub fn digits(&self) -> u32 {
-        match *self {
-            HexLiteralKind::X => 2,
-            HexLiteralKind::UnicodeShort => 4,
-            HexLiteralKind::UnicodeLong => 8,
-        }
-    }
-}
-
-/// A Perl character class.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct ClassPerl {
-    /// The span of this class.
-    pub span: Span,
-    /// The kind of Perl class.
-    pub kind: ClassPerlKind,
-    /// Whether the class is negated or not. e.g., `\d` is not negated but
-    /// `\D` is.
-    pub negated: bool,
-}
-
-/// The available Perl character classes.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum ClassPerlKind {
-    /// Decimal numbers.
-    Digit,
-    /// Whitespace.
-    Space,
-    /// Word characters.
-    Word,
-}
-
-/// An ASCII character class.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct ClassAscii {
-    /// The span of this class.
-    pub span: Span,
-    /// The kind of ASCII class.
-    pub kind: ClassAsciiKind,
-    /// Whether the class is negated or not. e.g., `[[:alpha:]]` is not negated
-    /// but `[[:^alpha:]]` is.
-    pub negated: bool,
-}
-
-/// The available ASCII character classes.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum ClassAsciiKind {
-    /// `[0-9A-Za-z]`
-    Alnum,
-    /// `[A-Za-z]`
-    Alpha,
-    /// `[\x00-\x7F]`
-    Ascii,
-    /// `[ \t]`
-    Blank,
-    /// `[\x00-\x1F\x7F]`
-    Cntrl,
-    /// `[0-9]`
-    Digit,
-    /// `[!-~]`
-    Graph,
-    /// `[a-z]`
-    Lower,
-    /// `[ -~]`
-    Print,
-    /// `[!-/:-@\[-`{-~]`
-    Punct,
-    /// `[\t\n\v\f\r ]`
-    Space,
-    /// `[A-Z]`
-    Upper,
-    /// `[0-9A-Za-z_]`
-    Word,
-    /// `[0-9A-Fa-f]`
-    Xdigit,
-}
-
-impl ClassAsciiKind {
-    /// Return the corresponding ClassAsciiKind variant for the given name.
-    ///
-    /// The name given should correspond to the lowercase version of the
-    /// variant name. e.g., `cntrl` is the name for `ClassAsciiKind::Cntrl`.
-    ///
-    /// If no variant with the corresponding name exists, then `None` is
-    /// returned.
-    pub fn from_name(name: &str) -> Option<ClassAsciiKind> {
-        use self::ClassAsciiKind::*;
-        match name {
-            "alnum" => Some(Alnum),
-            "alpha" => Some(Alpha),
-            "ascii" => Some(Ascii),
-            "blank" => Some(Blank),
-            "cntrl" => Some(Cntrl),
-            "digit" => Some(Digit),
-            "graph" => Some(Graph),
-            "lower" => Some(Lower),
-            "print" => Some(Print),
-            "punct" => Some(Punct),
-            "space" => Some(Space),
-            "upper" => Some(Upper),
-            "word" => Some(Word),
-            "xdigit" => Some(Xdigit),
-            _ => None,
-        }
-    }
-}
-
-/// A Unicode character class.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct ClassUnicode {
-    /// The span of this class.
-    pub span: Span,
-    /// Whether this class is negated or not.
-    ///
-    /// Note: be careful when using this attribute. This specifically refers
-    /// to whether the class is written as `\p` or `\P`, where the latter
-    /// is `negated = true`. However, it also possible to write something like
-    /// `\P{scx!=Katakana}` which is actually equivalent to
-    /// `\p{scx=Katakana}` and is therefore not actually negated even though
-    /// `negated = true` here. To test whether this class is truly negated
-    /// or not, use the `is_negated` method.
-    pub negated: bool,
-    /// The kind of Unicode class.
-    pub kind: ClassUnicodeKind,
-}
-
-impl ClassUnicode {
-    /// Returns true if this class has been negated.
-    ///
-    /// Note that this takes the Unicode op into account, if it's present.
-    /// e.g., `is_negated` for `\P{scx!=Katakana}` will return `false`.
-    pub fn is_negated(&self) -> bool {
-        match self.kind {
-            ClassUnicodeKind::NamedValue {
-                op: ClassUnicodeOpKind::NotEqual,
-                ..
-            } => !self.negated,
-            _ => self.negated,
-        }
-    }
-}
-
-/// The available forms of Unicode character classes.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum ClassUnicodeKind {
-    /// A one letter abbreviated class, e.g., `\pN`.
-    OneLetter(char),
-    /// A binary property, general category or script. The string may be
-    /// empty.
-    Named(String),
-    /// A property name and an associated value.
-    NamedValue {
-        /// The type of Unicode op used to associate `name` with `value`.
-        op: ClassUnicodeOpKind,
-        /// The property name (which may be empty).
-        name: String,
-        /// The property value (which may be empty).
-        value: String,
-    },
-}
-
-#[cfg(feature = "arbitrary")]
-impl arbitrary::Arbitrary<'_> for ClassUnicodeKind {
-    fn arbitrary(
-        u: &mut arbitrary::Unstructured,
-    ) -> arbitrary::Result<ClassUnicodeKind> {
-        #[cfg(any(
-            feature = "unicode-age",
-            feature = "unicode-bool",
-            feature = "unicode-gencat",
-            feature = "unicode-perl",
-            feature = "unicode-script",
-            feature = "unicode-segment",
-        ))]
-        {
-            use alloc::string::ToString;
-
-            use super::unicode_tables::{
-                property_names::PROPERTY_NAMES,
-                property_values::PROPERTY_VALUES,
-            };
-
-            match u.choose_index(3)? {
-                0 => {
-                    let all = PROPERTY_VALUES
-                        .iter()
-                        .flat_map(|e| e.1.iter())
-                        .filter(|(name, _)| name.len() == 1)
-                        .count();
-                    let idx = u.choose_index(all)?;
-                    let value = PROPERTY_VALUES
-                        .iter()
-                        .flat_map(|e| e.1.iter())
-                        .take(idx + 1)
-                        .last()
-                        .unwrap()
-                        .0
-                        .chars()
-                        .next()
-                        .unwrap();
-                    Ok(ClassUnicodeKind::OneLetter(value))
-                }
-                1 => {
-                    let all = PROPERTY_VALUES
-                        .iter()
-                        .map(|e| e.1.len())
-                        .sum::<usize>()
-                        + PROPERTY_NAMES.len();
-                    let idx = u.choose_index(all)?;
-                    let name = PROPERTY_VALUES
-                        .iter()
-                        .flat_map(|e| e.1.iter())
-                        .chain(PROPERTY_NAMES)
-                        .map(|(_, e)| e)
-                        .take(idx + 1)
-                        .last()
-                        .unwrap();
-                    Ok(ClassUnicodeKind::Named(name.to_string()))
-                }
-                2 => {
-                    let all = PROPERTY_VALUES
-                        .iter()
-                        .map(|e| e.1.len())
-                        .sum::<usize>();
-                    let idx = u.choose_index(all)?;
-                    let (prop, value) = PROPERTY_VALUES
-                        .iter()
-                        .flat_map(|e| {
-                            e.1.iter().map(|(_, value)| (e.0, value))
-                        })
-                        .take(idx + 1)
-                        .last()
-                        .unwrap();
-                    Ok(ClassUnicodeKind::NamedValue {
-                        op: u.arbitrary()?,
-                        name: prop.to_string(),
-                        value: value.to_string(),
-                    })
-                }
-                _ => unreachable!("index chosen is impossible"),
-            }
-        }
-        #[cfg(not(any(
-            feature = "unicode-age",
-            feature = "unicode-bool",
-            feature = "unicode-gencat",
-            feature = "unicode-perl",
-            feature = "unicode-script",
-            feature = "unicode-segment",
-        )))]
-        {
-            match u.choose_index(3)? {
-                0 => Ok(ClassUnicodeKind::OneLetter(u.arbitrary()?)),
-                1 => Ok(ClassUnicodeKind::Named(u.arbitrary()?)),
-                2 => Ok(ClassUnicodeKind::NamedValue {
-                    op: u.arbitrary()?,
-                    name: u.arbitrary()?,
-                    value: u.arbitrary()?,
-                }),
-                _ => unreachable!("index chosen is impossible"),
-            }
-        }
-    }
-
-    fn size_hint(depth: usize) -> (usize, Option<usize>) {
-        #[cfg(any(
-            feature = "unicode-age",
-            feature = "unicode-bool",
-            feature = "unicode-gencat",
-            feature = "unicode-perl",
-            feature = "unicode-script",
-            feature = "unicode-segment",
-        ))]
-        {
-            arbitrary::size_hint::and_all(&[
-                usize::size_hint(depth),
-                usize::size_hint(depth),
-                arbitrary::size_hint::or(
-                    (0, Some(0)),
-                    ClassUnicodeOpKind::size_hint(depth),
-                ),
-            ])
-        }
-        #[cfg(not(any(
-            feature = "unicode-age",
-            feature = "unicode-bool",
-            feature = "unicode-gencat",
-            feature = "unicode-perl",
-            feature = "unicode-script",
-            feature = "unicode-segment",
-        )))]
-        {
-            arbitrary::size_hint::and(
-                usize::size_hint(depth),
-                arbitrary::size_hint::or_all(&[
-                    char::size_hint(depth),
-                    String::size_hint(depth),
-                    arbitrary::size_hint::and_all(&[
-                        String::size_hint(depth),
-                        String::size_hint(depth),
-                        ClassUnicodeOpKind::size_hint(depth),
-                    ]),
-                ]),
-            )
-        }
-    }
-}
-
-/// The type of op used in a Unicode character class.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum ClassUnicodeOpKind {
-    /// A property set to a specific value, e.g., `\p{scx=Katakana}`.
-    Equal,
-    /// A property set to a specific value using a colon, e.g.,
-    /// `\p{scx:Katakana}`.
-    Colon,
-    /// A property that isn't a particular value, e.g., `\p{scx!=Katakana}`.
-    NotEqual,
-}
-
-impl ClassUnicodeOpKind {
-    /// Whether the op is an equality op or not.
-    pub fn is_equal(&self) -> bool {
-        match *self {
-            ClassUnicodeOpKind::Equal | ClassUnicodeOpKind::Colon => true,
-            _ => false,
-        }
-    }
-}
-
-/// A bracketed character class, e.g., `[a-z0-9]`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct ClassBracketed {
-    /// The span of this class.
-    pub span: Span,
-    /// Whether this class is negated or not. e.g., `[a]` is not negated but
-    /// `[^a]` is.
-    pub negated: bool,
-    /// The type of this set. A set is either a normal union of things, e.g.,
-    /// `[abc]` or a result of applying set operations, e.g., `[\pL--c]`.
-    pub kind: ClassSet,
-}
-
-/// A character class set.
-///
-/// This type corresponds to the internal structure of a bracketed character
-/// class. That is, every bracketed character is one of two types: a union of
-/// items (literals, ranges, other bracketed classes) or a tree of binary set
-/// operations.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum ClassSet {
-    /// An item, which can be a single literal, range, nested character class
-    /// or a union of items.
-    Item(ClassSetItem),
-    /// A single binary operation (i.e., &&, -- or ~~).
-    BinaryOp(ClassSetBinaryOp),
-}
-
-impl ClassSet {
-    /// Build a set from a union.
-    pub fn union(ast: ClassSetUnion) -> ClassSet {
-        ClassSet::Item(ClassSetItem::Union(ast))
-    }
-
-    /// Return the span of this character class set.
-    pub fn span(&self) -> &Span {
-        match *self {
-            ClassSet::Item(ref x) => x.span(),
-            ClassSet::BinaryOp(ref x) => &x.span,
-        }
-    }
-
-    /// Return true if and only if this class set is empty.
-    fn is_empty(&self) -> bool {
-        match *self {
-            ClassSet::Item(ClassSetItem::Empty(_)) => true,
-            _ => false,
-        }
-    }
-}
-
-/// A single component of a character class set.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum ClassSetItem {
-    /// An empty item.
-    ///
-    /// Note that a bracketed character class cannot contain a single empty
-    /// item. Empty items can appear when using one of the binary operators.
-    /// For example, `[&&]` is the intersection of two empty classes.
-    Empty(Span),
-    /// A single literal.
-    Literal(Literal),
-    /// A range between two literals.
-    Range(ClassSetRange),
-    /// An ASCII character class, e.g., `[:alnum:]` or `[:punct:]`.
-    Ascii(ClassAscii),
-    /// A Unicode character class, e.g., `\pL` or `\p{Greek}`.
-    Unicode(ClassUnicode),
-    /// A perl character class, e.g., `\d` or `\W`.
-    Perl(ClassPerl),
-    /// A bracketed character class set, which may contain zero or more
-    /// character ranges and/or zero or more nested classes. e.g.,
-    /// `[a-zA-Z\pL]`.
-    Bracketed(Box<ClassBracketed>),
-    /// A union of items.
-    Union(ClassSetUnion),
-}
-
-impl ClassSetItem {
-    /// Return the span of this character class set item.
-    pub fn span(&self) -> &Span {
-        match *self {
-            ClassSetItem::Empty(ref span) => span,
-            ClassSetItem::Literal(ref x) => &x.span,
-            ClassSetItem::Range(ref x) => &x.span,
-            ClassSetItem::Ascii(ref x) => &x.span,
-            ClassSetItem::Perl(ref x) => &x.span,
-            ClassSetItem::Unicode(ref x) => &x.span,
-            ClassSetItem::Bracketed(ref x) => &x.span,
-            ClassSetItem::Union(ref x) => &x.span,
-        }
-    }
-}
-
-/// A single character class range in a set.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct ClassSetRange {
-    /// The span of this range.
-    pub span: Span,
-    /// The start of this range.
-    pub start: Literal,
-    /// The end of this range.
-    pub end: Literal,
-}
-
-impl ClassSetRange {
-    /// Returns true if and only if this character class range is valid.
-    ///
-    /// The only case where a range is invalid is if its start is greater than
-    /// its end.
-    pub fn is_valid(&self) -> bool {
-        self.start.c <= self.end.c
-    }
-}
-
-/// A union of items inside a character class set.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct ClassSetUnion {
-    /// The span of the items in this operation. e.g., the `a-z0-9` in
-    /// `[^a-z0-9]`
-    pub span: Span,
-    /// The sequence of items that make up this union.
-    pub items: Vec<ClassSetItem>,
-}
-
-impl ClassSetUnion {
-    /// Push a new item in this union.
-    ///
-    /// The ending position of this union's span is updated to the ending
-    /// position of the span of the item given. If the union is empty, then
-    /// the starting position of this union is set to the starting position
-    /// of this item.
-    ///
-    /// In other words, if you only use this method to add items to a union
-    /// and you set the spans on each item correctly, then you should never
-    /// need to adjust the span of the union directly.
-    pub fn push(&mut self, item: ClassSetItem) {
-        if self.items.is_empty() {
-            self.span.start = item.span().start;
-        }
-        self.span.end = item.span().end;
-        self.items.push(item);
-    }
-
-    /// Return this union as a character class set item.
-    ///
-    /// If this union contains zero items, then an empty union is
-    /// returned. If this concatenation contains exactly 1 item, then the
-    /// corresponding item is returned. Otherwise, ClassSetItem::Union is
-    /// returned.
-    pub fn into_item(mut self) -> ClassSetItem {
-        match self.items.len() {
-            0 => ClassSetItem::Empty(self.span),
-            1 => self.items.pop().unwrap(),
-            _ => ClassSetItem::Union(self),
-        }
-    }
-}
-
-/// A Unicode character class set operation.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct ClassSetBinaryOp {
-    /// The span of this operation. e.g., the `a-z--[h-p]` in `[a-z--h-p]`.
-    pub span: Span,
-    /// The type of this set operation.
-    pub kind: ClassSetBinaryOpKind,
-    /// The left hand side of the operation.
-    pub lhs: Box<ClassSet>,
-    /// The right hand side of the operation.
-    pub rhs: Box<ClassSet>,
-}
-
-/// The type of a Unicode character class set operation.
-///
-/// Note that this doesn't explicitly represent union since there is no
-/// explicit union operator. Concatenation inside a character class corresponds
-/// to the union operation.
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum ClassSetBinaryOpKind {
-    /// The intersection of two sets, e.g., `\pN&&[a-z]`.
-    Intersection,
-    /// The difference of two sets, e.g., `\pN--[0-9]`.
-    Difference,
-    /// The symmetric difference of two sets. The symmetric difference is the
-    /// set of elements belonging to one but not both sets.
-    /// e.g., `[\pL~~[:ascii:]]`.
-    SymmetricDifference,
-}
-
-/// A single zero-width assertion.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Assertion {
-    /// The span of this assertion.
-    pub span: Span,
-    /// The assertion kind, e.g., `\b` or `^`.
-    pub kind: AssertionKind,
-}
-
-/// An assertion kind.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum AssertionKind {
-    /// `^`
-    StartLine,
-    /// `$`
-    EndLine,
-    /// `\A`
-    StartText,
-    /// `\z`
-    EndText,
-    /// `\b`
-    WordBoundary,
-    /// `\B`
-    NotWordBoundary,
-    /// `\b{start}`
-    WordBoundaryStart,
-    /// `\b{end}`
-    WordBoundaryEnd,
-    /// `\<` (alias for `\b{start}`)
-    WordBoundaryStartAngle,
-    /// `\>` (alias for `\b{end}`)
-    WordBoundaryEndAngle,
-    /// `\b{start-half}`
-    WordBoundaryStartHalf,
-    /// `\b{end-half}`
-    WordBoundaryEndHalf,
-}
-
-/// A repetition operation applied to a regular expression.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Repetition {
-    /// The span of this operation.
-    pub span: Span,
-    /// The actual operation.
-    pub op: RepetitionOp,
-    /// Whether this operation was applied greedily or not.
-    pub greedy: bool,
-    /// The regular expression under repetition.
-    pub ast: Box<Ast>,
-}
-
-/// The repetition operator itself.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct RepetitionOp {
-    /// The span of this operator. This includes things like `+`, `*?` and
-    /// `{m,n}`.
-    pub span: Span,
-    /// The type of operation.
-    pub kind: RepetitionKind,
-}
-
-/// The kind of a repetition operator.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum RepetitionKind {
-    /// `?`
-    ZeroOrOne,
-    /// `*`
-    ZeroOrMore,
-    /// `+`
-    OneOrMore,
-    /// `{m,n}`
-    Range(RepetitionRange),
-}
-
-/// A range repetition operator.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum RepetitionRange {
-    /// `{m}`
-    Exactly(u32),
-    /// `{m,}`
-    AtLeast(u32),
-    /// `{m,n}`
-    Bounded(u32, u32),
-}
-
-impl RepetitionRange {
-    /// Returns true if and only if this repetition range is valid.
-    ///
-    /// The only case where a repetition range is invalid is if it is bounded
-    /// and its start is greater than its end.
-    pub fn is_valid(&self) -> bool {
-        match *self {
-            RepetitionRange::Bounded(s, e) if s > e => false,
-            _ => true,
-        }
-    }
-}
-
-/// A grouped regular expression.
-///
-/// This includes both capturing and non-capturing groups. This does **not**
-/// include flag-only groups like `(?is)`, but does contain any group that
-/// contains a sub-expression, e.g., `(a)`, `(?P<name>a)`, `(?:a)` and
-/// `(?is:a)`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Group {
-    /// The span of this group.
-    pub span: Span,
-    /// The kind of this group.
-    pub kind: GroupKind,
-    /// The regular expression in this group.
-    pub ast: Box<Ast>,
-}
-
-impl Group {
-    /// If this group is non-capturing, then this returns the (possibly empty)
-    /// set of flags. Otherwise, `None` is returned.
-    pub fn flags(&self) -> Option<&Flags> {
-        match self.kind {
-            GroupKind::NonCapturing(ref flags) => Some(flags),
-            _ => None,
-        }
-    }
-
-    /// Returns true if and only if this group is capturing.
-    pub fn is_capturing(&self) -> bool {
-        match self.kind {
-            GroupKind::CaptureIndex(_) | GroupKind::CaptureName { .. } => true,
-            GroupKind::NonCapturing(_) => false,
-        }
-    }
-
-    /// Returns the capture index of this group, if this is a capturing group.
-    ///
-    /// This returns a capture index precisely when `is_capturing` is `true`.
-    pub fn capture_index(&self) -> Option<u32> {
-        match self.kind {
-            GroupKind::CaptureIndex(i) => Some(i),
-            GroupKind::CaptureName { ref name, .. } => Some(name.index),
-            GroupKind::NonCapturing(_) => None,
-        }
-    }
-}
-
-/// The kind of a group.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum GroupKind {
-    /// `(a)`
-    CaptureIndex(u32),
-    /// `(?<name>a)` or `(?P<name>a)`
-    CaptureName {
-        /// True if the `?P<` syntax is used and false if the `?<` syntax is used.
-        starts_with_p: bool,
-        /// The capture name.
-        name: CaptureName,
-    },
-    /// `(?:a)` and `(?i:a)`
-    NonCapturing(Flags),
-}
-
-/// A capture name.
-///
-/// This corresponds to the name itself between the angle brackets in, e.g.,
-/// `(?P<foo>expr)`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct CaptureName {
-    /// The span of this capture name.
-    pub span: Span,
-    /// The capture name.
-    pub name: String,
-    /// The capture index.
-    pub index: u32,
-}
-
-#[cfg(feature = "arbitrary")]
-impl arbitrary::Arbitrary<'_> for CaptureName {
-    fn arbitrary(
-        u: &mut arbitrary::Unstructured,
-    ) -> arbitrary::Result<CaptureName> {
-        let len = u.arbitrary_len::<char>()?;
-        if len == 0 {
-            return Err(arbitrary::Error::NotEnoughData);
-        }
-        let mut name: String = String::new();
-        for _ in 0..len {
-            let ch: char = u.arbitrary()?;
-            let cp = u32::from(ch);
-            let ascii_letter_offset = u8::try_from(cp % 26).unwrap();
-            let ascii_letter = b'a' + ascii_letter_offset;
-            name.push(char::from(ascii_letter));
-        }
-        Ok(CaptureName { span: u.arbitrary()?, name, index: u.arbitrary()? })
-    }
-
-    fn size_hint(depth: usize) -> (usize, Option<usize>) {
-        arbitrary::size_hint::and_all(&[
-            Span::size_hint(depth),
-            usize::size_hint(depth),
-            u32::size_hint(depth),
-        ])
-    }
-}
-
-/// A group of flags that is not applied to a particular regular expression.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct SetFlags {
-    /// The span of these flags, including the grouping parentheses.
-    pub span: Span,
-    /// The actual sequence of flags.
-    pub flags: Flags,
-}
-
-/// A group of flags.
-///
-/// This corresponds only to the sequence of flags themselves, e.g., `is-u`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct Flags {
-    /// The span of this group of flags.
-    pub span: Span,
-    /// A sequence of flag items. Each item is either a flag or a negation
-    /// operator.
-    pub items: Vec<FlagsItem>,
-}
-
-impl Flags {
-    /// Add the given item to this sequence of flags.
-    ///
-    /// If the item was added successfully, then `None` is returned. If the
-    /// given item is a duplicate, then `Some(i)` is returned, where
-    /// `items[i].kind == item.kind`.
-    pub fn add_item(&mut self, item: FlagsItem) -> Option<usize> {
-        for (i, x) in self.items.iter().enumerate() {
-            if x.kind == item.kind {
-                return Some(i);
-            }
-        }
-        self.items.push(item);
-        None
-    }
-
-    /// Returns the state of the given flag in this set.
-    ///
-    /// If the given flag is in the set but is negated, then `Some(false)` is
-    /// returned.
-    ///
-    /// If the given flag is in the set and is not negated, then `Some(true)`
-    /// is returned.
-    ///
-    /// Otherwise, `None` is returned.
-    pub fn flag_state(&self, flag: Flag) -> Option<bool> {
-        let mut negated = false;
-        for x in &self.items {
-            match x.kind {
-                FlagsItemKind::Negation => {
-                    negated = true;
-                }
-                FlagsItemKind::Flag(ref xflag) if xflag == &flag => {
-                    return Some(!negated);
-                }
-                _ => {}
-            }
-        }
-        None
-    }
-}
-
-/// A single item in a group of flags.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub struct FlagsItem {
-    /// The span of this item.
-    pub span: Span,
-    /// The kind of this item.
-    pub kind: FlagsItemKind,
-}
-
-/// The kind of an item in a group of flags.
-#[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum FlagsItemKind {
-    /// A negation operator applied to all subsequent flags in the enclosing
-    /// group.
-    Negation,
-    /// A single flag in a group.
-    Flag(Flag),
-}
-
-impl FlagsItemKind {
-    /// Returns true if and only if this item is a negation operator.
-    pub fn is_negation(&self) -> bool {
-        match *self {
-            FlagsItemKind::Negation => true,
-            _ => false,
-        }
-    }
-}
-
-/// A single flag.
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
-pub enum Flag {
-    /// `i`
-    CaseInsensitive,
-    /// `m`
-    MultiLine,
-    /// `s`
-    DotMatchesNewLine,
-    /// `U`
-    SwapGreed,
-    /// `u`
-    Unicode,
-    /// `R`
-    CRLF,
-    /// `x`
-    IgnoreWhitespace,
-}
-
-/// A custom `Drop` impl is used for `Ast` such that it uses constant stack
-/// space but heap space proportional to the depth of the `Ast`.
-impl Drop for Ast {
-    fn drop(&mut self) {
-        use core::mem;
-
-        match *self {
-            Ast::Empty(_)
-            | Ast::Flags(_)
-            | Ast::Literal(_)
-            | Ast::Dot(_)
-            | Ast::Assertion(_)
-            | Ast::ClassUnicode(_)
-            | Ast::ClassPerl(_)
-            // Bracketed classes are recursive, they get their own Drop impl.
-            | Ast::ClassBracketed(_) => return,
-            Ast::Repetition(ref x) if !x.ast.has_subexprs() => return,
-            Ast::Group(ref x) if !x.ast.has_subexprs() => return,
-            Ast::Alternation(ref x) if x.asts.is_empty() => return,
-            Ast::Concat(ref x) if x.asts.is_empty() => return,
-            _ => {}
-        }
-
-        let empty_span = || Span::splat(Position::new(0, 0, 0));
-        let empty_ast = || Ast::empty(empty_span());
-        let mut stack = vec![mem::replace(self, empty_ast())];
-        while let Some(mut ast) = stack.pop() {
-            match ast {
-                Ast::Empty(_)
-                | Ast::Flags(_)
-                | Ast::Literal(_)
-                | Ast::Dot(_)
-                | Ast::Assertion(_)
-                | Ast::ClassUnicode(_)
-                | Ast::ClassPerl(_)
-                // Bracketed classes are recursive, so they get their own Drop
-                // impl.
-                | Ast::ClassBracketed(_) => {}
-                Ast::Repetition(ref mut x) => {
-                    stack.push(mem::replace(&mut x.ast, empty_ast()));
-                }
-                Ast::Group(ref mut x) => {
-                    stack.push(mem::replace(&mut x.ast, empty_ast()));
-                }
-                Ast::Alternation(ref mut x) => {
-                    stack.extend(x.asts.drain(..));
-                }
-                Ast::Concat(ref mut x) => {
-                    stack.extend(x.asts.drain(..));
-                }
-            }
-        }
-    }
-}
-
-/// A custom `Drop` impl is used for `ClassSet` such that it uses constant
-/// stack space but heap space proportional to the depth of the `ClassSet`.
-impl Drop for ClassSet {
-    fn drop(&mut self) {
-        use core::mem;
-
-        match *self {
-            ClassSet::Item(ref item) => match *item {
-                ClassSetItem::Empty(_)
-                | ClassSetItem::Literal(_)
-                | ClassSetItem::Range(_)
-                | ClassSetItem::Ascii(_)
-                | ClassSetItem::Unicode(_)
-                | ClassSetItem::Perl(_) => return,
-                ClassSetItem::Bracketed(ref x) => {
-                    if x.kind.is_empty() {
-                        return;
-                    }
-                }
-                ClassSetItem::Union(ref x) => {
-                    if x.items.is_empty() {
-                        return;
-                    }
-                }
-            },
-            ClassSet::BinaryOp(ref op) => {
-                if op.lhs.is_empty() && op.rhs.is_empty() {
-                    return;
-                }
-            }
-        }
-
-        let empty_span = || Span::splat(Position::new(0, 0, 0));
-        let empty_set = || ClassSet::Item(ClassSetItem::Empty(empty_span()));
-        let mut stack = vec![mem::replace(self, empty_set())];
-        while let Some(mut set) = stack.pop() {
-            match set {
-                ClassSet::Item(ref mut item) => match *item {
-                    ClassSetItem::Empty(_)
-                    | ClassSetItem::Literal(_)
-                    | ClassSetItem::Range(_)
-                    | ClassSetItem::Ascii(_)
-                    | ClassSetItem::Unicode(_)
-                    | ClassSetItem::Perl(_) => {}
-                    ClassSetItem::Bracketed(ref mut x) => {
-                        stack.push(mem::replace(&mut x.kind, empty_set()));
-                    }
-                    ClassSetItem::Union(ref mut x) => {
-                        stack.extend(x.items.drain(..).map(ClassSet::Item));
-                    }
-                },
-                ClassSet::BinaryOp(ref mut op) => {
-                    stack.push(mem::replace(&mut op.lhs, empty_set()));
-                    stack.push(mem::replace(&mut op.rhs, empty_set()));
-                }
-            }
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    // We use a thread with an explicit stack size to test that our destructor
-    // for Ast can handle arbitrarily sized expressions in constant stack
-    // space. In case we run on a platform without threads (WASM?), we limit
-    // this test to Windows/Unix.
-    #[test]
-    #[cfg(any(unix, windows))]
-    fn no_stack_overflow_on_drop() {
-        use std::thread;
-
-        let run = || {
-            let span = || Span::splat(Position::new(0, 0, 0));
-            let mut ast = Ast::empty(span());
-            for i in 0..200 {
-                ast = Ast::group(Group {
-                    span: span(),
-                    kind: GroupKind::CaptureIndex(i),
-                    ast: Box::new(ast),
-                });
-            }
-            assert!(!ast.is_empty());
-        };
-
-        // We run our test on a thread with a small stack size so we can
-        // force the issue more easily.
-        //
-        // NOTE(2023-03-21): It turns out that some platforms (like FreeBSD)
-        // will just barf with very small stack sizes. So we bump this up a bit
-        // to give more room to breath. When I did this, I confirmed that if
-        // I remove the custom `Drop` impl for `Ast`, then this test does
-        // indeed still fail with a stack overflow. (At the time of writing, I
-        // had to bump it all the way up to 32K before the test would pass even
-        // without the custom `Drop` impl. So 16K seems like a safe number
-        // here.)
-        //
-        // See: https://github.com/rust-lang/regex/issues/967
-        thread::Builder::new()
-            .stack_size(16 << 10)
-            .spawn(run)
-            .unwrap()
-            .join()
-            .unwrap();
-    }
-
-    // This tests that our `Ast` has a reasonable size. This isn't a hard rule
-    // and it can be increased if given a good enough reason. But this test
-    // exists because the size of `Ast` was at one point over 200 bytes on a
-    // 64-bit target. Wow.
-    #[test]
-    fn ast_size() {
-        let max = 2 * core::mem::size_of::<usize>();
-        let size = core::mem::size_of::<Ast>();
-        assert!(
-            size <= max,
-            "Ast size of {} bytes is bigger than suggested max {}",
-            size,
-            max
-        );
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/parse.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/parse.rs
deleted file mode 100644
index 0c2a3526..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/parse.rs
+++ /dev/null
@@ -1,6377 +0,0 @@
-/*!
-This module provides a regular expression parser.
-*/
-
-use core::{
-    borrow::Borrow,
-    cell::{Cell, RefCell},
-    mem,
-};
-
-use alloc::{
-    boxed::Box,
-    string::{String, ToString},
-    vec,
-    vec::Vec,
-};
-
-use crate::{
-    ast::{self, Ast, Position, Span},
-    either::Either,
-    is_escapeable_character, is_meta_character,
-};
-
-type Result<T> = core::result::Result<T, ast::Error>;
-
-/// A primitive is an expression with no sub-expressions. This includes
-/// literals, assertions and non-set character classes. This representation
-/// is used as intermediate state in the parser.
-///
-/// This does not include ASCII character classes, since they can only appear
-/// within a set character class.
-#[derive(Clone, Debug, Eq, PartialEq)]
-enum Primitive {
-    Literal(ast::Literal),
-    Assertion(ast::Assertion),
-    Dot(Span),
-    Perl(ast::ClassPerl),
-    Unicode(ast::ClassUnicode),
-}
-
-impl Primitive {
-    /// Return the span of this primitive.
-    fn span(&self) -> &Span {
-        match *self {
-            Primitive::Literal(ref x) => &x.span,
-            Primitive::Assertion(ref x) => &x.span,
-            Primitive::Dot(ref span) => span,
-            Primitive::Perl(ref x) => &x.span,
-            Primitive::Unicode(ref x) => &x.span,
-        }
-    }
-
-    /// Convert this primitive into a proper AST.
-    fn into_ast(self) -> Ast {
-        match self {
-            Primitive::Literal(lit) => Ast::literal(lit),
-            Primitive::Assertion(assert) => Ast::assertion(assert),
-            Primitive::Dot(span) => Ast::dot(span),
-            Primitive::Perl(cls) => Ast::class_perl(cls),
-            Primitive::Unicode(cls) => Ast::class_unicode(cls),
-        }
-    }
-
-    /// Convert this primitive into an item in a character class.
-    ///
-    /// If this primitive is not a legal item (i.e., an assertion or a dot),
-    /// then return an error.
-    fn into_class_set_item<P: Borrow<Parser>>(
-        self,
-        p: &ParserI<'_, P>,
-    ) -> Result<ast::ClassSetItem> {
-        use self::Primitive::*;
-        use crate::ast::ClassSetItem;
-
-        match self {
-            Literal(lit) => Ok(ClassSetItem::Literal(lit)),
-            Perl(cls) => Ok(ClassSetItem::Perl(cls)),
-            Unicode(cls) => Ok(ClassSetItem::Unicode(cls)),
-            x => Err(p.error(*x.span(), ast::ErrorKind::ClassEscapeInvalid)),
-        }
-    }
-
-    /// Convert this primitive into a literal in a character class. In
-    /// particular, literals are the only valid items that can appear in
-    /// ranges.
-    ///
-    /// If this primitive is not a legal item (i.e., a class, assertion or a
-    /// dot), then return an error.
-    fn into_class_literal<P: Borrow<Parser>>(
-        self,
-        p: &ParserI<'_, P>,
-    ) -> Result<ast::Literal> {
-        use self::Primitive::*;
-
-        match self {
-            Literal(lit) => Ok(lit),
-            x => Err(p.error(*x.span(), ast::ErrorKind::ClassRangeLiteral)),
-        }
-    }
-}
-
-/// Returns true if the given character is a hexadecimal digit.
-fn is_hex(c: char) -> bool {
-    ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
-}
-
-/// Returns true if the given character is a valid in a capture group name.
-///
-/// If `first` is true, then `c` is treated as the first character in the
-/// group name (which must be alphabetic or underscore).
-fn is_capture_char(c: char, first: bool) -> bool {
-    if first {
-        c == '_' || c.is_alphabetic()
-    } else {
-        c == '_' || c == '.' || c == '[' || c == ']' || c.is_alphanumeric()
-    }
-}
-
-/// A builder for a regular expression parser.
-///
-/// This builder permits modifying configuration options for the parser.
-#[derive(Clone, Debug)]
-pub struct ParserBuilder {
-    ignore_whitespace: bool,
-    nest_limit: u32,
-    octal: bool,
-    empty_min_range: bool,
-}
-
-impl Default for ParserBuilder {
-    fn default() -> ParserBuilder {
-        ParserBuilder::new()
-    }
-}
-
-impl ParserBuilder {
-    /// Create a new parser builder with a default configuration.
-    pub fn new() -> ParserBuilder {
-        ParserBuilder {
-            ignore_whitespace: false,
-            nest_limit: 250,
-            octal: false,
-            empty_min_range: false,
-        }
-    }
-
-    /// Build a parser from this configuration with the given pattern.
-    pub fn build(&self) -> Parser {
-        Parser {
-            pos: Cell::new(Position { offset: 0, line: 1, column: 1 }),
-            capture_index: Cell::new(0),
-            nest_limit: self.nest_limit,
-            octal: self.octal,
-            empty_min_range: self.empty_min_range,
-            initial_ignore_whitespace: self.ignore_whitespace,
-            ignore_whitespace: Cell::new(self.ignore_whitespace),
-            comments: RefCell::new(vec![]),
-            stack_group: RefCell::new(vec![]),
-            stack_class: RefCell::new(vec![]),
-            capture_names: RefCell::new(vec![]),
-            scratch: RefCell::new(String::new()),
-        }
-    }
-
-    /// Set the nesting limit for this parser.
-    ///
-    /// The nesting limit controls how deep the abstract syntax tree is allowed
-    /// to be. If the AST exceeds the given limit (e.g., with too many nested
-    /// groups), then an error is returned by the parser.
-    ///
-    /// The purpose of this limit is to act as a heuristic to prevent stack
-    /// overflow for consumers that do structural induction on an `Ast` using
-    /// explicit recursion. While this crate never does this (instead using
-    /// constant stack space and moving the call stack to the heap), other
-    /// crates may.
-    ///
-    /// This limit is not checked until the entire AST is parsed. Therefore,
-    /// if callers want to put a limit on the amount of heap space used, then
-    /// they should impose a limit on the length, in bytes, of the concrete
-    /// pattern string. In particular, this is viable since this parser
-    /// implementation will limit itself to heap space proportional to the
-    /// length of the pattern string.
-    ///
-    /// Note that a nest limit of `0` will return a nest limit error for most
-    /// patterns but not all. For example, a nest limit of `0` permits `a` but
-    /// not `ab`, since `ab` requires a concatenation, which results in a nest
-    /// depth of `1`. In general, a nest limit is not something that manifests
-    /// in an obvious way in the concrete syntax, therefore, it should not be
-    /// used in a granular way.
-    pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder {
-        self.nest_limit = limit;
-        self
-    }
-
-    /// Whether to support octal syntax or not.
-    ///
-    /// Octal syntax is a little-known way of uttering Unicode codepoints in
-    /// a regular expression. For example, `a`, `\x61`, `\u0061` and
-    /// `\141` are all equivalent regular expressions, where the last example
-    /// shows octal syntax.
-    ///
-    /// While supporting octal syntax isn't in and of itself a problem, it does
-    /// make good error messages harder. That is, in PCRE based regex engines,
-    /// syntax like `\0` invokes a backreference, which is explicitly
-    /// unsupported in Rust's regex engine. However, many users expect it to
-    /// be supported. Therefore, when octal support is disabled, the error
-    /// message will explicitly mention that backreferences aren't supported.
-    ///
-    /// Octal syntax is disabled by default.
-    pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.octal = yes;
-        self
-    }
-
-    /// Enable verbose mode in the regular expression.
-    ///
-    /// When enabled, verbose mode permits insignificant whitespace in many
-    /// places in the regular expression, as well as comments. Comments are
-    /// started using `#` and continue until the end of the line.
-    ///
-    /// By default, this is disabled. It may be selectively enabled in the
-    /// regular expression by using the `x` flag regardless of this setting.
-    pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.ignore_whitespace = yes;
-        self
-    }
-
-    /// Allow using `{,n}` as an equivalent to `{0,n}`.
-    ///
-    /// When enabled, the parser accepts `{,n}` as valid syntax for `{0,n}`.
-    /// Most regular expression engines don't support the `{,n}` syntax, but
-    /// some others do it, namely Python's `re` library.
-    ///
-    /// This is disabled by default.
-    pub fn empty_min_range(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.empty_min_range = yes;
-        self
-    }
-}
-
-/// A regular expression parser.
-///
-/// This parses a string representation of a regular expression into an
-/// abstract syntax tree. The size of the tree is proportional to the length
-/// of the regular expression pattern.
-///
-/// A `Parser` can be configured in more detail via a [`ParserBuilder`].
-#[derive(Clone, Debug)]
-pub struct Parser {
-    /// The current position of the parser.
-    pos: Cell<Position>,
-    /// The current capture index.
-    capture_index: Cell<u32>,
-    /// The maximum number of open parens/brackets allowed. If the parser
-    /// exceeds this number, then an error is returned.
-    nest_limit: u32,
-    /// Whether to support octal syntax or not. When `false`, the parser will
-    /// return an error helpfully pointing out that backreferences are not
-    /// supported.
-    octal: bool,
-    /// The initial setting for `ignore_whitespace` as provided by
-    /// `ParserBuilder`. It is used when resetting the parser's state.
-    initial_ignore_whitespace: bool,
-    /// Whether the parser supports `{,n}` repetitions as an equivalent to
-    /// `{0,n}.`
-    empty_min_range: bool,
-    /// Whether whitespace should be ignored. When enabled, comments are
-    /// also permitted.
-    ignore_whitespace: Cell<bool>,
-    /// A list of comments, in order of appearance.
-    comments: RefCell<Vec<ast::Comment>>,
-    /// A stack of grouped sub-expressions, including alternations.
-    stack_group: RefCell<Vec<GroupState>>,
-    /// A stack of nested character classes. This is only non-empty when
-    /// parsing a class.
-    stack_class: RefCell<Vec<ClassState>>,
-    /// A sorted sequence of capture names. This is used to detect duplicate
-    /// capture names and report an error if one is detected.
-    capture_names: RefCell<Vec<ast::CaptureName>>,
-    /// A scratch buffer used in various places. Mostly this is used to
-    /// accumulate relevant characters from parts of a pattern.
-    scratch: RefCell<String>,
-}
-
-/// ParserI is the internal parser implementation.
-///
-/// We use this separate type so that we can carry the provided pattern string
-/// along with us. In particular, a `Parser` internal state is not tied to any
-/// one pattern, but `ParserI` is.
-///
-/// This type also lets us use `ParserI<&Parser>` in production code while
-/// retaining the convenience of `ParserI<Parser>` for tests, which sometimes
-/// work against the internal interface of the parser.
-#[derive(Clone, Debug)]
-struct ParserI<'s, P> {
-    /// The parser state/configuration.
-    parser: P,
-    /// The full regular expression provided by the user.
-    pattern: &'s str,
-}
-
-/// GroupState represents a single stack frame while parsing nested groups
-/// and alternations. Each frame records the state up to an opening parenthesis
-/// or a alternating bracket `|`.
-#[derive(Clone, Debug)]
-enum GroupState {
-    /// This state is pushed whenever an opening group is found.
-    Group {
-        /// The concatenation immediately preceding the opening group.
-        concat: ast::Concat,
-        /// The group that has been opened. Its sub-AST is always empty.
-        group: ast::Group,
-        /// Whether this group has the `x` flag enabled or not.
-        ignore_whitespace: bool,
-    },
-    /// This state is pushed whenever a new alternation branch is found. If
-    /// an alternation branch is found and this state is at the top of the
-    /// stack, then this state should be modified to include the new
-    /// alternation.
-    Alternation(ast::Alternation),
-}
-
-/// ClassState represents a single stack frame while parsing character classes.
-/// Each frame records the state up to an intersection, difference, symmetric
-/// difference or nested class.
-///
-/// Note that a parser's character class stack is only non-empty when parsing
-/// a character class. In all other cases, it is empty.
-#[derive(Clone, Debug)]
-enum ClassState {
-    /// This state is pushed whenever an opening bracket is found.
-    Open {
-        /// The union of class items immediately preceding this class.
-        union: ast::ClassSetUnion,
-        /// The class that has been opened. Typically this just corresponds
-        /// to the `[`, but it can also include `[^` since `^` indicates
-        /// negation of the class.
-        set: ast::ClassBracketed,
-    },
-    /// This state is pushed when a operator is seen. When popped, the stored
-    /// set becomes the left hand side of the operator.
-    Op {
-        /// The type of the operation, i.e., &&, -- or ~~.
-        kind: ast::ClassSetBinaryOpKind,
-        /// The left-hand side of the operator.
-        lhs: ast::ClassSet,
-    },
-}
-
-impl Parser {
-    /// Create a new parser with a default configuration.
-    ///
-    /// The parser can be run with either the `parse` or `parse_with_comments`
-    /// methods. The parse methods return an abstract syntax tree.
-    ///
-    /// To set configuration options on the parser, use [`ParserBuilder`].
-    pub fn new() -> Parser {
-        ParserBuilder::new().build()
-    }
-
-    /// Parse the regular expression into an abstract syntax tree.
-    pub fn parse(&mut self, pattern: &str) -> Result<Ast> {
-        ParserI::new(self, pattern).parse()
-    }
-
-    /// Parse the regular expression and return an abstract syntax tree with
-    /// all of the comments found in the pattern.
-    pub fn parse_with_comments(
-        &mut self,
-        pattern: &str,
-    ) -> Result<ast::WithComments> {
-        ParserI::new(self, pattern).parse_with_comments()
-    }
-
-    /// Reset the internal state of a parser.
-    ///
-    /// This is called at the beginning of every parse. This prevents the
-    /// parser from running with inconsistent state (say, if a previous
-    /// invocation returned an error and the parser is reused).
-    fn reset(&self) {
-        // These settings should be in line with the construction
-        // in `ParserBuilder::build`.
-        self.pos.set(Position { offset: 0, line: 1, column: 1 });
-        self.ignore_whitespace.set(self.initial_ignore_whitespace);
-        self.comments.borrow_mut().clear();
-        self.stack_group.borrow_mut().clear();
-        self.stack_class.borrow_mut().clear();
-    }
-}
-
-impl<'s, P: Borrow<Parser>> ParserI<'s, P> {
-    /// Build an internal parser from a parser configuration and a pattern.
-    fn new(parser: P, pattern: &'s str) -> ParserI<'s, P> {
-        ParserI { parser, pattern }
-    }
-
-    /// Return a reference to the parser state.
-    fn parser(&self) -> &Parser {
-        self.parser.borrow()
-    }
-
-    /// Return a reference to the pattern being parsed.
-    fn pattern(&self) -> &str {
-        self.pattern
-    }
-
-    /// Create a new error with the given span and error type.
-    fn error(&self, span: Span, kind: ast::ErrorKind) -> ast::Error {
-        ast::Error { kind, pattern: self.pattern().to_string(), span }
-    }
-
-    /// Return the current offset of the parser.
-    ///
-    /// The offset starts at `0` from the beginning of the regular expression
-    /// pattern string.
-    fn offset(&self) -> usize {
-        self.parser().pos.get().offset
-    }
-
-    /// Return the current line number of the parser.
-    ///
-    /// The line number starts at `1`.
-    fn line(&self) -> usize {
-        self.parser().pos.get().line
-    }
-
-    /// Return the current column of the parser.
-    ///
-    /// The column number starts at `1` and is reset whenever a `\n` is seen.
-    fn column(&self) -> usize {
-        self.parser().pos.get().column
-    }
-
-    /// Return the next capturing index. Each subsequent call increments the
-    /// internal index.
-    ///
-    /// The span given should correspond to the location of the opening
-    /// parenthesis.
-    ///
-    /// If the capture limit is exceeded, then an error is returned.
-    fn next_capture_index(&self, span: Span) -> Result<u32> {
-        let current = self.parser().capture_index.get();
-        let i = current.checked_add(1).ok_or_else(|| {
-            self.error(span, ast::ErrorKind::CaptureLimitExceeded)
-        })?;
-        self.parser().capture_index.set(i);
-        Ok(i)
-    }
-
-    /// Adds the given capture name to this parser. If this capture name has
-    /// already been used, then an error is returned.
-    fn add_capture_name(&self, cap: &ast::CaptureName) -> Result<()> {
-        let mut names = self.parser().capture_names.borrow_mut();
-        match names
-            .binary_search_by_key(&cap.name.as_str(), |c| c.name.as_str())
-        {
-            Err(i) => {
-                names.insert(i, cap.clone());
-                Ok(())
-            }
-            Ok(i) => Err(self.error(
-                cap.span,
-                ast::ErrorKind::GroupNameDuplicate { original: names[i].span },
-            )),
-        }
-    }
-
-    /// Return whether the parser should ignore whitespace or not.
-    fn ignore_whitespace(&self) -> bool {
-        self.parser().ignore_whitespace.get()
-    }
-
-    /// Return the character at the current position of the parser.
-    ///
-    /// This panics if the current position does not point to a valid char.
-    fn char(&self) -> char {
-        self.char_at(self.offset())
-    }
-
-    /// Return the character at the given position.
-    ///
-    /// This panics if the given position does not point to a valid char.
-    fn char_at(&self, i: usize) -> char {
-        self.pattern()[i..]
-            .chars()
-            .next()
-            .unwrap_or_else(|| panic!("expected char at offset {}", i))
-    }
-
-    /// Bump the parser to the next Unicode scalar value.
-    ///
-    /// If the end of the input has been reached, then `false` is returned.
-    fn bump(&self) -> bool {
-        if self.is_eof() {
-            return false;
-        }
-        let Position { mut offset, mut line, mut column } = self.pos();
-        if self.char() == '\n' {
-            line = line.checked_add(1).unwrap();
-            column = 1;
-        } else {
-            column = column.checked_add(1).unwrap();
-        }
-        offset += self.char().len_utf8();
-        self.parser().pos.set(Position { offset, line, column });
-        self.pattern()[self.offset()..].chars().next().is_some()
-    }
-
-    /// If the substring starting at the current position of the parser has
-    /// the given prefix, then bump the parser to the character immediately
-    /// following the prefix and return true. Otherwise, don't bump the parser
-    /// and return false.
-    fn bump_if(&self, prefix: &str) -> bool {
-        if self.pattern()[self.offset()..].starts_with(prefix) {
-            for _ in 0..prefix.chars().count() {
-                self.bump();
-            }
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns true if and only if the parser is positioned at a look-around
-    /// prefix. The conditions under which this returns true must always
-    /// correspond to a regular expression that would otherwise be consider
-    /// invalid.
-    ///
-    /// This should only be called immediately after parsing the opening of
-    /// a group or a set of flags.
-    fn is_lookaround_prefix(&self) -> bool {
-        self.bump_if("?=")
-            || self.bump_if("?!")
-            || self.bump_if("?<=")
-            || self.bump_if("?<!")
-    }
-
-    /// Bump the parser, and if the `x` flag is enabled, bump through any
-    /// subsequent spaces. Return true if and only if the parser is not at
-    /// EOF.
-    fn bump_and_bump_space(&self) -> bool {
-        if !self.bump() {
-            return false;
-        }
-        self.bump_space();
-        !self.is_eof()
-    }
-
-    /// If the `x` flag is enabled (i.e., whitespace insensitivity with
-    /// comments), then this will advance the parser through all whitespace
-    /// and comments to the next non-whitespace non-comment byte.
-    ///
-    /// If the `x` flag is disabled, then this is a no-op.
-    ///
-    /// This should be used selectively throughout the parser where
-    /// arbitrary whitespace is permitted when the `x` flag is enabled. For
-    /// example, `{   5  , 6}` is equivalent to `{5,6}`.
-    fn bump_space(&self) {
-        if !self.ignore_whitespace() {
-            return;
-        }
-        while !self.is_eof() {
-            if self.char().is_whitespace() {
-                self.bump();
-            } else if self.char() == '#' {
-                let start = self.pos();
-                let mut comment_text = String::new();
-                self.bump();
-                while !self.is_eof() {
-                    let c = self.char();
-                    self.bump();
-                    if c == '\n' {
-                        break;
-                    }
-                    comment_text.push(c);
-                }
-                let comment = ast::Comment {
-                    span: Span::new(start, self.pos()),
-                    comment: comment_text,
-                };
-                self.parser().comments.borrow_mut().push(comment);
-            } else {
-                break;
-            }
-        }
-    }
-
-    /// Peek at the next character in the input without advancing the parser.
-    ///
-    /// If the input has been exhausted, then this returns `None`.
-    fn peek(&self) -> Option<char> {
-        if self.is_eof() {
-            return None;
-        }
-        self.pattern()[self.offset() + self.char().len_utf8()..].chars().next()
-    }
-
-    /// Like peek, but will ignore spaces when the parser is in whitespace
-    /// insensitive mode.
-    fn peek_space(&self) -> Option<char> {
-        if !self.ignore_whitespace() {
-            return self.peek();
-        }
-        if self.is_eof() {
-            return None;
-        }
-        let mut start = self.offset() + self.char().len_utf8();
-        let mut in_comment = false;
-        for (i, c) in self.pattern()[start..].char_indices() {
-            if c.is_whitespace() {
-                continue;
-            } else if !in_comment && c == '#' {
-                in_comment = true;
-            } else if in_comment && c == '\n' {
-                in_comment = false;
-            } else {
-                start += i;
-                break;
-            }
-        }
-        self.pattern()[start..].chars().next()
-    }
-
-    /// Returns true if the next call to `bump` would return false.
-    fn is_eof(&self) -> bool {
-        self.offset() == self.pattern().len()
-    }
-
-    /// Return the current position of the parser, which includes the offset,
-    /// line and column.
-    fn pos(&self) -> Position {
-        self.parser().pos.get()
-    }
-
-    /// Create a span at the current position of the parser. Both the start
-    /// and end of the span are set.
-    fn span(&self) -> Span {
-        Span::splat(self.pos())
-    }
-
-    /// Create a span that covers the current character.
-    fn span_char(&self) -> Span {
-        let mut next = Position {
-            offset: self.offset().checked_add(self.char().len_utf8()).unwrap(),
-            line: self.line(),
-            column: self.column().checked_add(1).unwrap(),
-        };
-        if self.char() == '\n' {
-            next.line += 1;
-            next.column = 1;
-        }
-        Span::new(self.pos(), next)
-    }
-
-    /// Parse and push a single alternation on to the parser's internal stack.
-    /// If the top of the stack already has an alternation, then add to that
-    /// instead of pushing a new one.
-    ///
-    /// The concatenation given corresponds to a single alternation branch.
-    /// The concatenation returned starts the next branch and is empty.
-    ///
-    /// This assumes the parser is currently positioned at `|` and will advance
-    /// the parser to the character following `|`.
-    #[inline(never)]
-    fn push_alternate(&self, mut concat: ast::Concat) -> Result<ast::Concat> {
-        assert_eq!(self.char(), '|');
-        concat.span.end = self.pos();
-        self.push_or_add_alternation(concat);
-        self.bump();
-        Ok(ast::Concat { span: self.span(), asts: vec![] })
-    }
-
-    /// Pushes or adds the given branch of an alternation to the parser's
-    /// internal stack of state.
-    fn push_or_add_alternation(&self, concat: ast::Concat) {
-        use self::GroupState::*;
-
-        let mut stack = self.parser().stack_group.borrow_mut();
-        if let Some(&mut Alternation(ref mut alts)) = stack.last_mut() {
-            alts.asts.push(concat.into_ast());
-            return;
-        }
-        stack.push(Alternation(ast::Alternation {
-            span: Span::new(concat.span.start, self.pos()),
-            asts: vec![concat.into_ast()],
-        }));
-    }
-
-    /// Parse and push a group AST (and its parent concatenation) on to the
-    /// parser's internal stack. Return a fresh concatenation corresponding
-    /// to the group's sub-AST.
-    ///
-    /// If a set of flags was found (with no group), then the concatenation
-    /// is returned with that set of flags added.
-    ///
-    /// This assumes that the parser is currently positioned on the opening
-    /// parenthesis. It advances the parser to the character at the start
-    /// of the sub-expression (or adjoining expression).
-    ///
-    /// If there was a problem parsing the start of the group, then an error
-    /// is returned.
-    #[inline(never)]
-    fn push_group(&self, mut concat: ast::Concat) -> Result<ast::Concat> {
-        assert_eq!(self.char(), '(');
-        match self.parse_group()? {
-            Either::Left(set) => {
-                let ignore = set.flags.flag_state(ast::Flag::IgnoreWhitespace);
-                if let Some(v) = ignore {
-                    self.parser().ignore_whitespace.set(v);
-                }
-
-                concat.asts.push(Ast::flags(set));
-                Ok(concat)
-            }
-            Either::Right(group) => {
-                let old_ignore_whitespace = self.ignore_whitespace();
-                let new_ignore_whitespace = group
-                    .flags()
-                    .and_then(|f| f.flag_state(ast::Flag::IgnoreWhitespace))
-                    .unwrap_or(old_ignore_whitespace);
-                self.parser().stack_group.borrow_mut().push(
-                    GroupState::Group {
-                        concat,
-                        group,
-                        ignore_whitespace: old_ignore_whitespace,
-                    },
-                );
-                self.parser().ignore_whitespace.set(new_ignore_whitespace);
-                Ok(ast::Concat { span: self.span(), asts: vec![] })
-            }
-        }
-    }
-
-    /// Pop a group AST from the parser's internal stack and set the group's
-    /// AST to the given concatenation. Return the concatenation containing
-    /// the group.
-    ///
-    /// This assumes that the parser is currently positioned on the closing
-    /// parenthesis and advances the parser to the character following the `)`.
-    ///
-    /// If no such group could be popped, then an unopened group error is
-    /// returned.
-    #[inline(never)]
-    fn pop_group(&self, mut group_concat: ast::Concat) -> Result<ast::Concat> {
-        use self::GroupState::*;
-
-        assert_eq!(self.char(), ')');
-        let mut stack = self.parser().stack_group.borrow_mut();
-        let (mut prior_concat, mut group, ignore_whitespace, alt) = match stack
-            .pop()
-        {
-            Some(Group { concat, group, ignore_whitespace }) => {
-                (concat, group, ignore_whitespace, None)
-            }
-            Some(Alternation(alt)) => match stack.pop() {
-                Some(Group { concat, group, ignore_whitespace }) => {
-                    (concat, group, ignore_whitespace, Some(alt))
-                }
-                None | Some(Alternation(_)) => {
-                    return Err(self.error(
-                        self.span_char(),
-                        ast::ErrorKind::GroupUnopened,
-                    ));
-                }
-            },
-            None => {
-                return Err(self
-                    .error(self.span_char(), ast::ErrorKind::GroupUnopened));
-            }
-        };
-        self.parser().ignore_whitespace.set(ignore_whitespace);
-        group_concat.span.end = self.pos();
-        self.bump();
-        group.span.end = self.pos();
-        match alt {
-            Some(mut alt) => {
-                alt.span.end = group_concat.span.end;
-                alt.asts.push(group_concat.into_ast());
-                group.ast = Box::new(alt.into_ast());
-            }
-            None => {
-                group.ast = Box::new(group_concat.into_ast());
-            }
-        }
-        prior_concat.asts.push(Ast::group(group));
-        Ok(prior_concat)
-    }
-
-    /// Pop the last state from the parser's internal stack, if it exists, and
-    /// add the given concatenation to it. There either must be no state or a
-    /// single alternation item on the stack. Any other scenario produces an
-    /// error.
-    ///
-    /// This assumes that the parser has advanced to the end.
-    #[inline(never)]
-    fn pop_group_end(&self, mut concat: ast::Concat) -> Result<Ast> {
-        concat.span.end = self.pos();
-        let mut stack = self.parser().stack_group.borrow_mut();
-        let ast = match stack.pop() {
-            None => Ok(concat.into_ast()),
-            Some(GroupState::Alternation(mut alt)) => {
-                alt.span.end = self.pos();
-                alt.asts.push(concat.into_ast());
-                Ok(Ast::alternation(alt))
-            }
-            Some(GroupState::Group { group, .. }) => {
-                return Err(
-                    self.error(group.span, ast::ErrorKind::GroupUnclosed)
-                );
-            }
-        };
-        // If we try to pop again, there should be nothing.
-        match stack.pop() {
-            None => ast,
-            Some(GroupState::Alternation(_)) => {
-                // This unreachable is unfortunate. This case can't happen
-                // because the only way we can be here is if there were two
-                // `GroupState::Alternation`s adjacent in the parser's stack,
-                // which we guarantee to never happen because we never push a
-                // `GroupState::Alternation` if one is already at the top of
-                // the stack.
-                unreachable!()
-            }
-            Some(GroupState::Group { group, .. }) => {
-                Err(self.error(group.span, ast::ErrorKind::GroupUnclosed))
-            }
-        }
-    }
-
-    /// Parse the opening of a character class and push the current class
-    /// parsing context onto the parser's stack. This assumes that the parser
-    /// is positioned at an opening `[`. The given union should correspond to
-    /// the union of set items built up before seeing the `[`.
-    ///
-    /// If there was a problem parsing the opening of the class, then an error
-    /// is returned. Otherwise, a new union of set items for the class is
-    /// returned (which may be populated with either a `]` or a `-`).
-    #[inline(never)]
-    fn push_class_open(
-        &self,
-        parent_union: ast::ClassSetUnion,
-    ) -> Result<ast::ClassSetUnion> {
-        assert_eq!(self.char(), '[');
-
-        let (nested_set, nested_union) = self.parse_set_class_open()?;
-        self.parser()
-            .stack_class
-            .borrow_mut()
-            .push(ClassState::Open { union: parent_union, set: nested_set });
-        Ok(nested_union)
-    }
-
-    /// Parse the end of a character class set and pop the character class
-    /// parser stack. The union given corresponds to the last union built
-    /// before seeing the closing `]`. The union returned corresponds to the
-    /// parent character class set with the nested class added to it.
-    ///
-    /// This assumes that the parser is positioned at a `]` and will advance
-    /// the parser to the byte immediately following the `]`.
-    ///
-    /// If the stack is empty after popping, then this returns the final
-    /// "top-level" character class AST (where a "top-level" character class
-    /// is one that is not nested inside any other character class).
-    ///
-    /// If there is no corresponding opening bracket on the parser's stack,
-    /// then an error is returned.
-    #[inline(never)]
-    fn pop_class(
-        &self,
-        nested_union: ast::ClassSetUnion,
-    ) -> Result<Either<ast::ClassSetUnion, ast::ClassBracketed>> {
-        assert_eq!(self.char(), ']');
-
-        let item = ast::ClassSet::Item(nested_union.into_item());
-        let prevset = self.pop_class_op(item);
-        let mut stack = self.parser().stack_class.borrow_mut();
-        match stack.pop() {
-            None => {
-                // We can never observe an empty stack:
-                //
-                // 1) We are guaranteed to start with a non-empty stack since
-                //    the character class parser is only initiated when it sees
-                //    a `[`.
-                // 2) If we ever observe an empty stack while popping after
-                //    seeing a `]`, then we signal the character class parser
-                //    to terminate.
-                panic!("unexpected empty character class stack")
-            }
-            Some(ClassState::Op { .. }) => {
-                // This panic is unfortunate, but this case is impossible
-                // since we already popped the Op state if one exists above.
-                // Namely, every push to the class parser stack is guarded by
-                // whether an existing Op is already on the top of the stack.
-                // If it is, the existing Op is modified. That is, the stack
-                // can never have consecutive Op states.
-                panic!("unexpected ClassState::Op")
-            }
-            Some(ClassState::Open { mut union, mut set }) => {
-                self.bump();
-                set.span.end = self.pos();
-                set.kind = prevset;
-                if stack.is_empty() {
-                    Ok(Either::Right(set))
-                } else {
-                    union.push(ast::ClassSetItem::Bracketed(Box::new(set)));
-                    Ok(Either::Left(union))
-                }
-            }
-        }
-    }
-
-    /// Return an "unclosed class" error whose span points to the most
-    /// recently opened class.
-    ///
-    /// This should only be called while parsing a character class.
-    #[inline(never)]
-    fn unclosed_class_error(&self) -> ast::Error {
-        for state in self.parser().stack_class.borrow().iter().rev() {
-            if let ClassState::Open { ref set, .. } = *state {
-                return self.error(set.span, ast::ErrorKind::ClassUnclosed);
-            }
-        }
-        // We are guaranteed to have a non-empty stack with at least
-        // one open bracket, so we should never get here.
-        panic!("no open character class found")
-    }
-
-    /// Push the current set of class items on to the class parser's stack as
-    /// the left hand side of the given operator.
-    ///
-    /// A fresh set union is returned, which should be used to build the right
-    /// hand side of this operator.
-    #[inline(never)]
-    fn push_class_op(
-        &self,
-        next_kind: ast::ClassSetBinaryOpKind,
-        next_union: ast::ClassSetUnion,
-    ) -> ast::ClassSetUnion {
-        let item = ast::ClassSet::Item(next_union.into_item());
-        let new_lhs = self.pop_class_op(item);
-        self.parser()
-            .stack_class
-            .borrow_mut()
-            .push(ClassState::Op { kind: next_kind, lhs: new_lhs });
-        ast::ClassSetUnion { span: self.span(), items: vec![] }
-    }
-
-    /// Pop a character class set from the character class parser stack. If the
-    /// top of the stack is just an item (not an operation), then return the
-    /// given set unchanged. If the top of the stack is an operation, then the
-    /// given set will be used as the rhs of the operation on the top of the
-    /// stack. In that case, the binary operation is returned as a set.
-    #[inline(never)]
-    fn pop_class_op(&self, rhs: ast::ClassSet) -> ast::ClassSet {
-        let mut stack = self.parser().stack_class.borrow_mut();
-        let (kind, lhs) = match stack.pop() {
-            Some(ClassState::Op { kind, lhs }) => (kind, lhs),
-            Some(state @ ClassState::Open { .. }) => {
-                stack.push(state);
-                return rhs;
-            }
-            None => unreachable!(),
-        };
-        let span = Span::new(lhs.span().start, rhs.span().end);
-        ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp {
-            span,
-            kind,
-            lhs: Box::new(lhs),
-            rhs: Box::new(rhs),
-        })
-    }
-}
-
-impl<'s, P: Borrow<Parser>> ParserI<'s, P> {
-    /// Parse the regular expression into an abstract syntax tree.
-    fn parse(&self) -> Result<Ast> {
-        self.parse_with_comments().map(|astc| astc.ast)
-    }
-
-    /// Parse the regular expression and return an abstract syntax tree with
-    /// all of the comments found in the pattern.
-    fn parse_with_comments(&self) -> Result<ast::WithComments> {
-        assert_eq!(self.offset(), 0, "parser can only be used once");
-        self.parser().reset();
-        let mut concat = ast::Concat { span: self.span(), asts: vec![] };
-        loop {
-            self.bump_space();
-            if self.is_eof() {
-                break;
-            }
-            match self.char() {
-                '(' => concat = self.push_group(concat)?,
-                ')' => concat = self.pop_group(concat)?,
-                '|' => concat = self.push_alternate(concat)?,
-                '[' => {
-                    let class = self.parse_set_class()?;
-                    concat.asts.push(Ast::class_bracketed(class));
-                }
-                '?' => {
-                    concat = self.parse_uncounted_repetition(
-                        concat,
-                        ast::RepetitionKind::ZeroOrOne,
-                    )?;
-                }
-                '*' => {
-                    concat = self.parse_uncounted_repetition(
-                        concat,
-                        ast::RepetitionKind::ZeroOrMore,
-                    )?;
-                }
-                '+' => {
-                    concat = self.parse_uncounted_repetition(
-                        concat,
-                        ast::RepetitionKind::OneOrMore,
-                    )?;
-                }
-                '{' => {
-                    concat = self.parse_counted_repetition(concat)?;
-                }
-                _ => concat.asts.push(self.parse_primitive()?.into_ast()),
-            }
-        }
-        let ast = self.pop_group_end(concat)?;
-        NestLimiter::new(self).check(&ast)?;
-        Ok(ast::WithComments {
-            ast,
-            comments: mem::replace(
-                &mut *self.parser().comments.borrow_mut(),
-                vec![],
-            ),
-        })
-    }
-
-    /// Parses an uncounted repetition operation. An uncounted repetition
-    /// operator includes ?, * and +, but does not include the {m,n} syntax.
-    /// The given `kind` should correspond to the operator observed by the
-    /// caller.
-    ///
-    /// This assumes that the parser is currently positioned at the repetition
-    /// operator and advances the parser to the first character after the
-    /// operator. (Note that the operator may include a single additional `?`,
-    /// which makes the operator ungreedy.)
-    ///
-    /// The caller should include the concatenation that is being built. The
-    /// concatenation returned includes the repetition operator applied to the
-    /// last expression in the given concatenation.
-    #[inline(never)]
-    fn parse_uncounted_repetition(
-        &self,
-        mut concat: ast::Concat,
-        kind: ast::RepetitionKind,
-    ) -> Result<ast::Concat> {
-        assert!(
-            self.char() == '?' || self.char() == '*' || self.char() == '+'
-        );
-        let op_start = self.pos();
-        let ast = match concat.asts.pop() {
-            Some(ast) => ast,
-            None => {
-                return Err(
-                    self.error(self.span(), ast::ErrorKind::RepetitionMissing)
-                )
-            }
-        };
-        match ast {
-            Ast::Empty(_) | Ast::Flags(_) => {
-                return Err(
-                    self.error(self.span(), ast::ErrorKind::RepetitionMissing)
-                )
-            }
-            _ => {}
-        }
-        let mut greedy = true;
-        if self.bump() && self.char() == '?' {
-            greedy = false;
-            self.bump();
-        }
-        concat.asts.push(Ast::repetition(ast::Repetition {
-            span: ast.span().with_end(self.pos()),
-            op: ast::RepetitionOp {
-                span: Span::new(op_start, self.pos()),
-                kind,
-            },
-            greedy,
-            ast: Box::new(ast),
-        }));
-        Ok(concat)
-    }
-
-    /// Parses a counted repetition operation. A counted repetition operator
-    /// corresponds to the {m,n} syntax, and does not include the ?, * or +
-    /// operators.
-    ///
-    /// This assumes that the parser is currently positioned at the opening `{`
-    /// and advances the parser to the first character after the operator.
-    /// (Note that the operator may include a single additional `?`, which
-    /// makes the operator ungreedy.)
-    ///
-    /// The caller should include the concatenation that is being built. The
-    /// concatenation returned includes the repetition operator applied to the
-    /// last expression in the given concatenation.
-    #[inline(never)]
-    fn parse_counted_repetition(
-        &self,
-        mut concat: ast::Concat,
-    ) -> Result<ast::Concat> {
-        assert!(self.char() == '{');
-        let start = self.pos();
-        let ast = match concat.asts.pop() {
-            Some(ast) => ast,
-            None => {
-                return Err(
-                    self.error(self.span(), ast::ErrorKind::RepetitionMissing)
-                )
-            }
-        };
-        match ast {
-            Ast::Empty(_) | Ast::Flags(_) => {
-                return Err(
-                    self.error(self.span(), ast::ErrorKind::RepetitionMissing)
-                )
-            }
-            _ => {}
-        }
-        if !self.bump_and_bump_space() {
-            return Err(self.error(
-                Span::new(start, self.pos()),
-                ast::ErrorKind::RepetitionCountUnclosed,
-            ));
-        }
-        let count_start = specialize_err(
-            self.parse_decimal(),
-            ast::ErrorKind::DecimalEmpty,
-            ast::ErrorKind::RepetitionCountDecimalEmpty,
-        );
-        if self.is_eof() {
-            return Err(self.error(
-                Span::new(start, self.pos()),
-                ast::ErrorKind::RepetitionCountUnclosed,
-            ));
-        }
-        let range = if self.char() == ',' {
-            if !self.bump_and_bump_space() {
-                return Err(self.error(
-                    Span::new(start, self.pos()),
-                    ast::ErrorKind::RepetitionCountUnclosed,
-                ));
-            }
-            if self.char() != '}' {
-                let count_start = match count_start {
-                    Ok(c) => c,
-                    Err(err)
-                        if err.kind
-                            == ast::ErrorKind::RepetitionCountDecimalEmpty =>
-                    {
-                        if self.parser().empty_min_range {
-                            0
-                        } else {
-                            return Err(err);
-                        }
-                    }
-                    err => err?,
-                };
-                let count_end = specialize_err(
-                    self.parse_decimal(),
-                    ast::ErrorKind::DecimalEmpty,
-                    ast::ErrorKind::RepetitionCountDecimalEmpty,
-                )?;
-                ast::RepetitionRange::Bounded(count_start, count_end)
-            } else {
-                ast::RepetitionRange::AtLeast(count_start?)
-            }
-        } else {
-            ast::RepetitionRange::Exactly(count_start?)
-        };
-
-        if self.is_eof() || self.char() != '}' {
-            return Err(self.error(
-                Span::new(start, self.pos()),
-                ast::ErrorKind::RepetitionCountUnclosed,
-            ));
-        }
-
-        let mut greedy = true;
-        if self.bump_and_bump_space() && self.char() == '?' {
-            greedy = false;
-            self.bump();
-        }
-
-        let op_span = Span::new(start, self.pos());
-        if !range.is_valid() {
-            return Err(
-                self.error(op_span, ast::ErrorKind::RepetitionCountInvalid)
-            );
-        }
-        concat.asts.push(Ast::repetition(ast::Repetition {
-            span: ast.span().with_end(self.pos()),
-            op: ast::RepetitionOp {
-                span: op_span,
-                kind: ast::RepetitionKind::Range(range),
-            },
-            greedy,
-            ast: Box::new(ast),
-        }));
-        Ok(concat)
-    }
-
-    /// Parse a group (which contains a sub-expression) or a set of flags.
-    ///
-    /// If a group was found, then it is returned with an empty AST. If a set
-    /// of flags is found, then that set is returned.
-    ///
-    /// The parser should be positioned at the opening parenthesis.
-    ///
-    /// This advances the parser to the character before the start of the
-    /// sub-expression (in the case of a group) or to the closing parenthesis
-    /// immediately following the set of flags.
-    ///
-    /// # Errors
-    ///
-    /// If flags are given and incorrectly specified, then a corresponding
-    /// error is returned.
-    ///
-    /// If a capture name is given and it is incorrectly specified, then a
-    /// corresponding error is returned.
-    #[inline(never)]
-    fn parse_group(&self) -> Result<Either<ast::SetFlags, ast::Group>> {
-        assert_eq!(self.char(), '(');
-        let open_span = self.span_char();
-        self.bump();
-        self.bump_space();
-        if self.is_lookaround_prefix() {
-            return Err(self.error(
-                Span::new(open_span.start, self.span().end),
-                ast::ErrorKind::UnsupportedLookAround,
-            ));
-        }
-        let inner_span = self.span();
-        let mut starts_with_p = true;
-        if self.bump_if("?P<") || {
-            starts_with_p = false;
-            self.bump_if("?<")
-        } {
-            let capture_index = self.next_capture_index(open_span)?;
-            let name = self.parse_capture_name(capture_index)?;
-            Ok(Either::Right(ast::Group {
-                span: open_span,
-                kind: ast::GroupKind::CaptureName { starts_with_p, name },
-                ast: Box::new(Ast::empty(self.span())),
-            }))
-        } else if self.bump_if("?") {
-            if self.is_eof() {
-                return Err(
-                    self.error(open_span, ast::ErrorKind::GroupUnclosed)
-                );
-            }
-            let flags = self.parse_flags()?;
-            let char_end = self.char();
-            self.bump();
-            if char_end == ')' {
-                // We don't allow empty flags, e.g., `(?)`. We instead
-                // interpret it as a repetition operator missing its argument.
-                if flags.items.is_empty() {
-                    return Err(self.error(
-                        inner_span,
-                        ast::ErrorKind::RepetitionMissing,
-                    ));
-                }
-                Ok(Either::Left(ast::SetFlags {
-                    span: Span { end: self.pos(), ..open_span },
-                    flags,
-                }))
-            } else {
-                assert_eq!(char_end, ':');
-                Ok(Either::Right(ast::Group {
-                    span: open_span,
-                    kind: ast::GroupKind::NonCapturing(flags),
-                    ast: Box::new(Ast::empty(self.span())),
-                }))
-            }
-        } else {
-            let capture_index = self.next_capture_index(open_span)?;
-            Ok(Either::Right(ast::Group {
-                span: open_span,
-                kind: ast::GroupKind::CaptureIndex(capture_index),
-                ast: Box::new(Ast::empty(self.span())),
-            }))
-        }
-    }
-
-    /// Parses a capture group name. Assumes that the parser is positioned at
-    /// the first character in the name following the opening `<` (and may
-    /// possibly be EOF). This advances the parser to the first character
-    /// following the closing `>`.
-    ///
-    /// The caller must provide the capture index of the group for this name.
-    #[inline(never)]
-    fn parse_capture_name(
-        &self,
-        capture_index: u32,
-    ) -> Result<ast::CaptureName> {
-        if self.is_eof() {
-            return Err(self
-                .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof));
-        }
-        let start = self.pos();
-        loop {
-            if self.char() == '>' {
-                break;
-            }
-            if !is_capture_char(self.char(), self.pos() == start) {
-                return Err(self.error(
-                    self.span_char(),
-                    ast::ErrorKind::GroupNameInvalid,
-                ));
-            }
-            if !self.bump() {
-                break;
-            }
-        }
-        let end = self.pos();
-        if self.is_eof() {
-            return Err(self
-                .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof));
-        }
-        assert_eq!(self.char(), '>');
-        self.bump();
-        let name = &self.pattern()[start.offset..end.offset];
-        if name.is_empty() {
-            return Err(self.error(
-                Span::new(start, start),
-                ast::ErrorKind::GroupNameEmpty,
-            ));
-        }
-        let capname = ast::CaptureName {
-            span: Span::new(start, end),
-            name: name.to_string(),
-            index: capture_index,
-        };
-        self.add_capture_name(&capname)?;
-        Ok(capname)
-    }
-
-    /// Parse a sequence of flags starting at the current character.
-    ///
-    /// This advances the parser to the character immediately following the
-    /// flags, which is guaranteed to be either `:` or `)`.
-    ///
-    /// # Errors
-    ///
-    /// If any flags are duplicated, then an error is returned.
-    ///
-    /// If the negation operator is used more than once, then an error is
-    /// returned.
-    ///
-    /// If no flags could be found or if the negation operation is not followed
-    /// by any flags, then an error is returned.
-    #[inline(never)]
-    fn parse_flags(&self) -> Result<ast::Flags> {
-        let mut flags = ast::Flags { span: self.span(), items: vec![] };
-        let mut last_was_negation = None;
-        while self.char() != ':' && self.char() != ')' {
-            if self.char() == '-' {
-                last_was_negation = Some(self.span_char());
-                let item = ast::FlagsItem {
-                    span: self.span_char(),
-                    kind: ast::FlagsItemKind::Negation,
-                };
-                if let Some(i) = flags.add_item(item) {
-                    return Err(self.error(
-                        self.span_char(),
-                        ast::ErrorKind::FlagRepeatedNegation {
-                            original: flags.items[i].span,
-                        },
-                    ));
-                }
-            } else {
-                last_was_negation = None;
-                let item = ast::FlagsItem {
-                    span: self.span_char(),
-                    kind: ast::FlagsItemKind::Flag(self.parse_flag()?),
-                };
-                if let Some(i) = flags.add_item(item) {
-                    return Err(self.error(
-                        self.span_char(),
-                        ast::ErrorKind::FlagDuplicate {
-                            original: flags.items[i].span,
-                        },
-                    ));
-                }
-            }
-            if !self.bump() {
-                return Err(
-                    self.error(self.span(), ast::ErrorKind::FlagUnexpectedEof)
-                );
-            }
-        }
-        if let Some(span) = last_was_negation {
-            return Err(self.error(span, ast::ErrorKind::FlagDanglingNegation));
-        }
-        flags.span.end = self.pos();
-        Ok(flags)
-    }
-
-    /// Parse the current character as a flag. Do not advance the parser.
-    ///
-    /// # Errors
-    ///
-    /// If the flag is not recognized, then an error is returned.
-    #[inline(never)]
-    fn parse_flag(&self) -> Result<ast::Flag> {
-        match self.char() {
-            'i' => Ok(ast::Flag::CaseInsensitive),
-            'm' => Ok(ast::Flag::MultiLine),
-            's' => Ok(ast::Flag::DotMatchesNewLine),
-            'U' => Ok(ast::Flag::SwapGreed),
-            'u' => Ok(ast::Flag::Unicode),
-            'R' => Ok(ast::Flag::CRLF),
-            'x' => Ok(ast::Flag::IgnoreWhitespace),
-            _ => {
-                Err(self
-                    .error(self.span_char(), ast::ErrorKind::FlagUnrecognized))
-            }
-        }
-    }
-
-    /// Parse a primitive AST. e.g., A literal, non-set character class or
-    /// assertion.
-    ///
-    /// This assumes that the parser expects a primitive at the current
-    /// location. i.e., All other non-primitive cases have been handled.
-    /// For example, if the parser's position is at `|`, then `|` will be
-    /// treated as a literal (e.g., inside a character class).
-    ///
-    /// This advances the parser to the first character immediately following
-    /// the primitive.
-    fn parse_primitive(&self) -> Result<Primitive> {
-        match self.char() {
-            '\\' => self.parse_escape(),
-            '.' => {
-                let ast = Primitive::Dot(self.span_char());
-                self.bump();
-                Ok(ast)
-            }
-            '^' => {
-                let ast = Primitive::Assertion(ast::Assertion {
-                    span: self.span_char(),
-                    kind: ast::AssertionKind::StartLine,
-                });
-                self.bump();
-                Ok(ast)
-            }
-            '$' => {
-                let ast = Primitive::Assertion(ast::Assertion {
-                    span: self.span_char(),
-                    kind: ast::AssertionKind::EndLine,
-                });
-                self.bump();
-                Ok(ast)
-            }
-            c => {
-                let ast = Primitive::Literal(ast::Literal {
-                    span: self.span_char(),
-                    kind: ast::LiteralKind::Verbatim,
-                    c,
-                });
-                self.bump();
-                Ok(ast)
-            }
-        }
-    }
-
-    /// Parse an escape sequence as a primitive AST.
-    ///
-    /// This assumes the parser is positioned at the start of the escape
-    /// sequence, i.e., `\`. It advances the parser to the first position
-    /// immediately following the escape sequence.
-    #[inline(never)]
-    fn parse_escape(&self) -> Result<Primitive> {
-        assert_eq!(self.char(), '\\');
-        let start = self.pos();
-        if !self.bump() {
-            return Err(self.error(
-                Span::new(start, self.pos()),
-                ast::ErrorKind::EscapeUnexpectedEof,
-            ));
-        }
-        let c = self.char();
-        // Put some of the more complicated routines into helpers.
-        match c {
-            '0'..='7' => {
-                if !self.parser().octal {
-                    return Err(self.error(
-                        Span::new(start, self.span_char().end),
-                        ast::ErrorKind::UnsupportedBackreference,
-                    ));
-                }
-                let mut lit = self.parse_octal();
-                lit.span.start = start;
-                return Ok(Primitive::Literal(lit));
-            }
-            '8'..='9' if !self.parser().octal => {
-                return Err(self.error(
-                    Span::new(start, self.span_char().end),
-                    ast::ErrorKind::UnsupportedBackreference,
-                ));
-            }
-            'x' | 'u' | 'U' => {
-                let mut lit = self.parse_hex()?;
-                lit.span.start = start;
-                return Ok(Primitive::Literal(lit));
-            }
-            'p' | 'P' => {
-                let mut cls = self.parse_unicode_class()?;
-                cls.span.start = start;
-                return Ok(Primitive::Unicode(cls));
-            }
-            'd' | 's' | 'w' | 'D' | 'S' | 'W' => {
-                let mut cls = self.parse_perl_class();
-                cls.span.start = start;
-                return Ok(Primitive::Perl(cls));
-            }
-            _ => {}
-        }
-
-        // Handle all of the one letter sequences inline.
-        self.bump();
-        let span = Span::new(start, self.pos());
-        if is_meta_character(c) {
-            return Ok(Primitive::Literal(ast::Literal {
-                span,
-                kind: ast::LiteralKind::Meta,
-                c,
-            }));
-        }
-        if is_escapeable_character(c) {
-            return Ok(Primitive::Literal(ast::Literal {
-                span,
-                kind: ast::LiteralKind::Superfluous,
-                c,
-            }));
-        }
-        let special = |kind, c| {
-            Ok(Primitive::Literal(ast::Literal {
-                span,
-                kind: ast::LiteralKind::Special(kind),
-                c,
-            }))
-        };
-        match c {
-            'a' => special(ast::SpecialLiteralKind::Bell, '\x07'),
-            'f' => special(ast::SpecialLiteralKind::FormFeed, '\x0C'),
-            't' => special(ast::SpecialLiteralKind::Tab, '\t'),
-            'n' => special(ast::SpecialLiteralKind::LineFeed, '\n'),
-            'r' => special(ast::SpecialLiteralKind::CarriageReturn, '\r'),
-            'v' => special(ast::SpecialLiteralKind::VerticalTab, '\x0B'),
-            'A' => Ok(Primitive::Assertion(ast::Assertion {
-                span,
-                kind: ast::AssertionKind::StartText,
-            })),
-            'z' => Ok(Primitive::Assertion(ast::Assertion {
-                span,
-                kind: ast::AssertionKind::EndText,
-            })),
-            'b' => {
-                let mut wb = ast::Assertion {
-                    span,
-                    kind: ast::AssertionKind::WordBoundary,
-                };
-                // After a \b, we "try" to parse things like \b{start} for
-                // special word boundary assertions.
-                if !self.is_eof() && self.char() == '{' {
-                    if let Some(kind) =
-                        self.maybe_parse_special_word_boundary(start)?
-                    {
-                        wb.kind = kind;
-                        wb.span.end = self.pos();
-                    }
-                }
-                Ok(Primitive::Assertion(wb))
-            }
-            'B' => Ok(Primitive::Assertion(ast::Assertion {
-                span,
-                kind: ast::AssertionKind::NotWordBoundary,
-            })),
-            '<' => Ok(Primitive::Assertion(ast::Assertion {
-                span,
-                kind: ast::AssertionKind::WordBoundaryStartAngle,
-            })),
-            '>' => Ok(Primitive::Assertion(ast::Assertion {
-                span,
-                kind: ast::AssertionKind::WordBoundaryEndAngle,
-            })),
-            _ => Err(self.error(span, ast::ErrorKind::EscapeUnrecognized)),
-        }
-    }
-
-    /// Attempt to parse a specialty word boundary. That is, `\b{start}`,
-    /// `\b{end}`, `\b{start-half}` or `\b{end-half}`.
-    ///
-    /// This is similar to `maybe_parse_ascii_class` in that, in most cases,
-    /// if it fails it will just return `None` with no error. This is done
-    /// because `\b{5}` is a valid expression and we want to let that be parsed
-    /// by the existing counted repetition parsing code. (I thought about just
-    /// invoking the counted repetition code from here, but it seemed a little
-    /// ham-fisted.)
-    ///
-    /// Unlike `maybe_parse_ascii_class` though, this can return an error.
-    /// Namely, if we definitely know it isn't a counted repetition, then we
-    /// return an error specific to the specialty word boundaries.
-    ///
-    /// This assumes the parser is positioned at a `{` immediately following
-    /// a `\b`. When `None` is returned, the parser is returned to the position
-    /// at which it started: pointing at a `{`.
-    ///
-    /// The position given should correspond to the start of the `\b`.
-    fn maybe_parse_special_word_boundary(
-        &self,
-        wb_start: Position,
-    ) -> Result<Option<ast::AssertionKind>> {
-        assert_eq!(self.char(), '{');
-
-        let is_valid_char = |c| match c {
-            'A'..='Z' | 'a'..='z' | '-' => true,
-            _ => false,
-        };
-        let start = self.pos();
-        if !self.bump_and_bump_space() {
-            return Err(self.error(
-                Span::new(wb_start, self.pos()),
-                ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof,
-            ));
-        }
-        let start_contents = self.pos();
-        // This is one of the critical bits: if the first non-whitespace
-        // character isn't in [-A-Za-z] (i.e., this can't be a special word
-        // boundary), then we bail and let the counted repetition parser deal
-        // with this.
-        if !is_valid_char(self.char()) {
-            self.parser().pos.set(start);
-            return Ok(None);
-        }
-
-        // Now collect up our chars until we see a '}'.
-        let mut scratch = self.parser().scratch.borrow_mut();
-        scratch.clear();
-        while !self.is_eof() && is_valid_char(self.char()) {
-            scratch.push(self.char());
-            self.bump_and_bump_space();
-        }
-        if self.is_eof() || self.char() != '}' {
-            return Err(self.error(
-                Span::new(start, self.pos()),
-                ast::ErrorKind::SpecialWordBoundaryUnclosed,
-            ));
-        }
-        let end = self.pos();
-        self.bump();
-        let kind = match scratch.as_str() {
-            "start" => ast::AssertionKind::WordBoundaryStart,
-            "end" => ast::AssertionKind::WordBoundaryEnd,
-            "start-half" => ast::AssertionKind::WordBoundaryStartHalf,
-            "end-half" => ast::AssertionKind::WordBoundaryEndHalf,
-            _ => {
-                return Err(self.error(
-                    Span::new(start_contents, end),
-                    ast::ErrorKind::SpecialWordBoundaryUnrecognized,
-                ))
-            }
-        };
-        Ok(Some(kind))
-    }
-
-    /// Parse an octal representation of a Unicode codepoint up to 3 digits
-    /// long. This expects the parser to be positioned at the first octal
-    /// digit and advances the parser to the first character immediately
-    /// following the octal number. This also assumes that parsing octal
-    /// escapes is enabled.
-    ///
-    /// Assuming the preconditions are met, this routine can never fail.
-    #[inline(never)]
-    fn parse_octal(&self) -> ast::Literal {
-        assert!(self.parser().octal);
-        assert!('0' <= self.char() && self.char() <= '7');
-        let start = self.pos();
-        // Parse up to two more digits.
-        while self.bump()
-            && '0' <= self.char()
-            && self.char() <= '7'
-            && self.pos().offset - start.offset <= 2
-        {}
-        let end = self.pos();
-        let octal = &self.pattern()[start.offset..end.offset];
-        // Parsing the octal should never fail since the above guarantees a
-        // valid number.
-        let codepoint =
-            u32::from_str_radix(octal, 8).expect("valid octal number");
-        // The max value for 3 digit octal is 0777 = 511 and [0, 511] has no
-        // invalid Unicode scalar values.
-        let c = char::from_u32(codepoint).expect("Unicode scalar value");
-        ast::Literal {
-            span: Span::new(start, end),
-            kind: ast::LiteralKind::Octal,
-            c,
-        }
-    }
-
-    /// Parse a hex representation of a Unicode codepoint. This handles both
-    /// hex notations, i.e., `\xFF` and `\x{FFFF}`. This expects the parser to
-    /// be positioned at the `x`, `u` or `U` prefix. The parser is advanced to
-    /// the first character immediately following the hexadecimal literal.
-    #[inline(never)]
-    fn parse_hex(&self) -> Result<ast::Literal> {
-        assert!(
-            self.char() == 'x' || self.char() == 'u' || self.char() == 'U'
-        );
-
-        let hex_kind = match self.char() {
-            'x' => ast::HexLiteralKind::X,
-            'u' => ast::HexLiteralKind::UnicodeShort,
-            _ => ast::HexLiteralKind::UnicodeLong,
-        };
-        if !self.bump_and_bump_space() {
-            return Err(
-                self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)
-            );
-        }
-        if self.char() == '{' {
-            self.parse_hex_brace(hex_kind)
-        } else {
-            self.parse_hex_digits(hex_kind)
-        }
-    }
-
-    /// Parse an N-digit hex representation of a Unicode codepoint. This
-    /// expects the parser to be positioned at the first digit and will advance
-    /// the parser to the first character immediately following the escape
-    /// sequence.
-    ///
-    /// The number of digits given must be 2 (for `\xNN`), 4 (for `\uNNNN`)
-    /// or 8 (for `\UNNNNNNNN`).
-    #[inline(never)]
-    fn parse_hex_digits(
-        &self,
-        kind: ast::HexLiteralKind,
-    ) -> Result<ast::Literal> {
-        let mut scratch = self.parser().scratch.borrow_mut();
-        scratch.clear();
-
-        let start = self.pos();
-        for i in 0..kind.digits() {
-            if i > 0 && !self.bump_and_bump_space() {
-                return Err(self
-                    .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof));
-            }
-            if !is_hex(self.char()) {
-                return Err(self.error(
-                    self.span_char(),
-                    ast::ErrorKind::EscapeHexInvalidDigit,
-                ));
-            }
-            scratch.push(self.char());
-        }
-        // The final bump just moves the parser past the literal, which may
-        // be EOF.
-        self.bump_and_bump_space();
-        let end = self.pos();
-        let hex = scratch.as_str();
-        match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) {
-            None => Err(self.error(
-                Span::new(start, end),
-                ast::ErrorKind::EscapeHexInvalid,
-            )),
-            Some(c) => Ok(ast::Literal {
-                span: Span::new(start, end),
-                kind: ast::LiteralKind::HexFixed(kind),
-                c,
-            }),
-        }
-    }
-
-    /// Parse a hex representation of any Unicode scalar value. This expects
-    /// the parser to be positioned at the opening brace `{` and will advance
-    /// the parser to the first character following the closing brace `}`.
-    #[inline(never)]
-    fn parse_hex_brace(
-        &self,
-        kind: ast::HexLiteralKind,
-    ) -> Result<ast::Literal> {
-        let mut scratch = self.parser().scratch.borrow_mut();
-        scratch.clear();
-
-        let brace_pos = self.pos();
-        let start = self.span_char().end;
-        while self.bump_and_bump_space() && self.char() != '}' {
-            if !is_hex(self.char()) {
-                return Err(self.error(
-                    self.span_char(),
-                    ast::ErrorKind::EscapeHexInvalidDigit,
-                ));
-            }
-            scratch.push(self.char());
-        }
-        if self.is_eof() {
-            return Err(self.error(
-                Span::new(brace_pos, self.pos()),
-                ast::ErrorKind::EscapeUnexpectedEof,
-            ));
-        }
-        let end = self.pos();
-        let hex = scratch.as_str();
-        assert_eq!(self.char(), '}');
-        self.bump_and_bump_space();
-
-        if hex.is_empty() {
-            return Err(self.error(
-                Span::new(brace_pos, self.pos()),
-                ast::ErrorKind::EscapeHexEmpty,
-            ));
-        }
-        match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) {
-            None => Err(self.error(
-                Span::new(start, end),
-                ast::ErrorKind::EscapeHexInvalid,
-            )),
-            Some(c) => Ok(ast::Literal {
-                span: Span::new(start, self.pos()),
-                kind: ast::LiteralKind::HexBrace(kind),
-                c,
-            }),
-        }
-    }
-
-    /// Parse a decimal number into a u32 while trimming leading and trailing
-    /// whitespace.
-    ///
-    /// This expects the parser to be positioned at the first position where
-    /// a decimal digit could occur. This will advance the parser to the byte
-    /// immediately following the last contiguous decimal digit.
-    ///
-    /// If no decimal digit could be found or if there was a problem parsing
-    /// the complete set of digits into a u32, then an error is returned.
-    fn parse_decimal(&self) -> Result<u32> {
-        let mut scratch = self.parser().scratch.borrow_mut();
-        scratch.clear();
-
-        while !self.is_eof() && self.char().is_whitespace() {
-            self.bump();
-        }
-        let start = self.pos();
-        while !self.is_eof() && '0' <= self.char() && self.char() <= '9' {
-            scratch.push(self.char());
-            self.bump_and_bump_space();
-        }
-        let span = Span::new(start, self.pos());
-        while !self.is_eof() && self.char().is_whitespace() {
-            self.bump_and_bump_space();
-        }
-        let digits = scratch.as_str();
-        if digits.is_empty() {
-            return Err(self.error(span, ast::ErrorKind::DecimalEmpty));
-        }
-        match u32::from_str_radix(digits, 10).ok() {
-            Some(n) => Ok(n),
-            None => Err(self.error(span, ast::ErrorKind::DecimalInvalid)),
-        }
-    }
-
-    /// Parse a standard character class consisting primarily of characters or
-    /// character ranges, but can also contain nested character classes of
-    /// any type (sans `.`).
-    ///
-    /// This assumes the parser is positioned at the opening `[`. If parsing
-    /// is successful, then the parser is advanced to the position immediately
-    /// following the closing `]`.
-    #[inline(never)]
-    fn parse_set_class(&self) -> Result<ast::ClassBracketed> {
-        assert_eq!(self.char(), '[');
-
-        let mut union =
-            ast::ClassSetUnion { span: self.span(), items: vec![] };
-        loop {
-            self.bump_space();
-            if self.is_eof() {
-                return Err(self.unclosed_class_error());
-            }
-            match self.char() {
-                '[' => {
-                    // If we've already parsed the opening bracket, then
-                    // attempt to treat this as the beginning of an ASCII
-                    // class. If ASCII class parsing fails, then the parser
-                    // backs up to `[`.
-                    if !self.parser().stack_class.borrow().is_empty() {
-                        if let Some(cls) = self.maybe_parse_ascii_class() {
-                            union.push(ast::ClassSetItem::Ascii(cls));
-                            continue;
-                        }
-                    }
-                    union = self.push_class_open(union)?;
-                }
-                ']' => match self.pop_class(union)? {
-                    Either::Left(nested_union) => {
-                        union = nested_union;
-                    }
-                    Either::Right(class) => return Ok(class),
-                },
-                '&' if self.peek() == Some('&') => {
-                    assert!(self.bump_if("&&"));
-                    union = self.push_class_op(
-                        ast::ClassSetBinaryOpKind::Intersection,
-                        union,
-                    );
-                }
-                '-' if self.peek() == Some('-') => {
-                    assert!(self.bump_if("--"));
-                    union = self.push_class_op(
-                        ast::ClassSetBinaryOpKind::Difference,
-                        union,
-                    );
-                }
-                '~' if self.peek() == Some('~') => {
-                    assert!(self.bump_if("~~"));
-                    union = self.push_class_op(
-                        ast::ClassSetBinaryOpKind::SymmetricDifference,
-                        union,
-                    );
-                }
-                _ => {
-                    union.push(self.parse_set_class_range()?);
-                }
-            }
-        }
-    }
-
-    /// Parse a single primitive item in a character class set. The item to
-    /// be parsed can either be one of a simple literal character, a range
-    /// between two simple literal characters or a "primitive" character
-    /// class like \w or \p{Greek}.
-    ///
-    /// If an invalid escape is found, or if a character class is found where
-    /// a simple literal is expected (e.g., in a range), then an error is
-    /// returned.
-    #[inline(never)]
-    fn parse_set_class_range(&self) -> Result<ast::ClassSetItem> {
-        let prim1 = self.parse_set_class_item()?;
-        self.bump_space();
-        if self.is_eof() {
-            return Err(self.unclosed_class_error());
-        }
-        // If the next char isn't a `-`, then we don't have a range.
-        // There are two exceptions. If the char after a `-` is a `]`, then
-        // `-` is interpreted as a literal `-`. Alternatively, if the char
-        // after a `-` is a `-`, then `--` corresponds to a "difference"
-        // operation.
-        if self.char() != '-'
-            || self.peek_space() == Some(']')
-            || self.peek_space() == Some('-')
-        {
-            return prim1.into_class_set_item(self);
-        }
-        // OK, now we're parsing a range, so bump past the `-` and parse the
-        // second half of the range.
-        if !self.bump_and_bump_space() {
-            return Err(self.unclosed_class_error());
-        }
-        let prim2 = self.parse_set_class_item()?;
-        let range = ast::ClassSetRange {
-            span: Span::new(prim1.span().start, prim2.span().end),
-            start: prim1.into_class_literal(self)?,
-            end: prim2.into_class_literal(self)?,
-        };
-        if !range.is_valid() {
-            return Err(
-                self.error(range.span, ast::ErrorKind::ClassRangeInvalid)
-            );
-        }
-        Ok(ast::ClassSetItem::Range(range))
-    }
-
-    /// Parse a single item in a character class as a primitive, where the
-    /// primitive either consists of a verbatim literal or a single escape
-    /// sequence.
-    ///
-    /// This assumes the parser is positioned at the beginning of a primitive,
-    /// and advances the parser to the first position after the primitive if
-    /// successful.
-    ///
-    /// Note that it is the caller's responsibility to report an error if an
-    /// illegal primitive was parsed.
-    #[inline(never)]
-    fn parse_set_class_item(&self) -> Result<Primitive> {
-        if self.char() == '\\' {
-            self.parse_escape()
-        } else {
-            let x = Primitive::Literal(ast::Literal {
-                span: self.span_char(),
-                kind: ast::LiteralKind::Verbatim,
-                c: self.char(),
-            });
-            self.bump();
-            Ok(x)
-        }
-    }
-
-    /// Parses the opening of a character class set. This includes the opening
-    /// bracket along with `^` if present to indicate negation. This also
-    /// starts parsing the opening set of unioned items if applicable, since
-    /// there are special rules applied to certain characters in the opening
-    /// of a character class. For example, `[^]]` is the class of all
-    /// characters not equal to `]`. (`]` would need to be escaped in any other
-    /// position.) Similarly for `-`.
-    ///
-    /// In all cases, the op inside the returned `ast::ClassBracketed` is an
-    /// empty union. This empty union should be replaced with the actual item
-    /// when it is popped from the parser's stack.
-    ///
-    /// This assumes the parser is positioned at the opening `[` and advances
-    /// the parser to the first non-special byte of the character class.
-    ///
-    /// An error is returned if EOF is found.
-    #[inline(never)]
-    fn parse_set_class_open(
-        &self,
-    ) -> Result<(ast::ClassBracketed, ast::ClassSetUnion)> {
-        assert_eq!(self.char(), '[');
-        let start = self.pos();
-        if !self.bump_and_bump_space() {
-            return Err(self.error(
-                Span::new(start, self.pos()),
-                ast::ErrorKind::ClassUnclosed,
-            ));
-        }
-
-        let negated = if self.char() != '^' {
-            false
-        } else {
-            if !self.bump_and_bump_space() {
-                return Err(self.error(
-                    Span::new(start, self.pos()),
-                    ast::ErrorKind::ClassUnclosed,
-                ));
-            }
-            true
-        };
-        // Accept any number of `-` as literal `-`.
-        let mut union =
-            ast::ClassSetUnion { span: self.span(), items: vec![] };
-        while self.char() == '-' {
-            union.push(ast::ClassSetItem::Literal(ast::Literal {
-                span: self.span_char(),
-                kind: ast::LiteralKind::Verbatim,
-                c: '-',
-            }));
-            if !self.bump_and_bump_space() {
-                return Err(self.error(
-                    Span::new(start, start),
-                    ast::ErrorKind::ClassUnclosed,
-                ));
-            }
-        }
-        // If `]` is the *first* char in a set, then interpret it as a literal
-        // `]`. That is, an empty class is impossible to write.
-        if union.items.is_empty() && self.char() == ']' {
-            union.push(ast::ClassSetItem::Literal(ast::Literal {
-                span: self.span_char(),
-                kind: ast::LiteralKind::Verbatim,
-                c: ']',
-            }));
-            if !self.bump_and_bump_space() {
-                return Err(self.error(
-                    Span::new(start, self.pos()),
-                    ast::ErrorKind::ClassUnclosed,
-                ));
-            }
-        }
-        let set = ast::ClassBracketed {
-            span: Span::new(start, self.pos()),
-            negated,
-            kind: ast::ClassSet::union(ast::ClassSetUnion {
-                span: Span::new(union.span.start, union.span.start),
-                items: vec![],
-            }),
-        };
-        Ok((set, union))
-    }
-
-    /// Attempt to parse an ASCII character class, e.g., `[:alnum:]`.
-    ///
-    /// This assumes the parser is positioned at the opening `[`.
-    ///
-    /// If no valid ASCII character class could be found, then this does not
-    /// advance the parser and `None` is returned. Otherwise, the parser is
-    /// advanced to the first byte following the closing `]` and the
-    /// corresponding ASCII class is returned.
-    #[inline(never)]
-    fn maybe_parse_ascii_class(&self) -> Option<ast::ClassAscii> {
-        // ASCII character classes are interesting from a parsing perspective
-        // because parsing cannot fail with any interesting error. For example,
-        // in order to use an ASCII character class, it must be enclosed in
-        // double brackets, e.g., `[[:alnum:]]`. Alternatively, you might think
-        // of it as "ASCII character classes have the syntax `[:NAME:]` which
-        // can only appear within character brackets." This means that things
-        // like `[[:lower:]A]` are legal constructs.
-        //
-        // However, if one types an incorrect ASCII character class, e.g.,
-        // `[[:loower:]]`, then we treat that as a normal nested character
-        // class containing the characters `:elorw`. One might argue that we
-        // should return an error instead since the repeated colons give away
-        // the intent to write an ASCII class. But what if the user typed
-        // `[[:lower]]` instead? How can we tell that was intended to be an
-        // ASCII class and not just a normal nested class?
-        //
-        // Reasonable people can probably disagree over this, but for better
-        // or worse, we implement semantics that never fails at the expense
-        // of better failure modes.
-        assert_eq!(self.char(), '[');
-        // If parsing fails, then we back up the parser to this starting point.
-        let start = self.pos();
-        let mut negated = false;
-        if !self.bump() || self.char() != ':' {
-            self.parser().pos.set(start);
-            return None;
-        }
-        if !self.bump() {
-            self.parser().pos.set(start);
-            return None;
-        }
-        if self.char() == '^' {
-            negated = true;
-            if !self.bump() {
-                self.parser().pos.set(start);
-                return None;
-            }
-        }
-        let name_start = self.offset();
-        while self.char() != ':' && self.bump() {}
-        if self.is_eof() {
-            self.parser().pos.set(start);
-            return None;
-        }
-        let name = &self.pattern()[name_start..self.offset()];
-        if !self.bump_if(":]") {
-            self.parser().pos.set(start);
-            return None;
-        }
-        let kind = match ast::ClassAsciiKind::from_name(name) {
-            Some(kind) => kind,
-            None => {
-                self.parser().pos.set(start);
-                return None;
-            }
-        };
-        Some(ast::ClassAscii {
-            span: Span::new(start, self.pos()),
-            kind,
-            negated,
-        })
-    }
-
-    /// Parse a Unicode class in either the single character notation, `\pN`
-    /// or the multi-character bracketed notation, `\p{Greek}`. This assumes
-    /// the parser is positioned at the `p` (or `P` for negation) and will
-    /// advance the parser to the character immediately following the class.
-    ///
-    /// Note that this does not check whether the class name is valid or not.
-    #[inline(never)]
-    fn parse_unicode_class(&self) -> Result<ast::ClassUnicode> {
-        assert!(self.char() == 'p' || self.char() == 'P');
-
-        let mut scratch = self.parser().scratch.borrow_mut();
-        scratch.clear();
-
-        let negated = self.char() == 'P';
-        if !self.bump_and_bump_space() {
-            return Err(
-                self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)
-            );
-        }
-        let (start, kind) = if self.char() == '{' {
-            let start = self.span_char().end;
-            while self.bump_and_bump_space() && self.char() != '}' {
-                scratch.push(self.char());
-            }
-            if self.is_eof() {
-                return Err(self
-                    .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof));
-            }
-            assert_eq!(self.char(), '}');
-            self.bump();
-
-            let name = scratch.as_str();
-            if let Some(i) = name.find("!=") {
-                (
-                    start,
-                    ast::ClassUnicodeKind::NamedValue {
-                        op: ast::ClassUnicodeOpKind::NotEqual,
-                        name: name[..i].to_string(),
-                        value: name[i + 2..].to_string(),
-                    },
-                )
-            } else if let Some(i) = name.find(':') {
-                (
-                    start,
-                    ast::ClassUnicodeKind::NamedValue {
-                        op: ast::ClassUnicodeOpKind::Colon,
-                        name: name[..i].to_string(),
-                        value: name[i + 1..].to_string(),
-                    },
-                )
-            } else if let Some(i) = name.find('=') {
-                (
-                    start,
-                    ast::ClassUnicodeKind::NamedValue {
-                        op: ast::ClassUnicodeOpKind::Equal,
-                        name: name[..i].to_string(),
-                        value: name[i + 1..].to_string(),
-                    },
-                )
-            } else {
-                (start, ast::ClassUnicodeKind::Named(name.to_string()))
-            }
-        } else {
-            let start = self.pos();
-            let c = self.char();
-            if c == '\\' {
-                return Err(self.error(
-                    self.span_char(),
-                    ast::ErrorKind::UnicodeClassInvalid,
-                ));
-            }
-            self.bump_and_bump_space();
-            let kind = ast::ClassUnicodeKind::OneLetter(c);
-            (start, kind)
-        };
-        Ok(ast::ClassUnicode {
-            span: Span::new(start, self.pos()),
-            negated,
-            kind,
-        })
-    }
-
-    /// Parse a Perl character class, e.g., `\d` or `\W`. This assumes the
-    /// parser is currently at a valid character class name and will be
-    /// advanced to the character immediately following the class.
-    #[inline(never)]
-    fn parse_perl_class(&self) -> ast::ClassPerl {
-        let c = self.char();
-        let span = self.span_char();
-        self.bump();
-        let (negated, kind) = match c {
-            'd' => (false, ast::ClassPerlKind::Digit),
-            'D' => (true, ast::ClassPerlKind::Digit),
-            's' => (false, ast::ClassPerlKind::Space),
-            'S' => (true, ast::ClassPerlKind::Space),
-            'w' => (false, ast::ClassPerlKind::Word),
-            'W' => (true, ast::ClassPerlKind::Word),
-            c => panic!("expected valid Perl class but got '{}'", c),
-        };
-        ast::ClassPerl { span, kind, negated }
-    }
-}
-
-/// A type that traverses a fully parsed Ast and checks whether its depth
-/// exceeds the specified nesting limit. If it does, then an error is returned.
-#[derive(Debug)]
-struct NestLimiter<'p, 's, P> {
-    /// The parser that is checking the nest limit.
-    p: &'p ParserI<'s, P>,
-    /// The current depth while walking an Ast.
-    depth: u32,
-}
-
-impl<'p, 's, P: Borrow<Parser>> NestLimiter<'p, 's, P> {
-    fn new(p: &'p ParserI<'s, P>) -> NestLimiter<'p, 's, P> {
-        NestLimiter { p, depth: 0 }
-    }
-
-    #[inline(never)]
-    fn check(self, ast: &Ast) -> Result<()> {
-        ast::visit(ast, self)
-    }
-
-    fn increment_depth(&mut self, span: &Span) -> Result<()> {
-        let new = self.depth.checked_add(1).ok_or_else(|| {
-            self.p.error(
-                span.clone(),
-                ast::ErrorKind::NestLimitExceeded(u32::MAX),
-            )
-        })?;
-        let limit = self.p.parser().nest_limit;
-        if new > limit {
-            return Err(self.p.error(
-                span.clone(),
-                ast::ErrorKind::NestLimitExceeded(limit),
-            ));
-        }
-        self.depth = new;
-        Ok(())
-    }
-
-    fn decrement_depth(&mut self) {
-        // Assuming the correctness of the visitor, this should never drop
-        // below 0.
-        self.depth = self.depth.checked_sub(1).unwrap();
-    }
-}
-
-impl<'p, 's, P: Borrow<Parser>> ast::Visitor for NestLimiter<'p, 's, P> {
-    type Output = ();
-    type Err = ast::Error;
-
-    fn finish(self) -> Result<()> {
-        Ok(())
-    }
-
-    fn visit_pre(&mut self, ast: &Ast) -> Result<()> {
-        let span = match *ast {
-            Ast::Empty(_)
-            | Ast::Flags(_)
-            | Ast::Literal(_)
-            | Ast::Dot(_)
-            | Ast::Assertion(_)
-            | Ast::ClassUnicode(_)
-            | Ast::ClassPerl(_) => {
-                // These are all base cases, so we don't increment depth.
-                return Ok(());
-            }
-            Ast::ClassBracketed(ref x) => &x.span,
-            Ast::Repetition(ref x) => &x.span,
-            Ast::Group(ref x) => &x.span,
-            Ast::Alternation(ref x) => &x.span,
-            Ast::Concat(ref x) => &x.span,
-        };
-        self.increment_depth(span)
-    }
-
-    fn visit_post(&mut self, ast: &Ast) -> Result<()> {
-        match *ast {
-            Ast::Empty(_)
-            | Ast::Flags(_)
-            | Ast::Literal(_)
-            | Ast::Dot(_)
-            | Ast::Assertion(_)
-            | Ast::ClassUnicode(_)
-            | Ast::ClassPerl(_) => {
-                // These are all base cases, so we don't decrement depth.
-                Ok(())
-            }
-            Ast::ClassBracketed(_)
-            | Ast::Repetition(_)
-            | Ast::Group(_)
-            | Ast::Alternation(_)
-            | Ast::Concat(_) => {
-                self.decrement_depth();
-                Ok(())
-            }
-        }
-    }
-
-    fn visit_class_set_item_pre(
-        &mut self,
-        ast: &ast::ClassSetItem,
-    ) -> Result<()> {
-        let span = match *ast {
-            ast::ClassSetItem::Empty(_)
-            | ast::ClassSetItem::Literal(_)
-            | ast::ClassSetItem::Range(_)
-            | ast::ClassSetItem::Ascii(_)
-            | ast::ClassSetItem::Unicode(_)
-            | ast::ClassSetItem::Perl(_) => {
-                // These are all base cases, so we don't increment depth.
-                return Ok(());
-            }
-            ast::ClassSetItem::Bracketed(ref x) => &x.span,
-            ast::ClassSetItem::Union(ref x) => &x.span,
-        };
-        self.increment_depth(span)
-    }
-
-    fn visit_class_set_item_post(
-        &mut self,
-        ast: &ast::ClassSetItem,
-    ) -> Result<()> {
-        match *ast {
-            ast::ClassSetItem::Empty(_)
-            | ast::ClassSetItem::Literal(_)
-            | ast::ClassSetItem::Range(_)
-            | ast::ClassSetItem::Ascii(_)
-            | ast::ClassSetItem::Unicode(_)
-            | ast::ClassSetItem::Perl(_) => {
-                // These are all base cases, so we don't decrement depth.
-                Ok(())
-            }
-            ast::ClassSetItem::Bracketed(_) | ast::ClassSetItem::Union(_) => {
-                self.decrement_depth();
-                Ok(())
-            }
-        }
-    }
-
-    fn visit_class_set_binary_op_pre(
-        &mut self,
-        ast: &ast::ClassSetBinaryOp,
-    ) -> Result<()> {
-        self.increment_depth(&ast.span)
-    }
-
-    fn visit_class_set_binary_op_post(
-        &mut self,
-        _ast: &ast::ClassSetBinaryOp,
-    ) -> Result<()> {
-        self.decrement_depth();
-        Ok(())
-    }
-}
-
-/// When the result is an error, transforms the ast::ErrorKind from the source
-/// Result into another one. This function is used to return clearer error
-/// messages when possible.
-fn specialize_err<T>(
-    result: Result<T>,
-    from: ast::ErrorKind,
-    to: ast::ErrorKind,
-) -> Result<T> {
-    if let Err(e) = result {
-        if e.kind == from {
-            Err(ast::Error { kind: to, pattern: e.pattern, span: e.span })
-        } else {
-            Err(e)
-        }
-    } else {
-        result
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use core::ops::Range;
-
-    use alloc::format;
-
-    use super::*;
-
-    // Our own assert_eq, which has slightly better formatting (but honestly
-    // still kind of crappy).
-    macro_rules! assert_eq {
-        ($left:expr, $right:expr) => {{
-            match (&$left, &$right) {
-                (left_val, right_val) => {
-                    if !(*left_val == *right_val) {
-                        panic!(
-                            "assertion failed: `(left == right)`\n\n\
-                             left:  `{:?}`\nright: `{:?}`\n\n",
-                            left_val, right_val
-                        )
-                    }
-                }
-            }
-        }};
-    }
-
-    // We create these errors to compare with real ast::Errors in the tests.
-    // We define equality between TestError and ast::Error to disregard the
-    // pattern string in ast::Error, which is annoying to provide in tests.
-    #[derive(Clone, Debug)]
-    struct TestError {
-        span: Span,
-        kind: ast::ErrorKind,
-    }
-
-    impl PartialEq<ast::Error> for TestError {
-        fn eq(&self, other: &ast::Error) -> bool {
-            self.span == other.span && self.kind == other.kind
-        }
-    }
-
-    impl PartialEq<TestError> for ast::Error {
-        fn eq(&self, other: &TestError) -> bool {
-            self.span == other.span && self.kind == other.kind
-        }
-    }
-
-    fn s(str: &str) -> String {
-        str.to_string()
-    }
-
-    fn parser(pattern: &str) -> ParserI<'_, Parser> {
-        ParserI::new(Parser::new(), pattern)
-    }
-
-    fn parser_octal(pattern: &str) -> ParserI<'_, Parser> {
-        let parser = ParserBuilder::new().octal(true).build();
-        ParserI::new(parser, pattern)
-    }
-
-    fn parser_empty_min_range(pattern: &str) -> ParserI<'_, Parser> {
-        let parser = ParserBuilder::new().empty_min_range(true).build();
-        ParserI::new(parser, pattern)
-    }
-
-    fn parser_nest_limit(
-        pattern: &str,
-        nest_limit: u32,
-    ) -> ParserI<'_, Parser> {
-        let p = ParserBuilder::new().nest_limit(nest_limit).build();
-        ParserI::new(p, pattern)
-    }
-
-    fn parser_ignore_whitespace(pattern: &str) -> ParserI<'_, Parser> {
-        let p = ParserBuilder::new().ignore_whitespace(true).build();
-        ParserI::new(p, pattern)
-    }
-
-    /// Short alias for creating a new span.
-    fn nspan(start: Position, end: Position) -> Span {
-        Span::new(start, end)
-    }
-
-    /// Short alias for creating a new position.
-    fn npos(offset: usize, line: usize, column: usize) -> Position {
-        Position::new(offset, line, column)
-    }
-
-    /// Create a new span from the given offset range. This assumes a single
-    /// line and sets the columns based on the offsets. i.e., This only works
-    /// out of the box for ASCII, which is fine for most tests.
-    fn span(range: Range<usize>) -> Span {
-        let start = Position::new(range.start, 1, range.start + 1);
-        let end = Position::new(range.end, 1, range.end + 1);
-        Span::new(start, end)
-    }
-
-    /// Create a new span for the corresponding byte range in the given string.
-    fn span_range(subject: &str, range: Range<usize>) -> Span {
-        let start = Position {
-            offset: range.start,
-            line: 1 + subject[..range.start].matches('\n').count(),
-            column: 1 + subject[..range.start]
-                .chars()
-                .rev()
-                .position(|c| c == '\n')
-                .unwrap_or(subject[..range.start].chars().count()),
-        };
-        let end = Position {
-            offset: range.end,
-            line: 1 + subject[..range.end].matches('\n').count(),
-            column: 1 + subject[..range.end]
-                .chars()
-                .rev()
-                .position(|c| c == '\n')
-                .unwrap_or(subject[..range.end].chars().count()),
-        };
-        Span::new(start, end)
-    }
-
-    /// Create a verbatim literal starting at the given position.
-    fn lit(c: char, start: usize) -> Ast {
-        lit_with(c, span(start..start + c.len_utf8()))
-    }
-
-    /// Create a meta literal starting at the given position.
-    fn meta_lit(c: char, span: Span) -> Ast {
-        Ast::literal(ast::Literal { span, kind: ast::LiteralKind::Meta, c })
-    }
-
-    /// Create a verbatim literal with the given span.
-    fn lit_with(c: char, span: Span) -> Ast {
-        Ast::literal(ast::Literal {
-            span,
-            kind: ast::LiteralKind::Verbatim,
-            c,
-        })
-    }
-
-    /// Create a concatenation with the given range.
-    fn concat(range: Range<usize>, asts: Vec<Ast>) -> Ast {
-        concat_with(span(range), asts)
-    }
-
-    /// Create a concatenation with the given span.
-    fn concat_with(span: Span, asts: Vec<Ast>) -> Ast {
-        Ast::concat(ast::Concat { span, asts })
-    }
-
-    /// Create an alternation with the given span.
-    fn alt(range: Range<usize>, asts: Vec<Ast>) -> Ast {
-        Ast::alternation(ast::Alternation { span: span(range), asts })
-    }
-
-    /// Create a capturing group with the given span.
-    fn group(range: Range<usize>, index: u32, ast: Ast) -> Ast {
-        Ast::group(ast::Group {
-            span: span(range),
-            kind: ast::GroupKind::CaptureIndex(index),
-            ast: Box::new(ast),
-        })
-    }
-
-    /// Create an ast::SetFlags.
-    ///
-    /// The given pattern should be the full pattern string. The range given
-    /// should correspond to the byte offsets where the flag set occurs.
-    ///
-    /// If negated is true, then the set is interpreted as beginning with a
-    /// negation.
-    fn flag_set(
-        pat: &str,
-        range: Range<usize>,
-        flag: ast::Flag,
-        negated: bool,
-    ) -> Ast {
-        let mut items = vec![ast::FlagsItem {
-            span: span_range(pat, (range.end - 2)..(range.end - 1)),
-            kind: ast::FlagsItemKind::Flag(flag),
-        }];
-        if negated {
-            items.insert(
-                0,
-                ast::FlagsItem {
-                    span: span_range(pat, (range.start + 2)..(range.end - 2)),
-                    kind: ast::FlagsItemKind::Negation,
-                },
-            );
-        }
-        Ast::flags(ast::SetFlags {
-            span: span_range(pat, range.clone()),
-            flags: ast::Flags {
-                span: span_range(pat, (range.start + 2)..(range.end - 1)),
-                items,
-            },
-        })
-    }
-
-    #[test]
-    fn parse_nest_limit() {
-        // A nest limit of 0 still allows some types of regexes.
-        assert_eq!(
-            parser_nest_limit("", 0).parse(),
-            Ok(Ast::empty(span(0..0)))
-        );
-        assert_eq!(parser_nest_limit("a", 0).parse(), Ok(lit('a', 0)));
-
-        // Test repetition operations, which require one level of nesting.
-        assert_eq!(
-            parser_nest_limit("a+", 0).parse().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::NestLimitExceeded(0),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("a+", 1).parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..2),
-                op: ast::RepetitionOp {
-                    span: span(1..2),
-                    kind: ast::RepetitionKind::OneOrMore,
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser_nest_limit("(a)+", 1).parse().unwrap_err(),
-            TestError {
-                span: span(0..3),
-                kind: ast::ErrorKind::NestLimitExceeded(1),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("a+*", 1).parse().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::NestLimitExceeded(1),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("a+*", 2).parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..3),
-                op: ast::RepetitionOp {
-                    span: span(2..3),
-                    kind: ast::RepetitionKind::ZeroOrMore,
-                },
-                greedy: true,
-                ast: Box::new(Ast::repetition(ast::Repetition {
-                    span: span(0..2),
-                    op: ast::RepetitionOp {
-                        span: span(1..2),
-                        kind: ast::RepetitionKind::OneOrMore,
-                    },
-                    greedy: true,
-                    ast: Box::new(lit('a', 0)),
-                })),
-            }))
-        );
-
-        // Test concatenations. A concatenation requires one level of nesting.
-        assert_eq!(
-            parser_nest_limit("ab", 0).parse().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::NestLimitExceeded(0),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("ab", 1).parse(),
-            Ok(concat(0..2, vec![lit('a', 0), lit('b', 1)]))
-        );
-        assert_eq!(
-            parser_nest_limit("abc", 1).parse(),
-            Ok(concat(0..3, vec![lit('a', 0), lit('b', 1), lit('c', 2)]))
-        );
-
-        // Test alternations. An alternation requires one level of nesting.
-        assert_eq!(
-            parser_nest_limit("a|b", 0).parse().unwrap_err(),
-            TestError {
-                span: span(0..3),
-                kind: ast::ErrorKind::NestLimitExceeded(0),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("a|b", 1).parse(),
-            Ok(alt(0..3, vec![lit('a', 0), lit('b', 2)]))
-        );
-        assert_eq!(
-            parser_nest_limit("a|b|c", 1).parse(),
-            Ok(alt(0..5, vec![lit('a', 0), lit('b', 2), lit('c', 4)]))
-        );
-
-        // Test character classes. Classes form their own mini-recursive
-        // syntax!
-        assert_eq!(
-            parser_nest_limit("[a]", 0).parse().unwrap_err(),
-            TestError {
-                span: span(0..3),
-                kind: ast::ErrorKind::NestLimitExceeded(0),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("[a]", 1).parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..3),
-                negated: false,
-                kind: ast::ClassSet::Item(ast::ClassSetItem::Literal(
-                    ast::Literal {
-                        span: span(1..2),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: 'a',
-                    }
-                )),
-            }))
-        );
-        assert_eq!(
-            parser_nest_limit("[ab]", 1).parse().unwrap_err(),
-            TestError {
-                span: span(1..3),
-                kind: ast::ErrorKind::NestLimitExceeded(1),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("[ab[cd]]", 2).parse().unwrap_err(),
-            TestError {
-                span: span(3..7),
-                kind: ast::ErrorKind::NestLimitExceeded(2),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("[ab[cd]]", 3).parse().unwrap_err(),
-            TestError {
-                span: span(4..6),
-                kind: ast::ErrorKind::NestLimitExceeded(3),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("[a--b]", 1).parse().unwrap_err(),
-            TestError {
-                span: span(1..5),
-                kind: ast::ErrorKind::NestLimitExceeded(1),
-            }
-        );
-        assert_eq!(
-            parser_nest_limit("[a--bc]", 2).parse().unwrap_err(),
-            TestError {
-                span: span(4..6),
-                kind: ast::ErrorKind::NestLimitExceeded(2),
-            }
-        );
-    }
-
-    #[test]
-    fn parse_comments() {
-        let pat = "(?x)
-# This is comment 1.
-foo # This is comment 2.
-  # This is comment 3.
-bar
-# This is comment 4.";
-        let astc = parser(pat).parse_with_comments().unwrap();
-        assert_eq!(
-            astc.ast,
-            concat_with(
-                span_range(pat, 0..pat.len()),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    lit_with('f', span_range(pat, 26..27)),
-                    lit_with('o', span_range(pat, 27..28)),
-                    lit_with('o', span_range(pat, 28..29)),
-                    lit_with('b', span_range(pat, 74..75)),
-                    lit_with('a', span_range(pat, 75..76)),
-                    lit_with('r', span_range(pat, 76..77)),
-                ]
-            )
-        );
-        assert_eq!(
-            astc.comments,
-            vec![
-                ast::Comment {
-                    span: span_range(pat, 5..26),
-                    comment: s(" This is comment 1."),
-                },
-                ast::Comment {
-                    span: span_range(pat, 30..51),
-                    comment: s(" This is comment 2."),
-                },
-                ast::Comment {
-                    span: span_range(pat, 53..74),
-                    comment: s(" This is comment 3."),
-                },
-                ast::Comment {
-                    span: span_range(pat, 78..98),
-                    comment: s(" This is comment 4."),
-                },
-            ]
-        );
-    }
-
-    #[test]
-    fn parse_holistic() {
-        assert_eq!(parser("]").parse(), Ok(lit(']', 0)));
-        assert_eq!(
-            parser(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~").parse(),
-            Ok(concat(
-                0..36,
-                vec![
-                    meta_lit('\\', span(0..2)),
-                    meta_lit('.', span(2..4)),
-                    meta_lit('+', span(4..6)),
-                    meta_lit('*', span(6..8)),
-                    meta_lit('?', span(8..10)),
-                    meta_lit('(', span(10..12)),
-                    meta_lit(')', span(12..14)),
-                    meta_lit('|', span(14..16)),
-                    meta_lit('[', span(16..18)),
-                    meta_lit(']', span(18..20)),
-                    meta_lit('{', span(20..22)),
-                    meta_lit('}', span(22..24)),
-                    meta_lit('^', span(24..26)),
-                    meta_lit('$', span(26..28)),
-                    meta_lit('#', span(28..30)),
-                    meta_lit('&', span(30..32)),
-                    meta_lit('-', span(32..34)),
-                    meta_lit('~', span(34..36)),
-                ]
-            ))
-        );
-    }
-
-    #[test]
-    fn parse_ignore_whitespace() {
-        // Test that basic whitespace insensitivity works.
-        let pat = "(?x)a b";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                nspan(npos(0, 1, 1), npos(7, 1, 8)),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))),
-                    lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))),
-                ]
-            ))
-        );
-
-        // Test that we can toggle whitespace insensitivity.
-        let pat = "(?x)a b(?-x)a b";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                nspan(npos(0, 1, 1), npos(15, 1, 16)),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))),
-                    lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))),
-                    flag_set(pat, 7..12, ast::Flag::IgnoreWhitespace, true),
-                    lit_with('a', nspan(npos(12, 1, 13), npos(13, 1, 14))),
-                    lit_with(' ', nspan(npos(13, 1, 14), npos(14, 1, 15))),
-                    lit_with('b', nspan(npos(14, 1, 15), npos(15, 1, 16))),
-                ]
-            ))
-        );
-
-        // Test that nesting whitespace insensitive flags works.
-        let pat = "a (?x:a )a ";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..11),
-                vec![
-                    lit_with('a', span_range(pat, 0..1)),
-                    lit_with(' ', span_range(pat, 1..2)),
-                    Ast::group(ast::Group {
-                        span: span_range(pat, 2..9),
-                        kind: ast::GroupKind::NonCapturing(ast::Flags {
-                            span: span_range(pat, 4..5),
-                            items: vec![ast::FlagsItem {
-                                span: span_range(pat, 4..5),
-                                kind: ast::FlagsItemKind::Flag(
-                                    ast::Flag::IgnoreWhitespace
-                                ),
-                            },],
-                        }),
-                        ast: Box::new(lit_with('a', span_range(pat, 6..7))),
-                    }),
-                    lit_with('a', span_range(pat, 9..10)),
-                    lit_with(' ', span_range(pat, 10..11)),
-                ]
-            ))
-        );
-
-        // Test that whitespace after an opening paren is insignificant.
-        let pat = "(?x)( ?P<foo> a )";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..pat.len()),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    Ast::group(ast::Group {
-                        span: span_range(pat, 4..pat.len()),
-                        kind: ast::GroupKind::CaptureName {
-                            starts_with_p: true,
-                            name: ast::CaptureName {
-                                span: span_range(pat, 9..12),
-                                name: s("foo"),
-                                index: 1,
-                            }
-                        },
-                        ast: Box::new(lit_with('a', span_range(pat, 14..15))),
-                    }),
-                ]
-            ))
-        );
-        let pat = "(?x)(  a )";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..pat.len()),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    Ast::group(ast::Group {
-                        span: span_range(pat, 4..pat.len()),
-                        kind: ast::GroupKind::CaptureIndex(1),
-                        ast: Box::new(lit_with('a', span_range(pat, 7..8))),
-                    }),
-                ]
-            ))
-        );
-        let pat = "(?x)(  ?:  a )";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..pat.len()),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    Ast::group(ast::Group {
-                        span: span_range(pat, 4..pat.len()),
-                        kind: ast::GroupKind::NonCapturing(ast::Flags {
-                            span: span_range(pat, 8..8),
-                            items: vec![],
-                        }),
-                        ast: Box::new(lit_with('a', span_range(pat, 11..12))),
-                    }),
-                ]
-            ))
-        );
-        let pat = r"(?x)\x { 53 }";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..pat.len()),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    Ast::literal(ast::Literal {
-                        span: span(4..13),
-                        kind: ast::LiteralKind::HexBrace(
-                            ast::HexLiteralKind::X
-                        ),
-                        c: 'S',
-                    }),
-                ]
-            ))
-        );
-
-        // Test that whitespace after an escape is OK.
-        let pat = r"(?x)\ ";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..pat.len()),
-                vec![
-                    flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false),
-                    Ast::literal(ast::Literal {
-                        span: span_range(pat, 4..6),
-                        kind: ast::LiteralKind::Superfluous,
-                        c: ' ',
-                    }),
-                ]
-            ))
-        );
-    }
-
-    #[test]
-    fn parse_newlines() {
-        let pat = ".\n.";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..3),
-                vec![
-                    Ast::dot(span_range(pat, 0..1)),
-                    lit_with('\n', span_range(pat, 1..2)),
-                    Ast::dot(span_range(pat, 2..3)),
-                ]
-            ))
-        );
-
-        let pat = "foobar\nbaz\nquux\n";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(concat_with(
-                span_range(pat, 0..pat.len()),
-                vec![
-                    lit_with('f', nspan(npos(0, 1, 1), npos(1, 1, 2))),
-                    lit_with('o', nspan(npos(1, 1, 2), npos(2, 1, 3))),
-                    lit_with('o', nspan(npos(2, 1, 3), npos(3, 1, 4))),
-                    lit_with('b', nspan(npos(3, 1, 4), npos(4, 1, 5))),
-                    lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))),
-                    lit_with('r', nspan(npos(5, 1, 6), npos(6, 1, 7))),
-                    lit_with('\n', nspan(npos(6, 1, 7), npos(7, 2, 1))),
-                    lit_with('b', nspan(npos(7, 2, 1), npos(8, 2, 2))),
-                    lit_with('a', nspan(npos(8, 2, 2), npos(9, 2, 3))),
-                    lit_with('z', nspan(npos(9, 2, 3), npos(10, 2, 4))),
-                    lit_with('\n', nspan(npos(10, 2, 4), npos(11, 3, 1))),
-                    lit_with('q', nspan(npos(11, 3, 1), npos(12, 3, 2))),
-                    lit_with('u', nspan(npos(12, 3, 2), npos(13, 3, 3))),
-                    lit_with('u', nspan(npos(13, 3, 3), npos(14, 3, 4))),
-                    lit_with('x', nspan(npos(14, 3, 4), npos(15, 3, 5))),
-                    lit_with('\n', nspan(npos(15, 3, 5), npos(16, 4, 1))),
-                ]
-            ))
-        );
-    }
-
-    #[test]
-    fn parse_uncounted_repetition() {
-        assert_eq!(
-            parser(r"a*").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..2),
-                op: ast::RepetitionOp {
-                    span: span(1..2),
-                    kind: ast::RepetitionKind::ZeroOrMore,
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a+").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..2),
-                op: ast::RepetitionOp {
-                    span: span(1..2),
-                    kind: ast::RepetitionKind::OneOrMore,
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-
-        assert_eq!(
-            parser(r"a?").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..2),
-                op: ast::RepetitionOp {
-                    span: span(1..2),
-                    kind: ast::RepetitionKind::ZeroOrOne,
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a??").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..3),
-                op: ast::RepetitionOp {
-                    span: span(1..3),
-                    kind: ast::RepetitionKind::ZeroOrOne,
-                },
-                greedy: false,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a?").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..2),
-                op: ast::RepetitionOp {
-                    span: span(1..2),
-                    kind: ast::RepetitionKind::ZeroOrOne,
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a?b").parse(),
-            Ok(concat(
-                0..3,
-                vec![
-                    Ast::repetition(ast::Repetition {
-                        span: span(0..2),
-                        op: ast::RepetitionOp {
-                            span: span(1..2),
-                            kind: ast::RepetitionKind::ZeroOrOne,
-                        },
-                        greedy: true,
-                        ast: Box::new(lit('a', 0)),
-                    }),
-                    lit('b', 2),
-                ]
-            ))
-        );
-        assert_eq!(
-            parser(r"a??b").parse(),
-            Ok(concat(
-                0..4,
-                vec![
-                    Ast::repetition(ast::Repetition {
-                        span: span(0..3),
-                        op: ast::RepetitionOp {
-                            span: span(1..3),
-                            kind: ast::RepetitionKind::ZeroOrOne,
-                        },
-                        greedy: false,
-                        ast: Box::new(lit('a', 0)),
-                    }),
-                    lit('b', 3),
-                ]
-            ))
-        );
-        assert_eq!(
-            parser(r"ab?").parse(),
-            Ok(concat(
-                0..3,
-                vec![
-                    lit('a', 0),
-                    Ast::repetition(ast::Repetition {
-                        span: span(1..3),
-                        op: ast::RepetitionOp {
-                            span: span(2..3),
-                            kind: ast::RepetitionKind::ZeroOrOne,
-                        },
-                        greedy: true,
-                        ast: Box::new(lit('b', 1)),
-                    }),
-                ]
-            ))
-        );
-        assert_eq!(
-            parser(r"(ab)?").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..5),
-                op: ast::RepetitionOp {
-                    span: span(4..5),
-                    kind: ast::RepetitionKind::ZeroOrOne,
-                },
-                greedy: true,
-                ast: Box::new(group(
-                    0..4,
-                    1,
-                    concat(1..3, vec![lit('a', 1), lit('b', 2),])
-                )),
-            }))
-        );
-        assert_eq!(
-            parser(r"|a?").parse(),
-            Ok(alt(
-                0..3,
-                vec![
-                    Ast::empty(span(0..0)),
-                    Ast::repetition(ast::Repetition {
-                        span: span(1..3),
-                        op: ast::RepetitionOp {
-                            span: span(2..3),
-                            kind: ast::RepetitionKind::ZeroOrOne,
-                        },
-                        greedy: true,
-                        ast: Box::new(lit('a', 1)),
-                    }),
-                ]
-            ))
-        );
-
-        assert_eq!(
-            parser(r"*").parse().unwrap_err(),
-            TestError {
-                span: span(0..0),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"(?i)*").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"(*)").parse().unwrap_err(),
-            TestError {
-                span: span(1..1),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"(?:?)").parse().unwrap_err(),
-            TestError {
-                span: span(3..3),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"+").parse().unwrap_err(),
-            TestError {
-                span: span(0..0),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"?").parse().unwrap_err(),
-            TestError {
-                span: span(0..0),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"(?)").parse().unwrap_err(),
-            TestError {
-                span: span(1..1),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"|*").parse().unwrap_err(),
-            TestError {
-                span: span(1..1),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"|+").parse().unwrap_err(),
-            TestError {
-                span: span(1..1),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"|?").parse().unwrap_err(),
-            TestError {
-                span: span(1..1),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_counted_repetition() {
-        assert_eq!(
-            parser(r"a{5}").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..4),
-                op: ast::RepetitionOp {
-                    span: span(1..4),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Exactly(5)
-                    ),
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a{5,}").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..5),
-                op: ast::RepetitionOp {
-                    span: span(1..5),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::AtLeast(5)
-                    ),
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a{5,9}").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..6),
-                op: ast::RepetitionOp {
-                    span: span(1..6),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Bounded(5, 9)
-                    ),
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a{5}?").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..5),
-                op: ast::RepetitionOp {
-                    span: span(1..5),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Exactly(5)
-                    ),
-                },
-                greedy: false,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"ab{5}").parse(),
-            Ok(concat(
-                0..5,
-                vec![
-                    lit('a', 0),
-                    Ast::repetition(ast::Repetition {
-                        span: span(1..5),
-                        op: ast::RepetitionOp {
-                            span: span(2..5),
-                            kind: ast::RepetitionKind::Range(
-                                ast::RepetitionRange::Exactly(5)
-                            ),
-                        },
-                        greedy: true,
-                        ast: Box::new(lit('b', 1)),
-                    }),
-                ]
-            ))
-        );
-        assert_eq!(
-            parser(r"ab{5}c").parse(),
-            Ok(concat(
-                0..6,
-                vec![
-                    lit('a', 0),
-                    Ast::repetition(ast::Repetition {
-                        span: span(1..5),
-                        op: ast::RepetitionOp {
-                            span: span(2..5),
-                            kind: ast::RepetitionKind::Range(
-                                ast::RepetitionRange::Exactly(5)
-                            ),
-                        },
-                        greedy: true,
-                        ast: Box::new(lit('b', 1)),
-                    }),
-                    lit('c', 5),
-                ]
-            ))
-        );
-
-        assert_eq!(
-            parser(r"a{ 5 }").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..6),
-                op: ast::RepetitionOp {
-                    span: span(1..6),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Exactly(5)
-                    ),
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"a{ 5 , 9 }").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..10),
-                op: ast::RepetitionOp {
-                    span: span(1..10),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Bounded(5, 9)
-                    ),
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser_empty_min_range(r"a{,9}").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..5),
-                op: ast::RepetitionOp {
-                    span: span(1..5),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Bounded(0, 9)
-                    ),
-                },
-                greedy: true,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser_ignore_whitespace(r"a{5,9} ?").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..8),
-                op: ast::RepetitionOp {
-                    span: span(1..8),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Bounded(5, 9)
-                    ),
-                },
-                greedy: false,
-                ast: Box::new(lit('a', 0)),
-            }))
-        );
-        assert_eq!(
-            parser(r"\b{5,9}").parse(),
-            Ok(Ast::repetition(ast::Repetition {
-                span: span(0..7),
-                op: ast::RepetitionOp {
-                    span: span(2..7),
-                    kind: ast::RepetitionKind::Range(
-                        ast::RepetitionRange::Bounded(5, 9)
-                    ),
-                },
-                greedy: true,
-                ast: Box::new(Ast::assertion(ast::Assertion {
-                    span: span(0..2),
-                    kind: ast::AssertionKind::WordBoundary,
-                })),
-            }))
-        );
-
-        assert_eq!(
-            parser(r"(?i){0}").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"(?m){1,1}").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"a{]}").parse().unwrap_err(),
-            TestError {
-                span: span(2..2),
-                kind: ast::ErrorKind::RepetitionCountDecimalEmpty,
-            }
-        );
-        assert_eq!(
-            parser(r"a{1,]}").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::RepetitionCountDecimalEmpty,
-            }
-        );
-        assert_eq!(
-            parser(r"a{").parse().unwrap_err(),
-            TestError {
-                span: span(1..2),
-                kind: ast::ErrorKind::RepetitionCountUnclosed,
-            }
-        );
-        assert_eq!(
-            parser(r"a{}").parse().unwrap_err(),
-            TestError {
-                span: span(2..2),
-                kind: ast::ErrorKind::RepetitionCountDecimalEmpty,
-            }
-        );
-        assert_eq!(
-            parser(r"a{a").parse().unwrap_err(),
-            TestError {
-                span: span(2..2),
-                kind: ast::ErrorKind::RepetitionCountDecimalEmpty,
-            }
-        );
-        assert_eq!(
-            parser(r"a{9999999999}").parse().unwrap_err(),
-            TestError {
-                span: span(2..12),
-                kind: ast::ErrorKind::DecimalInvalid,
-            }
-        );
-        assert_eq!(
-            parser(r"a{9").parse().unwrap_err(),
-            TestError {
-                span: span(1..3),
-                kind: ast::ErrorKind::RepetitionCountUnclosed,
-            }
-        );
-        assert_eq!(
-            parser(r"a{9,a").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::RepetitionCountDecimalEmpty,
-            }
-        );
-        assert_eq!(
-            parser(r"a{9,9999999999}").parse().unwrap_err(),
-            TestError {
-                span: span(4..14),
-                kind: ast::ErrorKind::DecimalInvalid,
-            }
-        );
-        assert_eq!(
-            parser(r"a{9,").parse().unwrap_err(),
-            TestError {
-                span: span(1..4),
-                kind: ast::ErrorKind::RepetitionCountUnclosed,
-            }
-        );
-        assert_eq!(
-            parser(r"a{9,11").parse().unwrap_err(),
-            TestError {
-                span: span(1..6),
-                kind: ast::ErrorKind::RepetitionCountUnclosed,
-            }
-        );
-        assert_eq!(
-            parser(r"a{2,1}").parse().unwrap_err(),
-            TestError {
-                span: span(1..6),
-                kind: ast::ErrorKind::RepetitionCountInvalid,
-            }
-        );
-        assert_eq!(
-            parser(r"{5}").parse().unwrap_err(),
-            TestError {
-                span: span(0..0),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-        assert_eq!(
-            parser(r"|{5}").parse().unwrap_err(),
-            TestError {
-                span: span(1..1),
-                kind: ast::ErrorKind::RepetitionMissing,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_alternate() {
-        assert_eq!(
-            parser(r"a|b").parse(),
-            Ok(Ast::alternation(ast::Alternation {
-                span: span(0..3),
-                asts: vec![lit('a', 0), lit('b', 2)],
-            }))
-        );
-        assert_eq!(
-            parser(r"(a|b)").parse(),
-            Ok(group(
-                0..5,
-                1,
-                Ast::alternation(ast::Alternation {
-                    span: span(1..4),
-                    asts: vec![lit('a', 1), lit('b', 3)],
-                })
-            ))
-        );
-
-        assert_eq!(
-            parser(r"a|b|c").parse(),
-            Ok(Ast::alternation(ast::Alternation {
-                span: span(0..5),
-                asts: vec![lit('a', 0), lit('b', 2), lit('c', 4)],
-            }))
-        );
-        assert_eq!(
-            parser(r"ax|by|cz").parse(),
-            Ok(Ast::alternation(ast::Alternation {
-                span: span(0..8),
-                asts: vec![
-                    concat(0..2, vec![lit('a', 0), lit('x', 1)]),
-                    concat(3..5, vec![lit('b', 3), lit('y', 4)]),
-                    concat(6..8, vec![lit('c', 6), lit('z', 7)]),
-                ],
-            }))
-        );
-        assert_eq!(
-            parser(r"(ax|by|cz)").parse(),
-            Ok(group(
-                0..10,
-                1,
-                Ast::alternation(ast::Alternation {
-                    span: span(1..9),
-                    asts: vec![
-                        concat(1..3, vec![lit('a', 1), lit('x', 2)]),
-                        concat(4..6, vec![lit('b', 4), lit('y', 5)]),
-                        concat(7..9, vec![lit('c', 7), lit('z', 8)]),
-                    ],
-                })
-            ))
-        );
-        assert_eq!(
-            parser(r"(ax|(by|(cz)))").parse(),
-            Ok(group(
-                0..14,
-                1,
-                alt(
-                    1..13,
-                    vec![
-                        concat(1..3, vec![lit('a', 1), lit('x', 2)]),
-                        group(
-                            4..13,
-                            2,
-                            alt(
-                                5..12,
-                                vec![
-                                    concat(
-                                        5..7,
-                                        vec![lit('b', 5), lit('y', 6)]
-                                    ),
-                                    group(
-                                        8..12,
-                                        3,
-                                        concat(
-                                            9..11,
-                                            vec![lit('c', 9), lit('z', 10),]
-                                        )
-                                    ),
-                                ]
-                            )
-                        ),
-                    ]
-                )
-            ))
-        );
-
-        assert_eq!(
-            parser(r"|").parse(),
-            Ok(alt(
-                0..1,
-                vec![Ast::empty(span(0..0)), Ast::empty(span(1..1)),]
-            ))
-        );
-        assert_eq!(
-            parser(r"||").parse(),
-            Ok(alt(
-                0..2,
-                vec![
-                    Ast::empty(span(0..0)),
-                    Ast::empty(span(1..1)),
-                    Ast::empty(span(2..2)),
-                ]
-            ))
-        );
-        assert_eq!(
-            parser(r"a|").parse(),
-            Ok(alt(0..2, vec![lit('a', 0), Ast::empty(span(2..2)),]))
-        );
-        assert_eq!(
-            parser(r"|a").parse(),
-            Ok(alt(0..2, vec![Ast::empty(span(0..0)), lit('a', 1),]))
-        );
-
-        assert_eq!(
-            parser(r"(|)").parse(),
-            Ok(group(
-                0..3,
-                1,
-                alt(
-                    1..2,
-                    vec![Ast::empty(span(1..1)), Ast::empty(span(2..2)),]
-                )
-            ))
-        );
-        assert_eq!(
-            parser(r"(a|)").parse(),
-            Ok(group(
-                0..4,
-                1,
-                alt(1..3, vec![lit('a', 1), Ast::empty(span(3..3)),])
-            ))
-        );
-        assert_eq!(
-            parser(r"(|a)").parse(),
-            Ok(group(
-                0..4,
-                1,
-                alt(1..3, vec![Ast::empty(span(1..1)), lit('a', 2),])
-            ))
-        );
-
-        assert_eq!(
-            parser(r"a|b)").parse().unwrap_err(),
-            TestError {
-                span: span(3..4),
-                kind: ast::ErrorKind::GroupUnopened,
-            }
-        );
-        assert_eq!(
-            parser(r"(a|b").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::GroupUnclosed,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_unsupported_lookaround() {
-        assert_eq!(
-            parser(r"(?=a)").parse().unwrap_err(),
-            TestError {
-                span: span(0..3),
-                kind: ast::ErrorKind::UnsupportedLookAround,
-            }
-        );
-        assert_eq!(
-            parser(r"(?!a)").parse().unwrap_err(),
-            TestError {
-                span: span(0..3),
-                kind: ast::ErrorKind::UnsupportedLookAround,
-            }
-        );
-        assert_eq!(
-            parser(r"(?<=a)").parse().unwrap_err(),
-            TestError {
-                span: span(0..4),
-                kind: ast::ErrorKind::UnsupportedLookAround,
-            }
-        );
-        assert_eq!(
-            parser(r"(?<!a)").parse().unwrap_err(),
-            TestError {
-                span: span(0..4),
-                kind: ast::ErrorKind::UnsupportedLookAround,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_group() {
-        assert_eq!(
-            parser("(?i)").parse(),
-            Ok(Ast::flags(ast::SetFlags {
-                span: span(0..4),
-                flags: ast::Flags {
-                    span: span(2..3),
-                    items: vec![ast::FlagsItem {
-                        span: span(2..3),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::CaseInsensitive
-                        ),
-                    }],
-                },
-            }))
-        );
-        assert_eq!(
-            parser("(?iU)").parse(),
-            Ok(Ast::flags(ast::SetFlags {
-                span: span(0..5),
-                flags: ast::Flags {
-                    span: span(2..4),
-                    items: vec![
-                        ast::FlagsItem {
-                            span: span(2..3),
-                            kind: ast::FlagsItemKind::Flag(
-                                ast::Flag::CaseInsensitive
-                            ),
-                        },
-                        ast::FlagsItem {
-                            span: span(3..4),
-                            kind: ast::FlagsItemKind::Flag(
-                                ast::Flag::SwapGreed
-                            ),
-                        },
-                    ],
-                },
-            }))
-        );
-        assert_eq!(
-            parser("(?i-U)").parse(),
-            Ok(Ast::flags(ast::SetFlags {
-                span: span(0..6),
-                flags: ast::Flags {
-                    span: span(2..5),
-                    items: vec![
-                        ast::FlagsItem {
-                            span: span(2..3),
-                            kind: ast::FlagsItemKind::Flag(
-                                ast::Flag::CaseInsensitive
-                            ),
-                        },
-                        ast::FlagsItem {
-                            span: span(3..4),
-                            kind: ast::FlagsItemKind::Negation,
-                        },
-                        ast::FlagsItem {
-                            span: span(4..5),
-                            kind: ast::FlagsItemKind::Flag(
-                                ast::Flag::SwapGreed
-                            ),
-                        },
-                    ],
-                },
-            }))
-        );
-
-        assert_eq!(
-            parser("()").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..2),
-                kind: ast::GroupKind::CaptureIndex(1),
-                ast: Box::new(Ast::empty(span(1..1))),
-            }))
-        );
-        assert_eq!(
-            parser("(a)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..3),
-                kind: ast::GroupKind::CaptureIndex(1),
-                ast: Box::new(lit('a', 1)),
-            }))
-        );
-        assert_eq!(
-            parser("(())").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..4),
-                kind: ast::GroupKind::CaptureIndex(1),
-                ast: Box::new(Ast::group(ast::Group {
-                    span: span(1..3),
-                    kind: ast::GroupKind::CaptureIndex(2),
-                    ast: Box::new(Ast::empty(span(2..2))),
-                })),
-            }))
-        );
-
-        assert_eq!(
-            parser("(?:a)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..5),
-                kind: ast::GroupKind::NonCapturing(ast::Flags {
-                    span: span(2..2),
-                    items: vec![],
-                }),
-                ast: Box::new(lit('a', 3)),
-            }))
-        );
-
-        assert_eq!(
-            parser("(?i:a)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..6),
-                kind: ast::GroupKind::NonCapturing(ast::Flags {
-                    span: span(2..3),
-                    items: vec![ast::FlagsItem {
-                        span: span(2..3),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::CaseInsensitive
-                        ),
-                    },],
-                }),
-                ast: Box::new(lit('a', 4)),
-            }))
-        );
-        assert_eq!(
-            parser("(?i-U:a)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..8),
-                kind: ast::GroupKind::NonCapturing(ast::Flags {
-                    span: span(2..5),
-                    items: vec![
-                        ast::FlagsItem {
-                            span: span(2..3),
-                            kind: ast::FlagsItemKind::Flag(
-                                ast::Flag::CaseInsensitive
-                            ),
-                        },
-                        ast::FlagsItem {
-                            span: span(3..4),
-                            kind: ast::FlagsItemKind::Negation,
-                        },
-                        ast::FlagsItem {
-                            span: span(4..5),
-                            kind: ast::FlagsItemKind::Flag(
-                                ast::Flag::SwapGreed
-                            ),
-                        },
-                    ],
-                }),
-                ast: Box::new(lit('a', 6)),
-            }))
-        );
-
-        assert_eq!(
-            parser("(").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::GroupUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("(?").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::GroupUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("(?P").parse().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::FlagUnrecognized,
-            }
-        );
-        assert_eq!(
-            parser("(?P<").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::GroupNameUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser("(a").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::GroupUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("(()").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::GroupUnclosed,
-            }
-        );
-        assert_eq!(
-            parser(")").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::GroupUnopened,
-            }
-        );
-        assert_eq!(
-            parser("a)").parse().unwrap_err(),
-            TestError {
-                span: span(1..2),
-                kind: ast::ErrorKind::GroupUnopened,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_capture_name() {
-        assert_eq!(
-            parser("(?<a>z)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..7),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: false,
-                    name: ast::CaptureName {
-                        span: span(3..4),
-                        name: s("a"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(lit('z', 5)),
-            }))
-        );
-        assert_eq!(
-            parser("(?P<a>z)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..8),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: true,
-                    name: ast::CaptureName {
-                        span: span(4..5),
-                        name: s("a"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(lit('z', 6)),
-            }))
-        );
-        assert_eq!(
-            parser("(?P<abc>z)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..10),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: true,
-                    name: ast::CaptureName {
-                        span: span(4..7),
-                        name: s("abc"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(lit('z', 8)),
-            }))
-        );
-
-        assert_eq!(
-            parser("(?P<a_1>z)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..10),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: true,
-                    name: ast::CaptureName {
-                        span: span(4..7),
-                        name: s("a_1"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(lit('z', 8)),
-            }))
-        );
-
-        assert_eq!(
-            parser("(?P<a.1>z)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..10),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: true,
-                    name: ast::CaptureName {
-                        span: span(4..7),
-                        name: s("a.1"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(lit('z', 8)),
-            }))
-        );
-
-        assert_eq!(
-            parser("(?P<a[1]>z)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: span(0..11),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: true,
-                    name: ast::CaptureName {
-                        span: span(4..8),
-                        name: s("a[1]"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(lit('z', 9)),
-            }))
-        );
-
-        assert_eq!(
-            parser("(?P<a¾>)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(9, 1, 9),
-                ),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: true,
-                    name: ast::CaptureName {
-                        span: Span::new(
-                            Position::new(4, 1, 5),
-                            Position::new(7, 1, 7),
-                        ),
-                        name: s("a¾"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(Ast::empty(Span::new(
-                    Position::new(8, 1, 8),
-                    Position::new(8, 1, 8),
-                ))),
-            }))
-        );
-        assert_eq!(
-            parser("(?P<損歗>)").parse(),
-            Ok(Ast::group(ast::Group {
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(12, 1, 9),
-                ),
-                kind: ast::GroupKind::CaptureName {
-                    starts_with_p: true,
-                    name: ast::CaptureName {
-                        span: Span::new(
-                            Position::new(4, 1, 5),
-                            Position::new(10, 1, 7),
-                        ),
-                        name: s("損歗"),
-                        index: 1,
-                    }
-                },
-                ast: Box::new(Ast::empty(Span::new(
-                    Position::new(11, 1, 8),
-                    Position::new(11, 1, 8),
-                ))),
-            }))
-        );
-
-        assert_eq!(
-            parser("(?P<").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::GroupNameUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser("(?P<>z)").parse().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::GroupNameEmpty,
-            }
-        );
-        assert_eq!(
-            parser("(?P<a").parse().unwrap_err(),
-            TestError {
-                span: span(5..5),
-                kind: ast::ErrorKind::GroupNameUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser("(?P<ab").parse().unwrap_err(),
-            TestError {
-                span: span(6..6),
-                kind: ast::ErrorKind::GroupNameUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser("(?P<0a").parse().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<~").parse().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<abc~").parse().unwrap_err(),
-            TestError {
-                span: span(7..8),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<a>y)(?P<a>z)").parse().unwrap_err(),
-            TestError {
-                span: span(12..13),
-                kind: ast::ErrorKind::GroupNameDuplicate {
-                    original: span(4..5),
-                },
-            }
-        );
-        assert_eq!(
-            parser("(?P<5>)").parse().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<5a>)").parse().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<¾>)").parse().unwrap_err(),
-            TestError {
-                span: Span::new(
-                    Position::new(4, 1, 5),
-                    Position::new(6, 1, 6),
-                ),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<¾a>)").parse().unwrap_err(),
-            TestError {
-                span: Span::new(
-                    Position::new(4, 1, 5),
-                    Position::new(6, 1, 6),
-                ),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<☃>)").parse().unwrap_err(),
-            TestError {
-                span: Span::new(
-                    Position::new(4, 1, 5),
-                    Position::new(7, 1, 6),
-                ),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-        assert_eq!(
-            parser("(?P<a☃>)").parse().unwrap_err(),
-            TestError {
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(8, 1, 7),
-                ),
-                kind: ast::ErrorKind::GroupNameInvalid,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_flags() {
-        assert_eq!(
-            parser("i:").parse_flags(),
-            Ok(ast::Flags {
-                span: span(0..1),
-                items: vec![ast::FlagsItem {
-                    span: span(0..1),
-                    kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive),
-                }],
-            })
-        );
-        assert_eq!(
-            parser("i)").parse_flags(),
-            Ok(ast::Flags {
-                span: span(0..1),
-                items: vec![ast::FlagsItem {
-                    span: span(0..1),
-                    kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive),
-                }],
-            })
-        );
-
-        assert_eq!(
-            parser("isU:").parse_flags(),
-            Ok(ast::Flags {
-                span: span(0..3),
-                items: vec![
-                    ast::FlagsItem {
-                        span: span(0..1),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::CaseInsensitive
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(1..2),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::DotMatchesNewLine
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(2..3),
-                        kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed),
-                    },
-                ],
-            })
-        );
-
-        assert_eq!(
-            parser("-isU:").parse_flags(),
-            Ok(ast::Flags {
-                span: span(0..4),
-                items: vec![
-                    ast::FlagsItem {
-                        span: span(0..1),
-                        kind: ast::FlagsItemKind::Negation,
-                    },
-                    ast::FlagsItem {
-                        span: span(1..2),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::CaseInsensitive
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(2..3),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::DotMatchesNewLine
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(3..4),
-                        kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed),
-                    },
-                ],
-            })
-        );
-        assert_eq!(
-            parser("i-sU:").parse_flags(),
-            Ok(ast::Flags {
-                span: span(0..4),
-                items: vec![
-                    ast::FlagsItem {
-                        span: span(0..1),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::CaseInsensitive
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(1..2),
-                        kind: ast::FlagsItemKind::Negation,
-                    },
-                    ast::FlagsItem {
-                        span: span(2..3),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::DotMatchesNewLine
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(3..4),
-                        kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed),
-                    },
-                ],
-            })
-        );
-        assert_eq!(
-            parser("i-sR:").parse_flags(),
-            Ok(ast::Flags {
-                span: span(0..4),
-                items: vec![
-                    ast::FlagsItem {
-                        span: span(0..1),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::CaseInsensitive
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(1..2),
-                        kind: ast::FlagsItemKind::Negation,
-                    },
-                    ast::FlagsItem {
-                        span: span(2..3),
-                        kind: ast::FlagsItemKind::Flag(
-                            ast::Flag::DotMatchesNewLine
-                        ),
-                    },
-                    ast::FlagsItem {
-                        span: span(3..4),
-                        kind: ast::FlagsItemKind::Flag(ast::Flag::CRLF),
-                    },
-                ],
-            })
-        );
-
-        assert_eq!(
-            parser("isU").parse_flags().unwrap_err(),
-            TestError {
-                span: span(3..3),
-                kind: ast::ErrorKind::FlagUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser("isUa:").parse_flags().unwrap_err(),
-            TestError {
-                span: span(3..4),
-                kind: ast::ErrorKind::FlagUnrecognized,
-            }
-        );
-        assert_eq!(
-            parser("isUi:").parse_flags().unwrap_err(),
-            TestError {
-                span: span(3..4),
-                kind: ast::ErrorKind::FlagDuplicate { original: span(0..1) },
-            }
-        );
-        assert_eq!(
-            parser("i-sU-i:").parse_flags().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::FlagRepeatedNegation {
-                    original: span(1..2),
-                },
-            }
-        );
-        assert_eq!(
-            parser("-)").parse_flags().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::FlagDanglingNegation,
-            }
-        );
-        assert_eq!(
-            parser("i-)").parse_flags().unwrap_err(),
-            TestError {
-                span: span(1..2),
-                kind: ast::ErrorKind::FlagDanglingNegation,
-            }
-        );
-        assert_eq!(
-            parser("iU-)").parse_flags().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::FlagDanglingNegation,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_flag() {
-        assert_eq!(parser("i").parse_flag(), Ok(ast::Flag::CaseInsensitive));
-        assert_eq!(parser("m").parse_flag(), Ok(ast::Flag::MultiLine));
-        assert_eq!(parser("s").parse_flag(), Ok(ast::Flag::DotMatchesNewLine));
-        assert_eq!(parser("U").parse_flag(), Ok(ast::Flag::SwapGreed));
-        assert_eq!(parser("u").parse_flag(), Ok(ast::Flag::Unicode));
-        assert_eq!(parser("R").parse_flag(), Ok(ast::Flag::CRLF));
-        assert_eq!(parser("x").parse_flag(), Ok(ast::Flag::IgnoreWhitespace));
-
-        assert_eq!(
-            parser("a").parse_flag().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::FlagUnrecognized,
-            }
-        );
-        assert_eq!(
-            parser("☃").parse_flag().unwrap_err(),
-            TestError {
-                span: span_range("☃", 0..3),
-                kind: ast::ErrorKind::FlagUnrecognized,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_primitive_non_escape() {
-        assert_eq!(
-            parser(r".").parse_primitive(),
-            Ok(Primitive::Dot(span(0..1)))
-        );
-        assert_eq!(
-            parser(r"^").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..1),
-                kind: ast::AssertionKind::StartLine,
-            }))
-        );
-        assert_eq!(
-            parser(r"$").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..1),
-                kind: ast::AssertionKind::EndLine,
-            }))
-        );
-
-        assert_eq!(
-            parser(r"a").parse_primitive(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..1),
-                kind: ast::LiteralKind::Verbatim,
-                c: 'a',
-            }))
-        );
-        assert_eq!(
-            parser(r"|").parse_primitive(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..1),
-                kind: ast::LiteralKind::Verbatim,
-                c: '|',
-            }))
-        );
-        assert_eq!(
-            parser(r"☃").parse_primitive(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span_range("☃", 0..3),
-                kind: ast::LiteralKind::Verbatim,
-                c: '☃',
-            }))
-        );
-    }
-
-    #[test]
-    fn parse_escape() {
-        assert_eq!(
-            parser(r"\|").parse_primitive(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..2),
-                kind: ast::LiteralKind::Meta,
-                c: '|',
-            }))
-        );
-        let specials = &[
-            (r"\a", '\x07', ast::SpecialLiteralKind::Bell),
-            (r"\f", '\x0C', ast::SpecialLiteralKind::FormFeed),
-            (r"\t", '\t', ast::SpecialLiteralKind::Tab),
-            (r"\n", '\n', ast::SpecialLiteralKind::LineFeed),
-            (r"\r", '\r', ast::SpecialLiteralKind::CarriageReturn),
-            (r"\v", '\x0B', ast::SpecialLiteralKind::VerticalTab),
-        ];
-        for &(pat, c, ref kind) in specials {
-            assert_eq!(
-                parser(pat).parse_primitive(),
-                Ok(Primitive::Literal(ast::Literal {
-                    span: span(0..2),
-                    kind: ast::LiteralKind::Special(kind.clone()),
-                    c,
-                }))
-            );
-        }
-        assert_eq!(
-            parser(r"\A").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..2),
-                kind: ast::AssertionKind::StartText,
-            }))
-        );
-        assert_eq!(
-            parser(r"\z").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..2),
-                kind: ast::AssertionKind::EndText,
-            }))
-        );
-        assert_eq!(
-            parser(r"\b").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..2),
-                kind: ast::AssertionKind::WordBoundary,
-            }))
-        );
-        assert_eq!(
-            parser(r"\b{start}").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..9),
-                kind: ast::AssertionKind::WordBoundaryStart,
-            }))
-        );
-        assert_eq!(
-            parser(r"\b{end}").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..7),
-                kind: ast::AssertionKind::WordBoundaryEnd,
-            }))
-        );
-        assert_eq!(
-            parser(r"\b{start-half}").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..14),
-                kind: ast::AssertionKind::WordBoundaryStartHalf,
-            }))
-        );
-        assert_eq!(
-            parser(r"\b{end-half}").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..12),
-                kind: ast::AssertionKind::WordBoundaryEndHalf,
-            }))
-        );
-        assert_eq!(
-            parser(r"\<").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..2),
-                kind: ast::AssertionKind::WordBoundaryStartAngle,
-            }))
-        );
-        assert_eq!(
-            parser(r"\>").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..2),
-                kind: ast::AssertionKind::WordBoundaryEndAngle,
-            }))
-        );
-        assert_eq!(
-            parser(r"\B").parse_primitive(),
-            Ok(Primitive::Assertion(ast::Assertion {
-                span: span(0..2),
-                kind: ast::AssertionKind::NotWordBoundary,
-            }))
-        );
-
-        // We also support superfluous escapes in most cases now too.
-        for c in ['!', '@', '%', '"', '\'', '/', ' '] {
-            let pat = format!(r"\{}", c);
-            assert_eq!(
-                parser(&pat).parse_primitive(),
-                Ok(Primitive::Literal(ast::Literal {
-                    span: span(0..2),
-                    kind: ast::LiteralKind::Superfluous,
-                    c,
-                }))
-            );
-        }
-
-        // Some superfluous escapes, namely [0-9A-Za-z], are still banned. This
-        // gives flexibility for future evolution.
-        assert_eq!(
-            parser(r"\e").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::EscapeUnrecognized,
-            }
-        );
-        assert_eq!(
-            parser(r"\y").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::EscapeUnrecognized,
-            }
-        );
-
-        // Starting a special word boundary without any non-whitespace chars
-        // after the brace makes it ambiguous whether the user meant to write
-        // a counted repetition (probably not?) or an actual special word
-        // boundary assertion.
-        assert_eq!(
-            parser(r"\b{").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..3),
-                kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser_ignore_whitespace(r"\b{ ").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..4),
-                kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof,
-            }
-        );
-        // When 'x' is not enabled, the space is seen as a non-[-A-Za-z] char,
-        // and thus causes the parser to treat it as a counted repetition.
-        assert_eq!(
-            parser(r"\b{ ").parse().unwrap_err(),
-            TestError {
-                span: span(2..4),
-                kind: ast::ErrorKind::RepetitionCountUnclosed,
-            }
-        );
-        // In this case, we got some valid chars that makes it look like the
-        // user is writing one of the special word boundary assertions, but
-        // we forget to close the brace.
-        assert_eq!(
-            parser(r"\b{foo").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..6),
-                kind: ast::ErrorKind::SpecialWordBoundaryUnclosed,
-            }
-        );
-        // We get the same error as above, except it is provoked by seeing a
-        // char that we know is invalid before seeing a closing brace.
-        assert_eq!(
-            parser(r"\b{foo!}").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..6),
-                kind: ast::ErrorKind::SpecialWordBoundaryUnclosed,
-            }
-        );
-        // And this one occurs when, syntactically, everything looks okay, but
-        // we don't use a valid spelling of a word boundary assertion.
-        assert_eq!(
-            parser(r"\b{foo}").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..6),
-                kind: ast::ErrorKind::SpecialWordBoundaryUnrecognized,
-            }
-        );
-
-        // An unfinished escape is illegal.
-        assert_eq!(
-            parser(r"\").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_unsupported_backreference() {
-        assert_eq!(
-            parser(r"\0").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::UnsupportedBackreference,
-            }
-        );
-        assert_eq!(
-            parser(r"\9").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::UnsupportedBackreference,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_octal() {
-        for i in 0..511 {
-            let pat = format!(r"\{:o}", i);
-            assert_eq!(
-                parser_octal(&pat).parse_escape(),
-                Ok(Primitive::Literal(ast::Literal {
-                    span: span(0..pat.len()),
-                    kind: ast::LiteralKind::Octal,
-                    c: char::from_u32(i).unwrap(),
-                }))
-            );
-        }
-        assert_eq!(
-            parser_octal(r"\778").parse_escape(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..3),
-                kind: ast::LiteralKind::Octal,
-                c: '?',
-            }))
-        );
-        assert_eq!(
-            parser_octal(r"\7777").parse_escape(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..4),
-                kind: ast::LiteralKind::Octal,
-                c: '\u{01FF}',
-            }))
-        );
-        assert_eq!(
-            parser_octal(r"\778").parse(),
-            Ok(Ast::concat(ast::Concat {
-                span: span(0..4),
-                asts: vec![
-                    Ast::literal(ast::Literal {
-                        span: span(0..3),
-                        kind: ast::LiteralKind::Octal,
-                        c: '?',
-                    }),
-                    Ast::literal(ast::Literal {
-                        span: span(3..4),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: '8',
-                    }),
-                ],
-            }))
-        );
-        assert_eq!(
-            parser_octal(r"\7777").parse(),
-            Ok(Ast::concat(ast::Concat {
-                span: span(0..5),
-                asts: vec![
-                    Ast::literal(ast::Literal {
-                        span: span(0..4),
-                        kind: ast::LiteralKind::Octal,
-                        c: '\u{01FF}',
-                    }),
-                    Ast::literal(ast::Literal {
-                        span: span(4..5),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: '7',
-                    }),
-                ],
-            }))
-        );
-
-        assert_eq!(
-            parser_octal(r"\8").parse_escape().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::EscapeUnrecognized,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_hex_two() {
-        for i in 0..256 {
-            let pat = format!(r"\x{:02x}", i);
-            assert_eq!(
-                parser(&pat).parse_escape(),
-                Ok(Primitive::Literal(ast::Literal {
-                    span: span(0..pat.len()),
-                    kind: ast::LiteralKind::HexFixed(ast::HexLiteralKind::X),
-                    c: char::from_u32(i).unwrap(),
-                }))
-            );
-        }
-
-        assert_eq!(
-            parser(r"\xF").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..3),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\xG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\xFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..4),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_hex_four() {
-        for i in 0..65536 {
-            let c = match char::from_u32(i) {
-                None => continue,
-                Some(c) => c,
-            };
-            let pat = format!(r"\u{:04x}", i);
-            assert_eq!(
-                parser(&pat).parse_escape(),
-                Ok(Primitive::Literal(ast::Literal {
-                    span: span(0..pat.len()),
-                    kind: ast::LiteralKind::HexFixed(
-                        ast::HexLiteralKind::UnicodeShort
-                    ),
-                    c,
-                }))
-            );
-        }
-
-        assert_eq!(
-            parser(r"\uF").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..3),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\uG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\uFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..4),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\uFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\uFFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(5..6),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\uD800").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..6),
-                kind: ast::ErrorKind::EscapeHexInvalid,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_hex_eight() {
-        for i in 0..65536 {
-            let c = match char::from_u32(i) {
-                None => continue,
-                Some(c) => c,
-            };
-            let pat = format!(r"\U{:08x}", i);
-            assert_eq!(
-                parser(&pat).parse_escape(),
-                Ok(Primitive::Literal(ast::Literal {
-                    span: span(0..pat.len()),
-                    kind: ast::LiteralKind::HexFixed(
-                        ast::HexLiteralKind::UnicodeLong
-                    ),
-                    c,
-                }))
-            );
-        }
-
-        assert_eq!(
-            parser(r"\UF").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..3),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\UG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\UFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..4),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\UFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\UFFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(5..6),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\UFFFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(6..7),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\UFFFFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(7..8),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\UFFFFFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(8..9),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\UFFFFFFFG").parse_escape().unwrap_err(),
-            TestError {
-                span: span(9..10),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_hex_brace() {
-        assert_eq!(
-            parser(r"\u{26c4}").parse_escape(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..8),
-                kind: ast::LiteralKind::HexBrace(
-                    ast::HexLiteralKind::UnicodeShort
-                ),
-                c: '⛄',
-            }))
-        );
-        assert_eq!(
-            parser(r"\U{26c4}").parse_escape(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..8),
-                kind: ast::LiteralKind::HexBrace(
-                    ast::HexLiteralKind::UnicodeLong
-                ),
-                c: '⛄',
-            }))
-        );
-        assert_eq!(
-            parser(r"\x{26c4}").parse_escape(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..8),
-                kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X),
-                c: '⛄',
-            }))
-        );
-        assert_eq!(
-            parser(r"\x{26C4}").parse_escape(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..8),
-                kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X),
-                c: '⛄',
-            }))
-        );
-        assert_eq!(
-            parser(r"\x{10fFfF}").parse_escape(),
-            Ok(Primitive::Literal(ast::Literal {
-                span: span(0..10),
-                kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X),
-                c: '\u{10FFFF}',
-            }))
-        );
-
-        assert_eq!(
-            parser(r"\x").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..2),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\x{").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\x{FF").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..5),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\x{}").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..4),
-                kind: ast::ErrorKind::EscapeHexEmpty,
-            }
-        );
-        assert_eq!(
-            parser(r"\x{FGF}").parse_escape().unwrap_err(),
-            TestError {
-                span: span(4..5),
-                kind: ast::ErrorKind::EscapeHexInvalidDigit,
-            }
-        );
-        assert_eq!(
-            parser(r"\x{FFFFFF}").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..9),
-                kind: ast::ErrorKind::EscapeHexInvalid,
-            }
-        );
-        assert_eq!(
-            parser(r"\x{D800}").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..7),
-                kind: ast::ErrorKind::EscapeHexInvalid,
-            }
-        );
-        assert_eq!(
-            parser(r"\x{FFFFFFFFF}").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..12),
-                kind: ast::ErrorKind::EscapeHexInvalid,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_decimal() {
-        assert_eq!(parser("123").parse_decimal(), Ok(123));
-        assert_eq!(parser("0").parse_decimal(), Ok(0));
-        assert_eq!(parser("01").parse_decimal(), Ok(1));
-
-        assert_eq!(
-            parser("-1").parse_decimal().unwrap_err(),
-            TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty }
-        );
-        assert_eq!(
-            parser("").parse_decimal().unwrap_err(),
-            TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty }
-        );
-        assert_eq!(
-            parser("9999999999").parse_decimal().unwrap_err(),
-            TestError {
-                span: span(0..10),
-                kind: ast::ErrorKind::DecimalInvalid,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_set_class() {
-        fn union(span: Span, items: Vec<ast::ClassSetItem>) -> ast::ClassSet {
-            ast::ClassSet::union(ast::ClassSetUnion { span, items })
-        }
-
-        fn intersection(
-            span: Span,
-            lhs: ast::ClassSet,
-            rhs: ast::ClassSet,
-        ) -> ast::ClassSet {
-            ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp {
-                span,
-                kind: ast::ClassSetBinaryOpKind::Intersection,
-                lhs: Box::new(lhs),
-                rhs: Box::new(rhs),
-            })
-        }
-
-        fn difference(
-            span: Span,
-            lhs: ast::ClassSet,
-            rhs: ast::ClassSet,
-        ) -> ast::ClassSet {
-            ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp {
-                span,
-                kind: ast::ClassSetBinaryOpKind::Difference,
-                lhs: Box::new(lhs),
-                rhs: Box::new(rhs),
-            })
-        }
-
-        fn symdifference(
-            span: Span,
-            lhs: ast::ClassSet,
-            rhs: ast::ClassSet,
-        ) -> ast::ClassSet {
-            ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp {
-                span,
-                kind: ast::ClassSetBinaryOpKind::SymmetricDifference,
-                lhs: Box::new(lhs),
-                rhs: Box::new(rhs),
-            })
-        }
-
-        fn itemset(item: ast::ClassSetItem) -> ast::ClassSet {
-            ast::ClassSet::Item(item)
-        }
-
-        fn item_ascii(cls: ast::ClassAscii) -> ast::ClassSetItem {
-            ast::ClassSetItem::Ascii(cls)
-        }
-
-        fn item_unicode(cls: ast::ClassUnicode) -> ast::ClassSetItem {
-            ast::ClassSetItem::Unicode(cls)
-        }
-
-        fn item_perl(cls: ast::ClassPerl) -> ast::ClassSetItem {
-            ast::ClassSetItem::Perl(cls)
-        }
-
-        fn item_bracket(cls: ast::ClassBracketed) -> ast::ClassSetItem {
-            ast::ClassSetItem::Bracketed(Box::new(cls))
-        }
-
-        fn lit(span: Span, c: char) -> ast::ClassSetItem {
-            ast::ClassSetItem::Literal(ast::Literal {
-                span,
-                kind: ast::LiteralKind::Verbatim,
-                c,
-            })
-        }
-
-        fn empty(span: Span) -> ast::ClassSetItem {
-            ast::ClassSetItem::Empty(span)
-        }
-
-        fn range(span: Span, start: char, end: char) -> ast::ClassSetItem {
-            let pos1 = Position {
-                offset: span.start.offset + start.len_utf8(),
-                column: span.start.column + 1,
-                ..span.start
-            };
-            let pos2 = Position {
-                offset: span.end.offset - end.len_utf8(),
-                column: span.end.column - 1,
-                ..span.end
-            };
-            ast::ClassSetItem::Range(ast::ClassSetRange {
-                span,
-                start: ast::Literal {
-                    span: Span { end: pos1, ..span },
-                    kind: ast::LiteralKind::Verbatim,
-                    c: start,
-                },
-                end: ast::Literal {
-                    span: Span { start: pos2, ..span },
-                    kind: ast::LiteralKind::Verbatim,
-                    c: end,
-                },
-            })
-        }
-
-        fn alnum(span: Span, negated: bool) -> ast::ClassAscii {
-            ast::ClassAscii { span, kind: ast::ClassAsciiKind::Alnum, negated }
-        }
-
-        fn lower(span: Span, negated: bool) -> ast::ClassAscii {
-            ast::ClassAscii { span, kind: ast::ClassAsciiKind::Lower, negated }
-        }
-
-        assert_eq!(
-            parser("[[:alnum:]]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..11),
-                negated: false,
-                kind: itemset(item_ascii(alnum(span(1..10), false))),
-            }))
-        );
-        assert_eq!(
-            parser("[[[:alnum:]]]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..13),
-                negated: false,
-                kind: itemset(item_bracket(ast::ClassBracketed {
-                    span: span(1..12),
-                    negated: false,
-                    kind: itemset(item_ascii(alnum(span(2..11), false))),
-                })),
-            }))
-        );
-        assert_eq!(
-            parser("[[:alnum:]&&[:lower:]]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..22),
-                negated: false,
-                kind: intersection(
-                    span(1..21),
-                    itemset(item_ascii(alnum(span(1..10), false))),
-                    itemset(item_ascii(lower(span(12..21), false))),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser("[[:alnum:]--[:lower:]]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..22),
-                negated: false,
-                kind: difference(
-                    span(1..21),
-                    itemset(item_ascii(alnum(span(1..10), false))),
-                    itemset(item_ascii(lower(span(12..21), false))),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser("[[:alnum:]~~[:lower:]]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..22),
-                negated: false,
-                kind: symdifference(
-                    span(1..21),
-                    itemset(item_ascii(alnum(span(1..10), false))),
-                    itemset(item_ascii(lower(span(12..21), false))),
-                ),
-            }))
-        );
-
-        assert_eq!(
-            parser("[a]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..3),
-                negated: false,
-                kind: itemset(lit(span(1..2), 'a')),
-            }))
-        );
-        assert_eq!(
-            parser(r"[a\]]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..5),
-                negated: false,
-                kind: union(
-                    span(1..4),
-                    vec![
-                        lit(span(1..2), 'a'),
-                        ast::ClassSetItem::Literal(ast::Literal {
-                            span: span(2..4),
-                            kind: ast::LiteralKind::Meta,
-                            c: ']',
-                        }),
-                    ]
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[a\-z]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..6),
-                negated: false,
-                kind: union(
-                    span(1..5),
-                    vec![
-                        lit(span(1..2), 'a'),
-                        ast::ClassSetItem::Literal(ast::Literal {
-                            span: span(2..4),
-                            kind: ast::LiteralKind::Meta,
-                            c: '-',
-                        }),
-                        lit(span(4..5), 'z'),
-                    ]
-                ),
-            }))
-        );
-        assert_eq!(
-            parser("[ab]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..4),
-                negated: false,
-                kind: union(
-                    span(1..3),
-                    vec![lit(span(1..2), 'a'), lit(span(2..3), 'b'),]
-                ),
-            }))
-        );
-        assert_eq!(
-            parser("[a-]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..4),
-                negated: false,
-                kind: union(
-                    span(1..3),
-                    vec![lit(span(1..2), 'a'), lit(span(2..3), '-'),]
-                ),
-            }))
-        );
-        assert_eq!(
-            parser("[-a]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..4),
-                negated: false,
-                kind: union(
-                    span(1..3),
-                    vec![lit(span(1..2), '-'), lit(span(2..3), 'a'),]
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[\pL]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..5),
-                negated: false,
-                kind: itemset(item_unicode(ast::ClassUnicode {
-                    span: span(1..4),
-                    negated: false,
-                    kind: ast::ClassUnicodeKind::OneLetter('L'),
-                })),
-            }))
-        );
-        assert_eq!(
-            parser(r"[\w]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..4),
-                negated: false,
-                kind: itemset(item_perl(ast::ClassPerl {
-                    span: span(1..3),
-                    kind: ast::ClassPerlKind::Word,
-                    negated: false,
-                })),
-            }))
-        );
-        assert_eq!(
-            parser(r"[a\wz]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..6),
-                negated: false,
-                kind: union(
-                    span(1..5),
-                    vec![
-                        lit(span(1..2), 'a'),
-                        item_perl(ast::ClassPerl {
-                            span: span(2..4),
-                            kind: ast::ClassPerlKind::Word,
-                            negated: false,
-                        }),
-                        lit(span(4..5), 'z'),
-                    ]
-                ),
-            }))
-        );
-
-        assert_eq!(
-            parser("[a-z]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..5),
-                negated: false,
-                kind: itemset(range(span(1..4), 'a', 'z')),
-            }))
-        );
-        assert_eq!(
-            parser("[a-cx-z]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..8),
-                negated: false,
-                kind: union(
-                    span(1..7),
-                    vec![
-                        range(span(1..4), 'a', 'c'),
-                        range(span(4..7), 'x', 'z'),
-                    ]
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[\w&&a-cx-z]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..12),
-                negated: false,
-                kind: intersection(
-                    span(1..11),
-                    itemset(item_perl(ast::ClassPerl {
-                        span: span(1..3),
-                        kind: ast::ClassPerlKind::Word,
-                        negated: false,
-                    })),
-                    union(
-                        span(5..11),
-                        vec![
-                            range(span(5..8), 'a', 'c'),
-                            range(span(8..11), 'x', 'z'),
-                        ]
-                    ),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[a-cx-z&&\w]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..12),
-                negated: false,
-                kind: intersection(
-                    span(1..11),
-                    union(
-                        span(1..7),
-                        vec![
-                            range(span(1..4), 'a', 'c'),
-                            range(span(4..7), 'x', 'z'),
-                        ]
-                    ),
-                    itemset(item_perl(ast::ClassPerl {
-                        span: span(9..11),
-                        kind: ast::ClassPerlKind::Word,
-                        negated: false,
-                    })),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[a--b--c]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..9),
-                negated: false,
-                kind: difference(
-                    span(1..8),
-                    difference(
-                        span(1..5),
-                        itemset(lit(span(1..2), 'a')),
-                        itemset(lit(span(4..5), 'b')),
-                    ),
-                    itemset(lit(span(7..8), 'c')),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[a~~b~~c]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..9),
-                negated: false,
-                kind: symdifference(
-                    span(1..8),
-                    symdifference(
-                        span(1..5),
-                        itemset(lit(span(1..2), 'a')),
-                        itemset(lit(span(4..5), 'b')),
-                    ),
-                    itemset(lit(span(7..8), 'c')),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[\^&&^]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..7),
-                negated: false,
-                kind: intersection(
-                    span(1..6),
-                    itemset(ast::ClassSetItem::Literal(ast::Literal {
-                        span: span(1..3),
-                        kind: ast::LiteralKind::Meta,
-                        c: '^',
-                    })),
-                    itemset(lit(span(5..6), '^')),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[\&&&&]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..7),
-                negated: false,
-                kind: intersection(
-                    span(1..6),
-                    itemset(ast::ClassSetItem::Literal(ast::Literal {
-                        span: span(1..3),
-                        kind: ast::LiteralKind::Meta,
-                        c: '&',
-                    })),
-                    itemset(lit(span(5..6), '&')),
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[&&&&]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..6),
-                negated: false,
-                kind: intersection(
-                    span(1..5),
-                    intersection(
-                        span(1..3),
-                        itemset(empty(span(1..1))),
-                        itemset(empty(span(3..3))),
-                    ),
-                    itemset(empty(span(5..5))),
-                ),
-            }))
-        );
-
-        let pat = "[☃-⛄]";
-        assert_eq!(
-            parser(pat).parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span_range(pat, 0..9),
-                negated: false,
-                kind: itemset(ast::ClassSetItem::Range(ast::ClassSetRange {
-                    span: span_range(pat, 1..8),
-                    start: ast::Literal {
-                        span: span_range(pat, 1..4),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: '☃',
-                    },
-                    end: ast::Literal {
-                        span: span_range(pat, 5..8),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: '⛄',
-                    },
-                })),
-            }))
-        );
-
-        assert_eq!(
-            parser(r"[]]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..3),
-                negated: false,
-                kind: itemset(lit(span(1..2), ']')),
-            }))
-        );
-        assert_eq!(
-            parser(r"[]\[]").parse(),
-            Ok(Ast::class_bracketed(ast::ClassBracketed {
-                span: span(0..5),
-                negated: false,
-                kind: union(
-                    span(1..4),
-                    vec![
-                        lit(span(1..2), ']'),
-                        ast::ClassSetItem::Literal(ast::Literal {
-                            span: span(2..4),
-                            kind: ast::LiteralKind::Meta,
-                            c: '[',
-                        }),
-                    ]
-                ),
-            }))
-        );
-        assert_eq!(
-            parser(r"[\[]]").parse(),
-            Ok(concat(
-                0..5,
-                vec![
-                    Ast::class_bracketed(ast::ClassBracketed {
-                        span: span(0..4),
-                        negated: false,
-                        kind: itemset(ast::ClassSetItem::Literal(
-                            ast::Literal {
-                                span: span(1..3),
-                                kind: ast::LiteralKind::Meta,
-                                c: '[',
-                            }
-                        )),
-                    }),
-                    Ast::literal(ast::Literal {
-                        span: span(4..5),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: ']',
-                    }),
-                ]
-            ))
-        );
-
-        assert_eq!(
-            parser("[").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("[[").parse().unwrap_err(),
-            TestError {
-                span: span(1..2),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("[[-]").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("[[[:alnum:]").parse().unwrap_err(),
-            TestError {
-                span: span(1..2),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser(r"[\b]").parse().unwrap_err(),
-            TestError {
-                span: span(1..3),
-                kind: ast::ErrorKind::ClassEscapeInvalid,
-            }
-        );
-        assert_eq!(
-            parser(r"[\w-a]").parse().unwrap_err(),
-            TestError {
-                span: span(1..3),
-                kind: ast::ErrorKind::ClassRangeLiteral,
-            }
-        );
-        assert_eq!(
-            parser(r"[a-\w]").parse().unwrap_err(),
-            TestError {
-                span: span(3..5),
-                kind: ast::ErrorKind::ClassRangeLiteral,
-            }
-        );
-        assert_eq!(
-            parser(r"[z-a]").parse().unwrap_err(),
-            TestError {
-                span: span(1..4),
-                kind: ast::ErrorKind::ClassRangeInvalid,
-            }
-        );
-
-        assert_eq!(
-            parser_ignore_whitespace("[a ").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser_ignore_whitespace("[a- ").parse().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_set_class_open() {
-        assert_eq!(parser("[a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..1),
-                negated: false,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(1..1),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion { span: span(1..1), items: vec![] };
-            Ok((set, union))
-        });
-        assert_eq!(
-            parser_ignore_whitespace("[   a]").parse_set_class_open(),
-            {
-                let set = ast::ClassBracketed {
-                    span: span(0..4),
-                    negated: false,
-                    kind: ast::ClassSet::union(ast::ClassSetUnion {
-                        span: span(4..4),
-                        items: vec![],
-                    }),
-                };
-                let union =
-                    ast::ClassSetUnion { span: span(4..4), items: vec![] };
-                Ok((set, union))
-            }
-        );
-        assert_eq!(parser("[^a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..2),
-                negated: true,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(2..2),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion { span: span(2..2), items: vec![] };
-            Ok((set, union))
-        });
-        assert_eq!(
-            parser_ignore_whitespace("[ ^ a]").parse_set_class_open(),
-            {
-                let set = ast::ClassBracketed {
-                    span: span(0..4),
-                    negated: true,
-                    kind: ast::ClassSet::union(ast::ClassSetUnion {
-                        span: span(4..4),
-                        items: vec![],
-                    }),
-                };
-                let union =
-                    ast::ClassSetUnion { span: span(4..4), items: vec![] };
-                Ok((set, union))
-            }
-        );
-        assert_eq!(parser("[-a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..2),
-                negated: false,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(1..1),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion {
-                span: span(1..2),
-                items: vec![ast::ClassSetItem::Literal(ast::Literal {
-                    span: span(1..2),
-                    kind: ast::LiteralKind::Verbatim,
-                    c: '-',
-                })],
-            };
-            Ok((set, union))
-        });
-        assert_eq!(
-            parser_ignore_whitespace("[ - a]").parse_set_class_open(),
-            {
-                let set = ast::ClassBracketed {
-                    span: span(0..4),
-                    negated: false,
-                    kind: ast::ClassSet::union(ast::ClassSetUnion {
-                        span: span(2..2),
-                        items: vec![],
-                    }),
-                };
-                let union = ast::ClassSetUnion {
-                    span: span(2..3),
-                    items: vec![ast::ClassSetItem::Literal(ast::Literal {
-                        span: span(2..3),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: '-',
-                    })],
-                };
-                Ok((set, union))
-            }
-        );
-        assert_eq!(parser("[^-a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..3),
-                negated: true,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(2..2),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion {
-                span: span(2..3),
-                items: vec![ast::ClassSetItem::Literal(ast::Literal {
-                    span: span(2..3),
-                    kind: ast::LiteralKind::Verbatim,
-                    c: '-',
-                })],
-            };
-            Ok((set, union))
-        });
-        assert_eq!(parser("[--a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..3),
-                negated: false,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(1..1),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion {
-                span: span(1..3),
-                items: vec![
-                    ast::ClassSetItem::Literal(ast::Literal {
-                        span: span(1..2),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: '-',
-                    }),
-                    ast::ClassSetItem::Literal(ast::Literal {
-                        span: span(2..3),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: '-',
-                    }),
-                ],
-            };
-            Ok((set, union))
-        });
-        assert_eq!(parser("[]a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..2),
-                negated: false,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(1..1),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion {
-                span: span(1..2),
-                items: vec![ast::ClassSetItem::Literal(ast::Literal {
-                    span: span(1..2),
-                    kind: ast::LiteralKind::Verbatim,
-                    c: ']',
-                })],
-            };
-            Ok((set, union))
-        });
-        assert_eq!(
-            parser_ignore_whitespace("[ ] a]").parse_set_class_open(),
-            {
-                let set = ast::ClassBracketed {
-                    span: span(0..4),
-                    negated: false,
-                    kind: ast::ClassSet::union(ast::ClassSetUnion {
-                        span: span(2..2),
-                        items: vec![],
-                    }),
-                };
-                let union = ast::ClassSetUnion {
-                    span: span(2..3),
-                    items: vec![ast::ClassSetItem::Literal(ast::Literal {
-                        span: span(2..3),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: ']',
-                    })],
-                };
-                Ok((set, union))
-            }
-        );
-        assert_eq!(parser("[^]a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..3),
-                negated: true,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(2..2),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion {
-                span: span(2..3),
-                items: vec![ast::ClassSetItem::Literal(ast::Literal {
-                    span: span(2..3),
-                    kind: ast::LiteralKind::Verbatim,
-                    c: ']',
-                })],
-            };
-            Ok((set, union))
-        });
-        assert_eq!(parser("[-]a]").parse_set_class_open(), {
-            let set = ast::ClassBracketed {
-                span: span(0..2),
-                negated: false,
-                kind: ast::ClassSet::union(ast::ClassSetUnion {
-                    span: span(1..1),
-                    items: vec![],
-                }),
-            };
-            let union = ast::ClassSetUnion {
-                span: span(1..2),
-                items: vec![ast::ClassSetItem::Literal(ast::Literal {
-                    span: span(1..2),
-                    kind: ast::LiteralKind::Verbatim,
-                    c: '-',
-                })],
-            };
-            Ok((set, union))
-        });
-
-        assert_eq!(
-            parser("[").parse_set_class_open().unwrap_err(),
-            TestError {
-                span: span(0..1),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser_ignore_whitespace("[    ")
-                .parse_set_class_open()
-                .unwrap_err(),
-            TestError {
-                span: span(0..5),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("[^").parse_set_class_open().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("[]").parse_set_class_open().unwrap_err(),
-            TestError {
-                span: span(0..2),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("[-").parse_set_class_open().unwrap_err(),
-            TestError {
-                span: span(0..0),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-        assert_eq!(
-            parser("[--").parse_set_class_open().unwrap_err(),
-            TestError {
-                span: span(0..0),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-
-        // See: https://github.com/rust-lang/regex/issues/792
-        assert_eq!(
-            parser("(?x)[-#]").parse_with_comments().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::ClassUnclosed,
-            }
-        );
-    }
-
-    #[test]
-    fn maybe_parse_ascii_class() {
-        assert_eq!(
-            parser(r"[:alnum:]").maybe_parse_ascii_class(),
-            Some(ast::ClassAscii {
-                span: span(0..9),
-                kind: ast::ClassAsciiKind::Alnum,
-                negated: false,
-            })
-        );
-        assert_eq!(
-            parser(r"[:alnum:]A").maybe_parse_ascii_class(),
-            Some(ast::ClassAscii {
-                span: span(0..9),
-                kind: ast::ClassAsciiKind::Alnum,
-                negated: false,
-            })
-        );
-        assert_eq!(
-            parser(r"[:^alnum:]").maybe_parse_ascii_class(),
-            Some(ast::ClassAscii {
-                span: span(0..10),
-                kind: ast::ClassAsciiKind::Alnum,
-                negated: true,
-            })
-        );
-
-        let p = parser(r"[:");
-        assert_eq!(p.maybe_parse_ascii_class(), None);
-        assert_eq!(p.offset(), 0);
-
-        let p = parser(r"[:^");
-        assert_eq!(p.maybe_parse_ascii_class(), None);
-        assert_eq!(p.offset(), 0);
-
-        let p = parser(r"[^:alnum:]");
-        assert_eq!(p.maybe_parse_ascii_class(), None);
-        assert_eq!(p.offset(), 0);
-
-        let p = parser(r"[:alnnum:]");
-        assert_eq!(p.maybe_parse_ascii_class(), None);
-        assert_eq!(p.offset(), 0);
-
-        let p = parser(r"[:alnum]");
-        assert_eq!(p.maybe_parse_ascii_class(), None);
-        assert_eq!(p.offset(), 0);
-
-        let p = parser(r"[:alnum:");
-        assert_eq!(p.maybe_parse_ascii_class(), None);
-        assert_eq!(p.offset(), 0);
-    }
-
-    #[test]
-    fn parse_unicode_class() {
-        assert_eq!(
-            parser(r"\pN").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..3),
-                negated: false,
-                kind: ast::ClassUnicodeKind::OneLetter('N'),
-            }))
-        );
-        assert_eq!(
-            parser(r"\PN").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..3),
-                negated: true,
-                kind: ast::ClassUnicodeKind::OneLetter('N'),
-            }))
-        );
-        assert_eq!(
-            parser(r"\p{N}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..5),
-                negated: false,
-                kind: ast::ClassUnicodeKind::Named(s("N")),
-            }))
-        );
-        assert_eq!(
-            parser(r"\P{N}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..5),
-                negated: true,
-                kind: ast::ClassUnicodeKind::Named(s("N")),
-            }))
-        );
-        assert_eq!(
-            parser(r"\p{Greek}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..9),
-                negated: false,
-                kind: ast::ClassUnicodeKind::Named(s("Greek")),
-            }))
-        );
-
-        assert_eq!(
-            parser(r"\p{scx:Katakana}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..16),
-                negated: false,
-                kind: ast::ClassUnicodeKind::NamedValue {
-                    op: ast::ClassUnicodeOpKind::Colon,
-                    name: s("scx"),
-                    value: s("Katakana"),
-                },
-            }))
-        );
-        assert_eq!(
-            parser(r"\p{scx=Katakana}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..16),
-                negated: false,
-                kind: ast::ClassUnicodeKind::NamedValue {
-                    op: ast::ClassUnicodeOpKind::Equal,
-                    name: s("scx"),
-                    value: s("Katakana"),
-                },
-            }))
-        );
-        assert_eq!(
-            parser(r"\p{scx!=Katakana}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..17),
-                negated: false,
-                kind: ast::ClassUnicodeKind::NamedValue {
-                    op: ast::ClassUnicodeOpKind::NotEqual,
-                    name: s("scx"),
-                    value: s("Katakana"),
-                },
-            }))
-        );
-
-        assert_eq!(
-            parser(r"\p{:}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..5),
-                negated: false,
-                kind: ast::ClassUnicodeKind::NamedValue {
-                    op: ast::ClassUnicodeOpKind::Colon,
-                    name: s(""),
-                    value: s(""),
-                },
-            }))
-        );
-        assert_eq!(
-            parser(r"\p{=}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..5),
-                negated: false,
-                kind: ast::ClassUnicodeKind::NamedValue {
-                    op: ast::ClassUnicodeOpKind::Equal,
-                    name: s(""),
-                    value: s(""),
-                },
-            }))
-        );
-        assert_eq!(
-            parser(r"\p{!=}").parse_escape(),
-            Ok(Primitive::Unicode(ast::ClassUnicode {
-                span: span(0..6),
-                negated: false,
-                kind: ast::ClassUnicodeKind::NamedValue {
-                    op: ast::ClassUnicodeOpKind::NotEqual,
-                    name: s(""),
-                    value: s(""),
-                },
-            }))
-        );
-
-        assert_eq!(
-            parser(r"\p").parse_escape().unwrap_err(),
-            TestError {
-                span: span(2..2),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\p{").parse_escape().unwrap_err(),
-            TestError {
-                span: span(3..3),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\p{N").parse_escape().unwrap_err(),
-            TestError {
-                span: span(4..4),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-        assert_eq!(
-            parser(r"\p{Greek").parse_escape().unwrap_err(),
-            TestError {
-                span: span(8..8),
-                kind: ast::ErrorKind::EscapeUnexpectedEof,
-            }
-        );
-
-        assert_eq!(
-            parser(r"\pNz").parse(),
-            Ok(Ast::concat(ast::Concat {
-                span: span(0..4),
-                asts: vec![
-                    Ast::class_unicode(ast::ClassUnicode {
-                        span: span(0..3),
-                        negated: false,
-                        kind: ast::ClassUnicodeKind::OneLetter('N'),
-                    }),
-                    Ast::literal(ast::Literal {
-                        span: span(3..4),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: 'z',
-                    }),
-                ],
-            }))
-        );
-        assert_eq!(
-            parser(r"\p{Greek}z").parse(),
-            Ok(Ast::concat(ast::Concat {
-                span: span(0..10),
-                asts: vec![
-                    Ast::class_unicode(ast::ClassUnicode {
-                        span: span(0..9),
-                        negated: false,
-                        kind: ast::ClassUnicodeKind::Named(s("Greek")),
-                    }),
-                    Ast::literal(ast::Literal {
-                        span: span(9..10),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: 'z',
-                    }),
-                ],
-            }))
-        );
-        assert_eq!(
-            parser(r"\p\{").parse().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::UnicodeClassInvalid,
-            }
-        );
-        assert_eq!(
-            parser(r"\P\{").parse().unwrap_err(),
-            TestError {
-                span: span(2..3),
-                kind: ast::ErrorKind::UnicodeClassInvalid,
-            }
-        );
-    }
-
-    #[test]
-    fn parse_perl_class() {
-        assert_eq!(
-            parser(r"\d").parse_escape(),
-            Ok(Primitive::Perl(ast::ClassPerl {
-                span: span(0..2),
-                kind: ast::ClassPerlKind::Digit,
-                negated: false,
-            }))
-        );
-        assert_eq!(
-            parser(r"\D").parse_escape(),
-            Ok(Primitive::Perl(ast::ClassPerl {
-                span: span(0..2),
-                kind: ast::ClassPerlKind::Digit,
-                negated: true,
-            }))
-        );
-        assert_eq!(
-            parser(r"\s").parse_escape(),
-            Ok(Primitive::Perl(ast::ClassPerl {
-                span: span(0..2),
-                kind: ast::ClassPerlKind::Space,
-                negated: false,
-            }))
-        );
-        assert_eq!(
-            parser(r"\S").parse_escape(),
-            Ok(Primitive::Perl(ast::ClassPerl {
-                span: span(0..2),
-                kind: ast::ClassPerlKind::Space,
-                negated: true,
-            }))
-        );
-        assert_eq!(
-            parser(r"\w").parse_escape(),
-            Ok(Primitive::Perl(ast::ClassPerl {
-                span: span(0..2),
-                kind: ast::ClassPerlKind::Word,
-                negated: false,
-            }))
-        );
-        assert_eq!(
-            parser(r"\W").parse_escape(),
-            Ok(Primitive::Perl(ast::ClassPerl {
-                span: span(0..2),
-                kind: ast::ClassPerlKind::Word,
-                negated: true,
-            }))
-        );
-
-        assert_eq!(
-            parser(r"\d").parse(),
-            Ok(Ast::class_perl(ast::ClassPerl {
-                span: span(0..2),
-                kind: ast::ClassPerlKind::Digit,
-                negated: false,
-            }))
-        );
-        assert_eq!(
-            parser(r"\dz").parse(),
-            Ok(Ast::concat(ast::Concat {
-                span: span(0..3),
-                asts: vec![
-                    Ast::class_perl(ast::ClassPerl {
-                        span: span(0..2),
-                        kind: ast::ClassPerlKind::Digit,
-                        negated: false,
-                    }),
-                    Ast::literal(ast::Literal {
-                        span: span(2..3),
-                        kind: ast::LiteralKind::Verbatim,
-                        c: 'z',
-                    }),
-                ],
-            }))
-        );
-    }
-
-    // This tests a bug fix where the nest limit checker wasn't decrementing
-    // its depth during post-traversal, which causes long regexes to trip
-    // the default limit too aggressively.
-    #[test]
-    fn regression_454_nest_too_big() {
-        let pattern = r#"
-        2(?:
-          [45]\d{3}|
-          7(?:
-            1[0-267]|
-            2[0-289]|
-            3[0-29]|
-            4[01]|
-            5[1-3]|
-            6[013]|
-            7[0178]|
-            91
-          )|
-          8(?:
-            0[125]|
-            [139][1-6]|
-            2[0157-9]|
-            41|
-            6[1-35]|
-            7[1-5]|
-            8[1-8]|
-            90
-          )|
-          9(?:
-            0[0-2]|
-            1[0-4]|
-            2[568]|
-            3[3-6]|
-            5[5-7]|
-            6[0167]|
-            7[15]|
-            8[0146-9]
-          )
-        )\d{4}
-        "#;
-        assert!(parser_nest_limit(pattern, 50).parse().is_ok());
-    }
-
-    // This tests that we treat a trailing `-` in a character class as a
-    // literal `-` even when whitespace mode is enabled and there is whitespace
-    // after the trailing `-`.
-    #[test]
-    fn regression_455_trailing_dash_ignore_whitespace() {
-        assert!(parser("(?x)[ / - ]").parse().is_ok());
-        assert!(parser("(?x)[ a - ]").parse().is_ok());
-        assert!(parser(
-            "(?x)[
-            a
-            - ]
-        "
-        )
-        .parse()
-        .is_ok());
-        assert!(parser(
-            "(?x)[
-            a # wat
-            - ]
-        "
-        )
-        .parse()
-        .is_ok());
-
-        assert!(parser("(?x)[ / -").parse().is_err());
-        assert!(parser("(?x)[ / - ").parse().is_err());
-        assert!(parser(
-            "(?x)[
-            / -
-        "
-        )
-        .parse()
-        .is_err());
-        assert!(parser(
-            "(?x)[
-            / - # wat
-        "
-        )
-        .parse()
-        .is_err());
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/print.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/print.rs
deleted file mode 100644
index 1ceb3c7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/print.rs
+++ /dev/null
@@ -1,577 +0,0 @@
-/*!
-This module provides a regular expression printer for `Ast`.
-*/
-
-use core::fmt;
-
-use crate::ast::{
-    self,
-    visitor::{self, Visitor},
-    Ast,
-};
-
-/// A builder for constructing a printer.
-///
-/// Note that since a printer doesn't have any configuration knobs, this type
-/// remains unexported.
-#[derive(Clone, Debug)]
-struct PrinterBuilder {
-    _priv: (),
-}
-
-impl Default for PrinterBuilder {
-    fn default() -> PrinterBuilder {
-        PrinterBuilder::new()
-    }
-}
-
-impl PrinterBuilder {
-    fn new() -> PrinterBuilder {
-        PrinterBuilder { _priv: () }
-    }
-
-    fn build(&self) -> Printer {
-        Printer { _priv: () }
-    }
-}
-
-/// A printer for a regular expression abstract syntax tree.
-///
-/// A printer converts an abstract syntax tree (AST) to a regular expression
-/// pattern string. This particular printer uses constant stack space and heap
-/// space proportional to the size of the AST.
-///
-/// This printer will not necessarily preserve the original formatting of the
-/// regular expression pattern string. For example, all whitespace and comments
-/// are ignored.
-#[derive(Debug)]
-pub struct Printer {
-    _priv: (),
-}
-
-impl Printer {
-    /// Create a new printer.
-    pub fn new() -> Printer {
-        PrinterBuilder::new().build()
-    }
-
-    /// Print the given `Ast` to the given writer. The writer must implement
-    /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used
-    /// here are a `fmt::Formatter` (which is available in `fmt::Display`
-    /// implementations) or a `&mut String`.
-    pub fn print<W: fmt::Write>(&mut self, ast: &Ast, wtr: W) -> fmt::Result {
-        visitor::visit(ast, Writer { wtr })
-    }
-}
-
-#[derive(Debug)]
-struct Writer<W> {
-    wtr: W,
-}
-
-impl<W: fmt::Write> Visitor for Writer<W> {
-    type Output = ();
-    type Err = fmt::Error;
-
-    fn finish(self) -> fmt::Result {
-        Ok(())
-    }
-
-    fn visit_pre(&mut self, ast: &Ast) -> fmt::Result {
-        match *ast {
-            Ast::Group(ref x) => self.fmt_group_pre(x),
-            Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_pre(x),
-            _ => Ok(()),
-        }
-    }
-
-    fn visit_post(&mut self, ast: &Ast) -> fmt::Result {
-        match *ast {
-            Ast::Empty(_) => Ok(()),
-            Ast::Flags(ref x) => self.fmt_set_flags(x),
-            Ast::Literal(ref x) => self.fmt_literal(x),
-            Ast::Dot(_) => self.wtr.write_str("."),
-            Ast::Assertion(ref x) => self.fmt_assertion(x),
-            Ast::ClassPerl(ref x) => self.fmt_class_perl(x),
-            Ast::ClassUnicode(ref x) => self.fmt_class_unicode(x),
-            Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_post(x),
-            Ast::Repetition(ref x) => self.fmt_repetition(x),
-            Ast::Group(ref x) => self.fmt_group_post(x),
-            Ast::Alternation(_) => Ok(()),
-            Ast::Concat(_) => Ok(()),
-        }
-    }
-
-    fn visit_alternation_in(&mut self) -> fmt::Result {
-        self.wtr.write_str("|")
-    }
-
-    fn visit_class_set_item_pre(
-        &mut self,
-        ast: &ast::ClassSetItem,
-    ) -> Result<(), Self::Err> {
-        match *ast {
-            ast::ClassSetItem::Bracketed(ref x) => {
-                self.fmt_class_bracketed_pre(x)
-            }
-            _ => Ok(()),
-        }
-    }
-
-    fn visit_class_set_item_post(
-        &mut self,
-        ast: &ast::ClassSetItem,
-    ) -> Result<(), Self::Err> {
-        use crate::ast::ClassSetItem::*;
-
-        match *ast {
-            Empty(_) => Ok(()),
-            Literal(ref x) => self.fmt_literal(x),
-            Range(ref x) => {
-                self.fmt_literal(&x.start)?;
-                self.wtr.write_str("-")?;
-                self.fmt_literal(&x.end)?;
-                Ok(())
-            }
-            Ascii(ref x) => self.fmt_class_ascii(x),
-            Unicode(ref x) => self.fmt_class_unicode(x),
-            Perl(ref x) => self.fmt_class_perl(x),
-            Bracketed(ref x) => self.fmt_class_bracketed_post(x),
-            Union(_) => Ok(()),
-        }
-    }
-
-    fn visit_class_set_binary_op_in(
-        &mut self,
-        ast: &ast::ClassSetBinaryOp,
-    ) -> Result<(), Self::Err> {
-        self.fmt_class_set_binary_op_kind(&ast.kind)
-    }
-}
-
-impl<W: fmt::Write> Writer<W> {
-    fn fmt_group_pre(&mut self, ast: &ast::Group) -> fmt::Result {
-        use crate::ast::GroupKind::*;
-        match ast.kind {
-            CaptureIndex(_) => self.wtr.write_str("("),
-            CaptureName { ref name, starts_with_p } => {
-                let start = if starts_with_p { "(?P<" } else { "(?<" };
-                self.wtr.write_str(start)?;
-                self.wtr.write_str(&name.name)?;
-                self.wtr.write_str(">")?;
-                Ok(())
-            }
-            NonCapturing(ref flags) => {
-                self.wtr.write_str("(?")?;
-                self.fmt_flags(flags)?;
-                self.wtr.write_str(":")?;
-                Ok(())
-            }
-        }
-    }
-
-    fn fmt_group_post(&mut self, _ast: &ast::Group) -> fmt::Result {
-        self.wtr.write_str(")")
-    }
-
-    fn fmt_repetition(&mut self, ast: &ast::Repetition) -> fmt::Result {
-        use crate::ast::RepetitionKind::*;
-        match ast.op.kind {
-            ZeroOrOne if ast.greedy => self.wtr.write_str("?"),
-            ZeroOrOne => self.wtr.write_str("??"),
-            ZeroOrMore if ast.greedy => self.wtr.write_str("*"),
-            ZeroOrMore => self.wtr.write_str("*?"),
-            OneOrMore if ast.greedy => self.wtr.write_str("+"),
-            OneOrMore => self.wtr.write_str("+?"),
-            Range(ref x) => {
-                self.fmt_repetition_range(x)?;
-                if !ast.greedy {
-                    self.wtr.write_str("?")?;
-                }
-                Ok(())
-            }
-        }
-    }
-
-    fn fmt_repetition_range(
-        &mut self,
-        ast: &ast::RepetitionRange,
-    ) -> fmt::Result {
-        use crate::ast::RepetitionRange::*;
-        match *ast {
-            Exactly(x) => write!(self.wtr, "{{{}}}", x),
-            AtLeast(x) => write!(self.wtr, "{{{},}}", x),
-            Bounded(x, y) => write!(self.wtr, "{{{},{}}}", x, y),
-        }
-    }
-
-    fn fmt_literal(&mut self, ast: &ast::Literal) -> fmt::Result {
-        use crate::ast::LiteralKind::*;
-
-        match ast.kind {
-            Verbatim => self.wtr.write_char(ast.c),
-            Meta | Superfluous => write!(self.wtr, r"\{}", ast.c),
-            Octal => write!(self.wtr, r"\{:o}", u32::from(ast.c)),
-            HexFixed(ast::HexLiteralKind::X) => {
-                write!(self.wtr, r"\x{:02X}", u32::from(ast.c))
-            }
-            HexFixed(ast::HexLiteralKind::UnicodeShort) => {
-                write!(self.wtr, r"\u{:04X}", u32::from(ast.c))
-            }
-            HexFixed(ast::HexLiteralKind::UnicodeLong) => {
-                write!(self.wtr, r"\U{:08X}", u32::from(ast.c))
-            }
-            HexBrace(ast::HexLiteralKind::X) => {
-                write!(self.wtr, r"\x{{{:X}}}", u32::from(ast.c))
-            }
-            HexBrace(ast::HexLiteralKind::UnicodeShort) => {
-                write!(self.wtr, r"\u{{{:X}}}", u32::from(ast.c))
-            }
-            HexBrace(ast::HexLiteralKind::UnicodeLong) => {
-                write!(self.wtr, r"\U{{{:X}}}", u32::from(ast.c))
-            }
-            Special(ast::SpecialLiteralKind::Bell) => {
-                self.wtr.write_str(r"\a")
-            }
-            Special(ast::SpecialLiteralKind::FormFeed) => {
-                self.wtr.write_str(r"\f")
-            }
-            Special(ast::SpecialLiteralKind::Tab) => self.wtr.write_str(r"\t"),
-            Special(ast::SpecialLiteralKind::LineFeed) => {
-                self.wtr.write_str(r"\n")
-            }
-            Special(ast::SpecialLiteralKind::CarriageReturn) => {
-                self.wtr.write_str(r"\r")
-            }
-            Special(ast::SpecialLiteralKind::VerticalTab) => {
-                self.wtr.write_str(r"\v")
-            }
-            Special(ast::SpecialLiteralKind::Space) => {
-                self.wtr.write_str(r"\ ")
-            }
-        }
-    }
-
-    fn fmt_assertion(&mut self, ast: &ast::Assertion) -> fmt::Result {
-        use crate::ast::AssertionKind::*;
-        match ast.kind {
-            StartLine => self.wtr.write_str("^"),
-            EndLine => self.wtr.write_str("$"),
-            StartText => self.wtr.write_str(r"\A"),
-            EndText => self.wtr.write_str(r"\z"),
-            WordBoundary => self.wtr.write_str(r"\b"),
-            NotWordBoundary => self.wtr.write_str(r"\B"),
-            WordBoundaryStart => self.wtr.write_str(r"\b{start}"),
-            WordBoundaryEnd => self.wtr.write_str(r"\b{end}"),
-            WordBoundaryStartAngle => self.wtr.write_str(r"\<"),
-            WordBoundaryEndAngle => self.wtr.write_str(r"\>"),
-            WordBoundaryStartHalf => self.wtr.write_str(r"\b{start-half}"),
-            WordBoundaryEndHalf => self.wtr.write_str(r"\b{end-half}"),
-        }
-    }
-
-    fn fmt_set_flags(&mut self, ast: &ast::SetFlags) -> fmt::Result {
-        self.wtr.write_str("(?")?;
-        self.fmt_flags(&ast.flags)?;
-        self.wtr.write_str(")")?;
-        Ok(())
-    }
-
-    fn fmt_flags(&mut self, ast: &ast::Flags) -> fmt::Result {
-        use crate::ast::{Flag, FlagsItemKind};
-
-        for item in &ast.items {
-            match item.kind {
-                FlagsItemKind::Negation => self.wtr.write_str("-"),
-                FlagsItemKind::Flag(ref flag) => match *flag {
-                    Flag::CaseInsensitive => self.wtr.write_str("i"),
-                    Flag::MultiLine => self.wtr.write_str("m"),
-                    Flag::DotMatchesNewLine => self.wtr.write_str("s"),
-                    Flag::SwapGreed => self.wtr.write_str("U"),
-                    Flag::Unicode => self.wtr.write_str("u"),
-                    Flag::CRLF => self.wtr.write_str("R"),
-                    Flag::IgnoreWhitespace => self.wtr.write_str("x"),
-                },
-            }?;
-        }
-        Ok(())
-    }
-
-    fn fmt_class_bracketed_pre(
-        &mut self,
-        ast: &ast::ClassBracketed,
-    ) -> fmt::Result {
-        if ast.negated {
-            self.wtr.write_str("[^")
-        } else {
-            self.wtr.write_str("[")
-        }
-    }
-
-    fn fmt_class_bracketed_post(
-        &mut self,
-        _ast: &ast::ClassBracketed,
-    ) -> fmt::Result {
-        self.wtr.write_str("]")
-    }
-
-    fn fmt_class_set_binary_op_kind(
-        &mut self,
-        ast: &ast::ClassSetBinaryOpKind,
-    ) -> fmt::Result {
-        use crate::ast::ClassSetBinaryOpKind::*;
-        match *ast {
-            Intersection => self.wtr.write_str("&&"),
-            Difference => self.wtr.write_str("--"),
-            SymmetricDifference => self.wtr.write_str("~~"),
-        }
-    }
-
-    fn fmt_class_perl(&mut self, ast: &ast::ClassPerl) -> fmt::Result {
-        use crate::ast::ClassPerlKind::*;
-        match ast.kind {
-            Digit if ast.negated => self.wtr.write_str(r"\D"),
-            Digit => self.wtr.write_str(r"\d"),
-            Space if ast.negated => self.wtr.write_str(r"\S"),
-            Space => self.wtr.write_str(r"\s"),
-            Word if ast.negated => self.wtr.write_str(r"\W"),
-            Word => self.wtr.write_str(r"\w"),
-        }
-    }
-
-    fn fmt_class_ascii(&mut self, ast: &ast::ClassAscii) -> fmt::Result {
-        use crate::ast::ClassAsciiKind::*;
-        match ast.kind {
-            Alnum if ast.negated => self.wtr.write_str("[:^alnum:]"),
-            Alnum => self.wtr.write_str("[:alnum:]"),
-            Alpha if ast.negated => self.wtr.write_str("[:^alpha:]"),
-            Alpha => self.wtr.write_str("[:alpha:]"),
-            Ascii if ast.negated => self.wtr.write_str("[:^ascii:]"),
-            Ascii => self.wtr.write_str("[:ascii:]"),
-            Blank if ast.negated => self.wtr.write_str("[:^blank:]"),
-            Blank => self.wtr.write_str("[:blank:]"),
-            Cntrl if ast.negated => self.wtr.write_str("[:^cntrl:]"),
-            Cntrl => self.wtr.write_str("[:cntrl:]"),
-            Digit if ast.negated => self.wtr.write_str("[:^digit:]"),
-            Digit => self.wtr.write_str("[:digit:]"),
-            Graph if ast.negated => self.wtr.write_str("[:^graph:]"),
-            Graph => self.wtr.write_str("[:graph:]"),
-            Lower if ast.negated => self.wtr.write_str("[:^lower:]"),
-            Lower => self.wtr.write_str("[:lower:]"),
-            Print if ast.negated => self.wtr.write_str("[:^print:]"),
-            Print => self.wtr.write_str("[:print:]"),
-            Punct if ast.negated => self.wtr.write_str("[:^punct:]"),
-            Punct => self.wtr.write_str("[:punct:]"),
-            Space if ast.negated => self.wtr.write_str("[:^space:]"),
-            Space => self.wtr.write_str("[:space:]"),
-            Upper if ast.negated => self.wtr.write_str("[:^upper:]"),
-            Upper => self.wtr.write_str("[:upper:]"),
-            Word if ast.negated => self.wtr.write_str("[:^word:]"),
-            Word => self.wtr.write_str("[:word:]"),
-            Xdigit if ast.negated => self.wtr.write_str("[:^xdigit:]"),
-            Xdigit => self.wtr.write_str("[:xdigit:]"),
-        }
-    }
-
-    fn fmt_class_unicode(&mut self, ast: &ast::ClassUnicode) -> fmt::Result {
-        use crate::ast::ClassUnicodeKind::*;
-        use crate::ast::ClassUnicodeOpKind::*;
-
-        if ast.negated {
-            self.wtr.write_str(r"\P")?;
-        } else {
-            self.wtr.write_str(r"\p")?;
-        }
-        match ast.kind {
-            OneLetter(c) => self.wtr.write_char(c),
-            Named(ref x) => write!(self.wtr, "{{{}}}", x),
-            NamedValue { op: Equal, ref name, ref value } => {
-                write!(self.wtr, "{{{}={}}}", name, value)
-            }
-            NamedValue { op: Colon, ref name, ref value } => {
-                write!(self.wtr, "{{{}:{}}}", name, value)
-            }
-            NamedValue { op: NotEqual, ref name, ref value } => {
-                write!(self.wtr, "{{{}!={}}}", name, value)
-            }
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use alloc::string::String;
-
-    use crate::ast::parse::ParserBuilder;
-
-    use super::*;
-
-    fn roundtrip(given: &str) {
-        roundtrip_with(|b| b, given);
-    }
-
-    fn roundtrip_with<F>(mut f: F, given: &str)
-    where
-        F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder,
-    {
-        let mut builder = ParserBuilder::new();
-        f(&mut builder);
-        let ast = builder.build().parse(given).unwrap();
-
-        let mut printer = Printer::new();
-        let mut dst = String::new();
-        printer.print(&ast, &mut dst).unwrap();
-        assert_eq!(given, dst);
-    }
-
-    #[test]
-    fn print_literal() {
-        roundtrip("a");
-        roundtrip(r"\[");
-        roundtrip_with(|b| b.octal(true), r"\141");
-        roundtrip(r"\x61");
-        roundtrip(r"\x7F");
-        roundtrip(r"\u0061");
-        roundtrip(r"\U00000061");
-        roundtrip(r"\x{61}");
-        roundtrip(r"\x{7F}");
-        roundtrip(r"\u{61}");
-        roundtrip(r"\U{61}");
-
-        roundtrip(r"\a");
-        roundtrip(r"\f");
-        roundtrip(r"\t");
-        roundtrip(r"\n");
-        roundtrip(r"\r");
-        roundtrip(r"\v");
-        roundtrip(r"(?x)\ ");
-    }
-
-    #[test]
-    fn print_dot() {
-        roundtrip(".");
-    }
-
-    #[test]
-    fn print_concat() {
-        roundtrip("ab");
-        roundtrip("abcde");
-        roundtrip("a(bcd)ef");
-    }
-
-    #[test]
-    fn print_alternation() {
-        roundtrip("a|b");
-        roundtrip("a|b|c|d|e");
-        roundtrip("|a|b|c|d|e");
-        roundtrip("|a|b|c|d|e|");
-        roundtrip("a(b|c|d)|e|f");
-    }
-
-    #[test]
-    fn print_assertion() {
-        roundtrip(r"^");
-        roundtrip(r"$");
-        roundtrip(r"\A");
-        roundtrip(r"\z");
-        roundtrip(r"\b");
-        roundtrip(r"\B");
-    }
-
-    #[test]
-    fn print_repetition() {
-        roundtrip("a?");
-        roundtrip("a??");
-        roundtrip("a*");
-        roundtrip("a*?");
-        roundtrip("a+");
-        roundtrip("a+?");
-        roundtrip("a{5}");
-        roundtrip("a{5}?");
-        roundtrip("a{5,}");
-        roundtrip("a{5,}?");
-        roundtrip("a{5,10}");
-        roundtrip("a{5,10}?");
-    }
-
-    #[test]
-    fn print_flags() {
-        roundtrip("(?i)");
-        roundtrip("(?-i)");
-        roundtrip("(?s-i)");
-        roundtrip("(?-si)");
-        roundtrip("(?siUmux)");
-    }
-
-    #[test]
-    fn print_group() {
-        roundtrip("(?i:a)");
-        roundtrip("(?P<foo>a)");
-        roundtrip("(?<foo>a)");
-        roundtrip("(a)");
-    }
-
-    #[test]
-    fn print_class() {
-        roundtrip(r"[abc]");
-        roundtrip(r"[a-z]");
-        roundtrip(r"[^a-z]");
-        roundtrip(r"[a-z0-9]");
-        roundtrip(r"[-a-z0-9]");
-        roundtrip(r"[-a-z0-9]");
-        roundtrip(r"[a-z0-9---]");
-        roundtrip(r"[a-z&&m-n]");
-        roundtrip(r"[[a-z&&m-n]]");
-        roundtrip(r"[a-z--m-n]");
-        roundtrip(r"[a-z~~m-n]");
-        roundtrip(r"[a-z[0-9]]");
-        roundtrip(r"[a-z[^0-9]]");
-
-        roundtrip(r"\d");
-        roundtrip(r"\D");
-        roundtrip(r"\s");
-        roundtrip(r"\S");
-        roundtrip(r"\w");
-        roundtrip(r"\W");
-
-        roundtrip(r"[[:alnum:]]");
-        roundtrip(r"[[:^alnum:]]");
-        roundtrip(r"[[:alpha:]]");
-        roundtrip(r"[[:^alpha:]]");
-        roundtrip(r"[[:ascii:]]");
-        roundtrip(r"[[:^ascii:]]");
-        roundtrip(r"[[:blank:]]");
-        roundtrip(r"[[:^blank:]]");
-        roundtrip(r"[[:cntrl:]]");
-        roundtrip(r"[[:^cntrl:]]");
-        roundtrip(r"[[:digit:]]");
-        roundtrip(r"[[:^digit:]]");
-        roundtrip(r"[[:graph:]]");
-        roundtrip(r"[[:^graph:]]");
-        roundtrip(r"[[:lower:]]");
-        roundtrip(r"[[:^lower:]]");
-        roundtrip(r"[[:print:]]");
-        roundtrip(r"[[:^print:]]");
-        roundtrip(r"[[:punct:]]");
-        roundtrip(r"[[:^punct:]]");
-        roundtrip(r"[[:space:]]");
-        roundtrip(r"[[:^space:]]");
-        roundtrip(r"[[:upper:]]");
-        roundtrip(r"[[:^upper:]]");
-        roundtrip(r"[[:word:]]");
-        roundtrip(r"[[:^word:]]");
-        roundtrip(r"[[:xdigit:]]");
-        roundtrip(r"[[:^xdigit:]]");
-
-        roundtrip(r"\pL");
-        roundtrip(r"\PL");
-        roundtrip(r"\p{L}");
-        roundtrip(r"\P{L}");
-        roundtrip(r"\p{X=Y}");
-        roundtrip(r"\P{X=Y}");
-        roundtrip(r"\p{X:Y}");
-        roundtrip(r"\P{X:Y}");
-        roundtrip(r"\p{X!=Y}");
-        roundtrip(r"\P{X!=Y}");
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/visitor.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/visitor.rs
deleted file mode 100644
index c1bb24d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/visitor.rs
+++ /dev/null
@@ -1,522 +0,0 @@
-use alloc::{vec, vec::Vec};
-
-use crate::ast::{self, Ast};
-
-/// A trait for visiting an abstract syntax tree (AST) in depth first order.
-///
-/// The principle aim of this trait is to enable callers to perform case
-/// analysis on an abstract syntax tree without necessarily using recursion.
-/// In particular, this permits callers to do case analysis with constant stack
-/// usage, which can be important since the size of an abstract syntax tree
-/// may be proportional to end user input.
-///
-/// Typical usage of this trait involves providing an implementation and then
-/// running it using the [`visit`] function.
-///
-/// Note that the abstract syntax tree for a regular expression is quite
-/// complex. Unless you specifically need it, you might be able to use the much
-/// simpler [high-level intermediate representation](crate::hir::Hir) and its
-/// [corresponding `Visitor` trait](crate::hir::Visitor) instead.
-pub trait Visitor {
-    /// The result of visiting an AST.
-    type Output;
-    /// An error that visiting an AST might return.
-    type Err;
-
-    /// All implementors of `Visitor` must provide a `finish` method, which
-    /// yields the result of visiting the AST or an error.
-    fn finish(self) -> Result<Self::Output, Self::Err>;
-
-    /// This method is called before beginning traversal of the AST.
-    fn start(&mut self) {}
-
-    /// This method is called on an `Ast` before descending into child `Ast`
-    /// nodes.
-    fn visit_pre(&mut self, _ast: &Ast) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called on an `Ast` after descending all of its child
-    /// `Ast` nodes.
-    fn visit_post(&mut self, _ast: &Ast) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called between child nodes of an
-    /// [`Alternation`](ast::Alternation).
-    fn visit_alternation_in(&mut self) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called between child nodes of a concatenation.
-    fn visit_concat_in(&mut self) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called on every [`ClassSetItem`](ast::ClassSetItem)
-    /// before descending into child nodes.
-    fn visit_class_set_item_pre(
-        &mut self,
-        _ast: &ast::ClassSetItem,
-    ) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called on every [`ClassSetItem`](ast::ClassSetItem)
-    /// after descending into child nodes.
-    fn visit_class_set_item_post(
-        &mut self,
-        _ast: &ast::ClassSetItem,
-    ) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called on every
-    /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) before descending into
-    /// child nodes.
-    fn visit_class_set_binary_op_pre(
-        &mut self,
-        _ast: &ast::ClassSetBinaryOp,
-    ) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called on every
-    /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) after descending into child
-    /// nodes.
-    fn visit_class_set_binary_op_post(
-        &mut self,
-        _ast: &ast::ClassSetBinaryOp,
-    ) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called between the left hand and right hand child nodes
-    /// of a [`ClassSetBinaryOp`](ast::ClassSetBinaryOp).
-    fn visit_class_set_binary_op_in(
-        &mut self,
-        _ast: &ast::ClassSetBinaryOp,
-    ) -> Result<(), Self::Err> {
-        Ok(())
-    }
-}
-
-/// Executes an implementation of `Visitor` in constant stack space.
-///
-/// This function will visit every node in the given `Ast` while calling the
-/// appropriate methods provided by the [`Visitor`] trait.
-///
-/// The primary use case for this method is when one wants to perform case
-/// analysis over an `Ast` without using a stack size proportional to the depth
-/// of the `Ast`. Namely, this method will instead use constant stack size, but
-/// will use heap space proportional to the size of the `Ast`. This may be
-/// desirable in cases where the size of `Ast` is proportional to end user
-/// input.
-///
-/// If the visitor returns an error at any point, then visiting is stopped and
-/// the error is returned.
-pub fn visit<V: Visitor>(ast: &Ast, visitor: V) -> Result<V::Output, V::Err> {
-    HeapVisitor::new().visit(ast, visitor)
-}
-
-/// HeapVisitor visits every item in an `Ast` recursively using constant stack
-/// size and a heap size proportional to the size of the `Ast`.
-struct HeapVisitor<'a> {
-    /// A stack of `Ast` nodes. This is roughly analogous to the call stack
-    /// used in a typical recursive visitor.
-    stack: Vec<(&'a Ast, Frame<'a>)>,
-    /// Similar to the `Ast` stack above, but is used only for character
-    /// classes. In particular, character classes embed their own mini
-    /// recursive syntax.
-    stack_class: Vec<(ClassInduct<'a>, ClassFrame<'a>)>,
-}
-
-/// Represents a single stack frame while performing structural induction over
-/// an `Ast`.
-enum Frame<'a> {
-    /// A stack frame allocated just before descending into a repetition
-    /// operator's child node.
-    Repetition(&'a ast::Repetition),
-    /// A stack frame allocated just before descending into a group's child
-    /// node.
-    Group(&'a ast::Group),
-    /// The stack frame used while visiting every child node of a concatenation
-    /// of expressions.
-    Concat {
-        /// The child node we are currently visiting.
-        head: &'a Ast,
-        /// The remaining child nodes to visit (which may be empty).
-        tail: &'a [Ast],
-    },
-    /// The stack frame used while visiting every child node of an alternation
-    /// of expressions.
-    Alternation {
-        /// The child node we are currently visiting.
-        head: &'a Ast,
-        /// The remaining child nodes to visit (which may be empty).
-        tail: &'a [Ast],
-    },
-}
-
-/// Represents a single stack frame while performing structural induction over
-/// a character class.
-enum ClassFrame<'a> {
-    /// The stack frame used while visiting every child node of a union of
-    /// character class items.
-    Union {
-        /// The child node we are currently visiting.
-        head: &'a ast::ClassSetItem,
-        /// The remaining child nodes to visit (which may be empty).
-        tail: &'a [ast::ClassSetItem],
-    },
-    /// The stack frame used while a binary class operation.
-    Binary { op: &'a ast::ClassSetBinaryOp },
-    /// A stack frame allocated just before descending into a binary operator's
-    /// left hand child node.
-    BinaryLHS {
-        op: &'a ast::ClassSetBinaryOp,
-        lhs: &'a ast::ClassSet,
-        rhs: &'a ast::ClassSet,
-    },
-    /// A stack frame allocated just before descending into a binary operator's
-    /// right hand child node.
-    BinaryRHS { op: &'a ast::ClassSetBinaryOp, rhs: &'a ast::ClassSet },
-}
-
-/// A representation of the inductive step when performing structural induction
-/// over a character class.
-///
-/// Note that there is no analogous explicit type for the inductive step for
-/// `Ast` nodes because the inductive step is just an `Ast`. For character
-/// classes, the inductive step can produce one of two possible child nodes:
-/// an item or a binary operation. (An item cannot be a binary operation
-/// because that would imply binary operations can be unioned in the concrete
-/// syntax, which is not possible.)
-enum ClassInduct<'a> {
-    Item(&'a ast::ClassSetItem),
-    BinaryOp(&'a ast::ClassSetBinaryOp),
-}
-
-impl<'a> HeapVisitor<'a> {
-    fn new() -> HeapVisitor<'a> {
-        HeapVisitor { stack: vec![], stack_class: vec![] }
-    }
-
-    fn visit<V: Visitor>(
-        &mut self,
-        mut ast: &'a Ast,
-        mut visitor: V,
-    ) -> Result<V::Output, V::Err> {
-        self.stack.clear();
-        self.stack_class.clear();
-
-        visitor.start();
-        loop {
-            visitor.visit_pre(ast)?;
-            if let Some(x) = self.induct(ast, &mut visitor)? {
-                let child = x.child();
-                self.stack.push((ast, x));
-                ast = child;
-                continue;
-            }
-            // No induction means we have a base case, so we can post visit
-            // it now.
-            visitor.visit_post(ast)?;
-
-            // At this point, we now try to pop our call stack until it is
-            // either empty or we hit another inductive case.
-            loop {
-                let (post_ast, frame) = match self.stack.pop() {
-                    None => return visitor.finish(),
-                    Some((post_ast, frame)) => (post_ast, frame),
-                };
-                // If this is a concat/alternate, then we might have additional
-                // inductive steps to process.
-                if let Some(x) = self.pop(frame) {
-                    match x {
-                        Frame::Alternation { .. } => {
-                            visitor.visit_alternation_in()?;
-                        }
-                        Frame::Concat { .. } => {
-                            visitor.visit_concat_in()?;
-                        }
-                        _ => {}
-                    }
-                    ast = x.child();
-                    self.stack.push((post_ast, x));
-                    break;
-                }
-                // Otherwise, we've finished visiting all the child nodes for
-                // this AST, so we can post visit it now.
-                visitor.visit_post(post_ast)?;
-            }
-        }
-    }
-
-    /// Build a stack frame for the given AST if one is needed (which occurs if
-    /// and only if there are child nodes in the AST). Otherwise, return None.
-    ///
-    /// If this visits a class, then the underlying visitor implementation may
-    /// return an error which will be passed on here.
-    fn induct<V: Visitor>(
-        &mut self,
-        ast: &'a Ast,
-        visitor: &mut V,
-    ) -> Result<Option<Frame<'a>>, V::Err> {
-        Ok(match *ast {
-            Ast::ClassBracketed(ref x) => {
-                self.visit_class(x, visitor)?;
-                None
-            }
-            Ast::Repetition(ref x) => Some(Frame::Repetition(x)),
-            Ast::Group(ref x) => Some(Frame::Group(x)),
-            Ast::Concat(ref x) if x.asts.is_empty() => None,
-            Ast::Concat(ref x) => {
-                Some(Frame::Concat { head: &x.asts[0], tail: &x.asts[1..] })
-            }
-            Ast::Alternation(ref x) if x.asts.is_empty() => None,
-            Ast::Alternation(ref x) => Some(Frame::Alternation {
-                head: &x.asts[0],
-                tail: &x.asts[1..],
-            }),
-            _ => None,
-        })
-    }
-
-    /// Pops the given frame. If the frame has an additional inductive step,
-    /// then return it, otherwise return `None`.
-    fn pop(&self, induct: Frame<'a>) -> Option<Frame<'a>> {
-        match induct {
-            Frame::Repetition(_) => None,
-            Frame::Group(_) => None,
-            Frame::Concat { tail, .. } => {
-                if tail.is_empty() {
-                    None
-                } else {
-                    Some(Frame::Concat { head: &tail[0], tail: &tail[1..] })
-                }
-            }
-            Frame::Alternation { tail, .. } => {
-                if tail.is_empty() {
-                    None
-                } else {
-                    Some(Frame::Alternation {
-                        head: &tail[0],
-                        tail: &tail[1..],
-                    })
-                }
-            }
-        }
-    }
-
-    fn visit_class<V: Visitor>(
-        &mut self,
-        ast: &'a ast::ClassBracketed,
-        visitor: &mut V,
-    ) -> Result<(), V::Err> {
-        let mut ast = ClassInduct::from_bracketed(ast);
-        loop {
-            self.visit_class_pre(&ast, visitor)?;
-            if let Some(x) = self.induct_class(&ast) {
-                let child = x.child();
-                self.stack_class.push((ast, x));
-                ast = child;
-                continue;
-            }
-            self.visit_class_post(&ast, visitor)?;
-
-            // At this point, we now try to pop our call stack until it is
-            // either empty or we hit another inductive case.
-            loop {
-                let (post_ast, frame) = match self.stack_class.pop() {
-                    None => return Ok(()),
-                    Some((post_ast, frame)) => (post_ast, frame),
-                };
-                // If this is a union or a binary op, then we might have
-                // additional inductive steps to process.
-                if let Some(x) = self.pop_class(frame) {
-                    if let ClassFrame::BinaryRHS { ref op, .. } = x {
-                        visitor.visit_class_set_binary_op_in(op)?;
-                    }
-                    ast = x.child();
-                    self.stack_class.push((post_ast, x));
-                    break;
-                }
-                // Otherwise, we've finished visiting all the child nodes for
-                // this class node, so we can post visit it now.
-                self.visit_class_post(&post_ast, visitor)?;
-            }
-        }
-    }
-
-    /// Call the appropriate `Visitor` methods given an inductive step.
-    fn visit_class_pre<V: Visitor>(
-        &self,
-        ast: &ClassInduct<'a>,
-        visitor: &mut V,
-    ) -> Result<(), V::Err> {
-        match *ast {
-            ClassInduct::Item(item) => {
-                visitor.visit_class_set_item_pre(item)?;
-            }
-            ClassInduct::BinaryOp(op) => {
-                visitor.visit_class_set_binary_op_pre(op)?;
-            }
-        }
-        Ok(())
-    }
-
-    /// Call the appropriate `Visitor` methods given an inductive step.
-    fn visit_class_post<V: Visitor>(
-        &self,
-        ast: &ClassInduct<'a>,
-        visitor: &mut V,
-    ) -> Result<(), V::Err> {
-        match *ast {
-            ClassInduct::Item(item) => {
-                visitor.visit_class_set_item_post(item)?;
-            }
-            ClassInduct::BinaryOp(op) => {
-                visitor.visit_class_set_binary_op_post(op)?;
-            }
-        }
-        Ok(())
-    }
-
-    /// Build a stack frame for the given class node if one is needed (which
-    /// occurs if and only if there are child nodes). Otherwise, return None.
-    fn induct_class(&self, ast: &ClassInduct<'a>) -> Option<ClassFrame<'a>> {
-        match *ast {
-            ClassInduct::Item(&ast::ClassSetItem::Bracketed(ref x)) => {
-                match x.kind {
-                    ast::ClassSet::Item(ref item) => {
-                        Some(ClassFrame::Union { head: item, tail: &[] })
-                    }
-                    ast::ClassSet::BinaryOp(ref op) => {
-                        Some(ClassFrame::Binary { op })
-                    }
-                }
-            }
-            ClassInduct::Item(&ast::ClassSetItem::Union(ref x)) => {
-                if x.items.is_empty() {
-                    None
-                } else {
-                    Some(ClassFrame::Union {
-                        head: &x.items[0],
-                        tail: &x.items[1..],
-                    })
-                }
-            }
-            ClassInduct::BinaryOp(op) => {
-                Some(ClassFrame::BinaryLHS { op, lhs: &op.lhs, rhs: &op.rhs })
-            }
-            _ => None,
-        }
-    }
-
-    /// Pops the given frame. If the frame has an additional inductive step,
-    /// then return it, otherwise return `None`.
-    fn pop_class(&self, induct: ClassFrame<'a>) -> Option<ClassFrame<'a>> {
-        match induct {
-            ClassFrame::Union { tail, .. } => {
-                if tail.is_empty() {
-                    None
-                } else {
-                    Some(ClassFrame::Union {
-                        head: &tail[0],
-                        tail: &tail[1..],
-                    })
-                }
-            }
-            ClassFrame::Binary { .. } => None,
-            ClassFrame::BinaryLHS { op, rhs, .. } => {
-                Some(ClassFrame::BinaryRHS { op, rhs })
-            }
-            ClassFrame::BinaryRHS { .. } => None,
-        }
-    }
-}
-
-impl<'a> Frame<'a> {
-    /// Perform the next inductive step on this frame and return the next
-    /// child AST node to visit.
-    fn child(&self) -> &'a Ast {
-        match *self {
-            Frame::Repetition(rep) => &rep.ast,
-            Frame::Group(group) => &group.ast,
-            Frame::Concat { head, .. } => head,
-            Frame::Alternation { head, .. } => head,
-        }
-    }
-}
-
-impl<'a> ClassFrame<'a> {
-    /// Perform the next inductive step on this frame and return the next
-    /// child class node to visit.
-    fn child(&self) -> ClassInduct<'a> {
-        match *self {
-            ClassFrame::Union { head, .. } => ClassInduct::Item(head),
-            ClassFrame::Binary { op, .. } => ClassInduct::BinaryOp(op),
-            ClassFrame::BinaryLHS { ref lhs, .. } => {
-                ClassInduct::from_set(lhs)
-            }
-            ClassFrame::BinaryRHS { ref rhs, .. } => {
-                ClassInduct::from_set(rhs)
-            }
-        }
-    }
-}
-
-impl<'a> ClassInduct<'a> {
-    fn from_bracketed(ast: &'a ast::ClassBracketed) -> ClassInduct<'a> {
-        ClassInduct::from_set(&ast.kind)
-    }
-
-    fn from_set(ast: &'a ast::ClassSet) -> ClassInduct<'a> {
-        match *ast {
-            ast::ClassSet::Item(ref item) => ClassInduct::Item(item),
-            ast::ClassSet::BinaryOp(ref op) => ClassInduct::BinaryOp(op),
-        }
-    }
-}
-
-impl<'a> core::fmt::Debug for ClassFrame<'a> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        let x = match *self {
-            ClassFrame::Union { .. } => "Union",
-            ClassFrame::Binary { .. } => "Binary",
-            ClassFrame::BinaryLHS { .. } => "BinaryLHS",
-            ClassFrame::BinaryRHS { .. } => "BinaryRHS",
-        };
-        write!(f, "{}", x)
-    }
-}
-
-impl<'a> core::fmt::Debug for ClassInduct<'a> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        let x = match *self {
-            ClassInduct::Item(it) => match *it {
-                ast::ClassSetItem::Empty(_) => "Item(Empty)",
-                ast::ClassSetItem::Literal(_) => "Item(Literal)",
-                ast::ClassSetItem::Range(_) => "Item(Range)",
-                ast::ClassSetItem::Ascii(_) => "Item(Ascii)",
-                ast::ClassSetItem::Perl(_) => "Item(Perl)",
-                ast::ClassSetItem::Unicode(_) => "Item(Unicode)",
-                ast::ClassSetItem::Bracketed(_) => "Item(Bracketed)",
-                ast::ClassSetItem::Union(_) => "Item(Union)",
-            },
-            ClassInduct::BinaryOp(it) => match it.kind {
-                ast::ClassSetBinaryOpKind::Intersection => {
-                    "BinaryOp(Intersection)"
-                }
-                ast::ClassSetBinaryOpKind::Difference => {
-                    "BinaryOp(Difference)"
-                }
-                ast::ClassSetBinaryOpKind::SymmetricDifference => {
-                    "BinaryOp(SymmetricDifference)"
-                }
-            },
-        };
-        write!(f, "{}", x)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/debug.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/debug.rs
deleted file mode 100644
index a0b051b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/debug.rs
+++ /dev/null
@@ -1,107 +0,0 @@
-/// A type that wraps a single byte with a convenient fmt::Debug impl that
-/// escapes the byte.
-pub(crate) struct Byte(pub(crate) u8);
-
-impl core::fmt::Debug for Byte {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        // Special case ASCII space. It's too hard to read otherwise, so
-        // put quotes around it. I sometimes wonder whether just '\x20' would
-        // be better...
-        if self.0 == b' ' {
-            return write!(f, "' '");
-        }
-        // 10 bytes is enough to cover any output from ascii::escape_default.
-        let mut bytes = [0u8; 10];
-        let mut len = 0;
-        for (i, mut b) in core::ascii::escape_default(self.0).enumerate() {
-            // capitalize \xab to \xAB
-            if i >= 2 && b'a' <= b && b <= b'f' {
-                b -= 32;
-            }
-            bytes[len] = b;
-            len += 1;
-        }
-        write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap())
-    }
-}
-
-/// A type that provides a human readable debug impl for arbitrary bytes.
-///
-/// This generally works best when the bytes are presumed to be mostly UTF-8,
-/// but will work for anything.
-///
-/// N.B. This is copied nearly verbatim from regex-automata. Sigh.
-pub(crate) struct Bytes<'a>(pub(crate) &'a [u8]);
-
-impl<'a> core::fmt::Debug for Bytes<'a> {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        write!(f, "\"")?;
-        // This is a sad re-implementation of a similar impl found in bstr.
-        let mut bytes = self.0;
-        while let Some(result) = utf8_decode(bytes) {
-            let ch = match result {
-                Ok(ch) => ch,
-                Err(byte) => {
-                    write!(f, r"\x{:02x}", byte)?;
-                    bytes = &bytes[1..];
-                    continue;
-                }
-            };
-            bytes = &bytes[ch.len_utf8()..];
-            match ch {
-                '\0' => write!(f, "\\0")?,
-                // ASCII control characters except \0, \n, \r, \t
-                '\x01'..='\x08'
-                | '\x0b'
-                | '\x0c'
-                | '\x0e'..='\x19'
-                | '\x7f' => {
-                    write!(f, "\\x{:02x}", u32::from(ch))?;
-                }
-                '\n' | '\r' | '\t' | _ => {
-                    write!(f, "{}", ch.escape_debug())?;
-                }
-            }
-        }
-        write!(f, "\"")?;
-        Ok(())
-    }
-}
-
-/// Decodes the next UTF-8 encoded codepoint from the given byte slice.
-///
-/// If no valid encoding of a codepoint exists at the beginning of the given
-/// byte slice, then the first byte is returned instead.
-///
-/// This returns `None` if and only if `bytes` is empty.
-pub(crate) fn utf8_decode(bytes: &[u8]) -> Option<Result<char, u8>> {
-    fn len(byte: u8) -> Option<usize> {
-        if byte <= 0x7F {
-            return Some(1);
-        } else if byte & 0b1100_0000 == 0b1000_0000 {
-            return None;
-        } else if byte <= 0b1101_1111 {
-            Some(2)
-        } else if byte <= 0b1110_1111 {
-            Some(3)
-        } else if byte <= 0b1111_0111 {
-            Some(4)
-        } else {
-            None
-        }
-    }
-
-    if bytes.is_empty() {
-        return None;
-    }
-    let len = match len(bytes[0]) {
-        None => return Some(Err(bytes[0])),
-        Some(len) if len > bytes.len() => return Some(Err(bytes[0])),
-        Some(1) => return Some(Ok(char::from(bytes[0]))),
-        Some(len) => len,
-    };
-    match core::str::from_utf8(&bytes[..len]) {
-        Ok(s) => Some(Ok(s.chars().next().unwrap())),
-        Err(_) => Some(Err(bytes[0])),
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/either.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/either.rs
deleted file mode 100644
index 7ae41e4c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/either.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-/// A simple binary sum type.
-///
-/// This is occasionally useful in an ad hoc fashion.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum Either<Left, Right> {
-    Left(Left),
-    Right(Right),
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/error.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/error.rs
deleted file mode 100644
index 98869c4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/error.rs
+++ /dev/null
@@ -1,311 +0,0 @@
-use alloc::{
-    format,
-    string::{String, ToString},
-    vec,
-    vec::Vec,
-};
-
-use crate::{ast, hir};
-
-/// This error type encompasses any error that can be returned by this crate.
-///
-/// This error type is marked as `non_exhaustive`. This means that adding a
-/// new variant is not considered a breaking change.
-#[non_exhaustive]
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum Error {
-    /// An error that occurred while translating concrete syntax into abstract
-    /// syntax (AST).
-    Parse(ast::Error),
-    /// An error that occurred while translating abstract syntax into a high
-    /// level intermediate representation (HIR).
-    Translate(hir::Error),
-}
-
-impl From<ast::Error> for Error {
-    fn from(err: ast::Error) -> Error {
-        Error::Parse(err)
-    }
-}
-
-impl From<hir::Error> for Error {
-    fn from(err: hir::Error) -> Error {
-        Error::Translate(err)
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for Error {}
-
-impl core::fmt::Display for Error {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match *self {
-            Error::Parse(ref x) => x.fmt(f),
-            Error::Translate(ref x) => x.fmt(f),
-        }
-    }
-}
-
-/// A helper type for formatting nice error messages.
-///
-/// This type is responsible for reporting regex parse errors in a nice human
-/// readable format. Most of its complexity is from interspersing notational
-/// markers pointing out the position where an error occurred.
-#[derive(Debug)]
-pub struct Formatter<'e, E> {
-    /// The original regex pattern in which the error occurred.
-    pattern: &'e str,
-    /// The error kind. It must impl fmt::Display.
-    err: &'e E,
-    /// The primary span of the error.
-    span: &'e ast::Span,
-    /// An auxiliary and optional span, in case the error needs to point to
-    /// two locations (e.g., when reporting a duplicate capture group name).
-    aux_span: Option<&'e ast::Span>,
-}
-
-impl<'e> From<&'e ast::Error> for Formatter<'e, ast::ErrorKind> {
-    fn from(err: &'e ast::Error) -> Self {
-        Formatter {
-            pattern: err.pattern(),
-            err: err.kind(),
-            span: err.span(),
-            aux_span: err.auxiliary_span(),
-        }
-    }
-}
-
-impl<'e> From<&'e hir::Error> for Formatter<'e, hir::ErrorKind> {
-    fn from(err: &'e hir::Error) -> Self {
-        Formatter {
-            pattern: err.pattern(),
-            err: err.kind(),
-            span: err.span(),
-            aux_span: None,
-        }
-    }
-}
-
-impl<'e, E: core::fmt::Display> core::fmt::Display for Formatter<'e, E> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        let spans = Spans::from_formatter(self);
-        if self.pattern.contains('\n') {
-            let divider = repeat_char('~', 79);
-
-            writeln!(f, "regex parse error:")?;
-            writeln!(f, "{}", divider)?;
-            let notated = spans.notate();
-            write!(f, "{}", notated)?;
-            writeln!(f, "{}", divider)?;
-            // If we have error spans that cover multiple lines, then we just
-            // note the line numbers.
-            if !spans.multi_line.is_empty() {
-                let mut notes = vec![];
-                for span in &spans.multi_line {
-                    notes.push(format!(
-                        "on line {} (column {}) through line {} (column {})",
-                        span.start.line,
-                        span.start.column,
-                        span.end.line,
-                        span.end.column - 1
-                    ));
-                }
-                writeln!(f, "{}", notes.join("\n"))?;
-            }
-            write!(f, "error: {}", self.err)?;
-        } else {
-            writeln!(f, "regex parse error:")?;
-            let notated = Spans::from_formatter(self).notate();
-            write!(f, "{}", notated)?;
-            write!(f, "error: {}", self.err)?;
-        }
-        Ok(())
-    }
-}
-
-/// This type represents an arbitrary number of error spans in a way that makes
-/// it convenient to notate the regex pattern. ("Notate" means "point out
-/// exactly where the error occurred in the regex pattern.")
-///
-/// Technically, we can only ever have two spans given our current error
-/// structure. However, after toiling with a specific algorithm for handling
-/// two spans, it became obvious that an algorithm to handle an arbitrary
-/// number of spans was actually much simpler.
-struct Spans<'p> {
-    /// The original regex pattern string.
-    pattern: &'p str,
-    /// The total width that should be used for line numbers. The width is
-    /// used for left padding the line numbers for alignment.
-    ///
-    /// A value of `0` means line numbers should not be displayed. That is,
-    /// the pattern is itself only one line.
-    line_number_width: usize,
-    /// All error spans that occur on a single line. This sequence always has
-    /// length equivalent to the number of lines in `pattern`, where the index
-    /// of the sequence represents a line number, starting at `0`. The spans
-    /// in each line are sorted in ascending order.
-    by_line: Vec<Vec<ast::Span>>,
-    /// All error spans that occur over one or more lines. That is, the start
-    /// and end position of the span have different line numbers. The spans are
-    /// sorted in ascending order.
-    multi_line: Vec<ast::Span>,
-}
-
-impl<'p> Spans<'p> {
-    /// Build a sequence of spans from a formatter.
-    fn from_formatter<'e, E: core::fmt::Display>(
-        fmter: &'p Formatter<'e, E>,
-    ) -> Spans<'p> {
-        let mut line_count = fmter.pattern.lines().count();
-        // If the pattern ends with a `\n` literal, then our line count is
-        // off by one, since a span can occur immediately after the last `\n`,
-        // which is consider to be an additional line.
-        if fmter.pattern.ends_with('\n') {
-            line_count += 1;
-        }
-        let line_number_width =
-            if line_count <= 1 { 0 } else { line_count.to_string().len() };
-        let mut spans = Spans {
-            pattern: &fmter.pattern,
-            line_number_width,
-            by_line: vec![vec![]; line_count],
-            multi_line: vec![],
-        };
-        spans.add(fmter.span.clone());
-        if let Some(span) = fmter.aux_span {
-            spans.add(span.clone());
-        }
-        spans
-    }
-
-    /// Add the given span to this sequence, putting it in the right place.
-    fn add(&mut self, span: ast::Span) {
-        // This is grossly inefficient since we sort after each add, but right
-        // now, we only ever add two spans at most.
-        if span.is_one_line() {
-            let i = span.start.line - 1; // because lines are 1-indexed
-            self.by_line[i].push(span);
-            self.by_line[i].sort();
-        } else {
-            self.multi_line.push(span);
-            self.multi_line.sort();
-        }
-    }
-
-    /// Notate the pattern string with carents (`^`) pointing at each span
-    /// location. This only applies to spans that occur within a single line.
-    fn notate(&self) -> String {
-        let mut notated = String::new();
-        for (i, line) in self.pattern.lines().enumerate() {
-            if self.line_number_width > 0 {
-                notated.push_str(&self.left_pad_line_number(i + 1));
-                notated.push_str(": ");
-            } else {
-                notated.push_str("    ");
-            }
-            notated.push_str(line);
-            notated.push('\n');
-            if let Some(notes) = self.notate_line(i) {
-                notated.push_str(&notes);
-                notated.push('\n');
-            }
-        }
-        notated
-    }
-
-    /// Return notes for the line indexed at `i` (zero-based). If there are no
-    /// spans for the given line, then `None` is returned. Otherwise, an
-    /// appropriately space padded string with correctly positioned `^` is
-    /// returned, accounting for line numbers.
-    fn notate_line(&self, i: usize) -> Option<String> {
-        let spans = &self.by_line[i];
-        if spans.is_empty() {
-            return None;
-        }
-        let mut notes = String::new();
-        for _ in 0..self.line_number_padding() {
-            notes.push(' ');
-        }
-        let mut pos = 0;
-        for span in spans {
-            for _ in pos..(span.start.column - 1) {
-                notes.push(' ');
-                pos += 1;
-            }
-            let note_len = span.end.column.saturating_sub(span.start.column);
-            for _ in 0..core::cmp::max(1, note_len) {
-                notes.push('^');
-                pos += 1;
-            }
-        }
-        Some(notes)
-    }
-
-    /// Left pad the given line number with spaces such that it is aligned with
-    /// other line numbers.
-    fn left_pad_line_number(&self, n: usize) -> String {
-        let n = n.to_string();
-        let pad = self.line_number_width.checked_sub(n.len()).unwrap();
-        let mut result = repeat_char(' ', pad);
-        result.push_str(&n);
-        result
-    }
-
-    /// Return the line number padding beginning at the start of each line of
-    /// the pattern.
-    ///
-    /// If the pattern is only one line, then this returns a fixed padding
-    /// for visual indentation.
-    fn line_number_padding(&self) -> usize {
-        if self.line_number_width == 0 {
-            4
-        } else {
-            2 + self.line_number_width
-        }
-    }
-}
-
-fn repeat_char(c: char, count: usize) -> String {
-    core::iter::repeat(c).take(count).collect()
-}
-
-#[cfg(test)]
-mod tests {
-    use alloc::string::ToString;
-
-    use crate::ast::parse::Parser;
-
-    fn assert_panic_message(pattern: &str, expected_msg: &str) {
-        let result = Parser::new().parse(pattern);
-        match result {
-            Ok(_) => {
-                panic!("regex should not have parsed");
-            }
-            Err(err) => {
-                assert_eq!(err.to_string(), expected_msg.trim());
-            }
-        }
-    }
-
-    // See: https://github.com/rust-lang/regex/issues/464
-    #[test]
-    fn regression_464() {
-        let err = Parser::new().parse("a{\n").unwrap_err();
-        // This test checks that the error formatter doesn't panic.
-        assert!(!err.to_string().is_empty());
-    }
-
-    // See: https://github.com/rust-lang/regex/issues/545
-    #[test]
-    fn repetition_quantifier_expects_a_valid_decimal() {
-        assert_panic_message(
-            r"\\u{[^}]*}",
-            r#"
-regex parse error:
-    \\u{[^}]*}
-        ^
-error: repetition quantifier expects a valid decimal
-"#,
-        );
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/interval.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/interval.rs
deleted file mode 100644
index d507ee7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/interval.rs
+++ /dev/null
@@ -1,564 +0,0 @@
-use core::{char, cmp, fmt::Debug, slice};
-
-use alloc::vec::Vec;
-
-use crate::unicode;
-
-// This module contains an *internal* implementation of interval sets.
-//
-// The primary invariant that interval sets guards is canonical ordering. That
-// is, every interval set contains an ordered sequence of intervals where
-// no two intervals are overlapping or adjacent. While this invariant is
-// occasionally broken within the implementation, it should be impossible for
-// callers to observe it.
-//
-// Since case folding (as implemented below) breaks that invariant, we roll
-// that into this API even though it is a little out of place in an otherwise
-// generic interval set. (Hence the reason why the `unicode` module is imported
-// here.)
-//
-// Some of the implementation complexity here is a result of me wanting to
-// preserve the sequential representation without using additional memory.
-// In many cases, we do use linear extra memory, but it is at most 2x and it
-// is amortized. If we relaxed the memory requirements, this implementation
-// could become much simpler. The extra memory is honestly probably OK, but
-// character classes (especially of the Unicode variety) can become quite
-// large, and it would be nice to keep regex compilation snappy even in debug
-// builds. (In the past, I have been careless with this area of code and it has
-// caused slow regex compilations in debug mode, so this isn't entirely
-// unwarranted.)
-//
-// Tests on this are relegated to the public API of HIR in src/hir.rs.
-
-#[derive(Clone, Debug)]
-pub struct IntervalSet<I> {
-    /// A sorted set of non-overlapping ranges.
-    ranges: Vec<I>,
-    /// While not required at all for correctness, we keep track of whether an
-    /// interval set has been case folded or not. This helps us avoid doing
-    /// redundant work if, for example, a set has already been cased folded.
-    /// And note that whether a set is folded or not is preserved through
-    /// all of the pairwise set operations. That is, if both interval sets
-    /// have been case folded, then any of difference, union, intersection or
-    /// symmetric difference all produce a case folded set.
-    ///
-    /// Note that when this is true, it *must* be the case that the set is case
-    /// folded. But when it's false, the set *may* be case folded. In other
-    /// words, we only set this to true when we know it to be case, but we're
-    /// okay with it being false if it would otherwise be costly to determine
-    /// whether it should be true. This means code cannot assume that a false
-    /// value necessarily indicates that the set is not case folded.
-    ///
-    /// Bottom line: this is a performance optimization.
-    folded: bool,
-}
-
-impl<I: Interval> Eq for IntervalSet<I> {}
-
-// We implement PartialEq manually so that we don't consider the set's internal
-// 'folded' property to be part of its identity. The 'folded' property is
-// strictly an optimization.
-impl<I: Interval> PartialEq for IntervalSet<I> {
-    fn eq(&self, other: &IntervalSet<I>) -> bool {
-        self.ranges.eq(&other.ranges)
-    }
-}
-
-impl<I: Interval> IntervalSet<I> {
-    /// Create a new set from a sequence of intervals. Each interval is
-    /// specified as a pair of bounds, where both bounds are inclusive.
-    ///
-    /// The given ranges do not need to be in any specific order, and ranges
-    /// may overlap.
-    pub fn new<T: IntoIterator<Item = I>>(intervals: T) -> IntervalSet<I> {
-        let ranges: Vec<I> = intervals.into_iter().collect();
-        // An empty set is case folded.
-        let folded = ranges.is_empty();
-        let mut set = IntervalSet { ranges, folded };
-        set.canonicalize();
-        set
-    }
-
-    /// Add a new interval to this set.
-    pub fn push(&mut self, interval: I) {
-        // TODO: This could be faster. e.g., Push the interval such that
-        // it preserves canonicalization.
-        self.ranges.push(interval);
-        self.canonicalize();
-        // We don't know whether the new interval added here is considered
-        // case folded, so we conservatively assume that the entire set is
-        // no longer case folded if it was previously.
-        self.folded = false;
-    }
-
-    /// Return an iterator over all intervals in this set.
-    ///
-    /// The iterator yields intervals in ascending order.
-    pub fn iter(&self) -> IntervalSetIter<'_, I> {
-        IntervalSetIter(self.ranges.iter())
-    }
-
-    /// Return an immutable slice of intervals in this set.
-    ///
-    /// The sequence returned is in canonical ordering.
-    pub fn intervals(&self) -> &[I] {
-        &self.ranges
-    }
-
-    /// Expand this interval set such that it contains all case folded
-    /// characters. For example, if this class consists of the range `a-z`,
-    /// then applying case folding will result in the class containing both the
-    /// ranges `a-z` and `A-Z`.
-    ///
-    /// This returns an error if the necessary case mapping data is not
-    /// available.
-    pub fn case_fold_simple(&mut self) -> Result<(), unicode::CaseFoldError> {
-        if self.folded {
-            return Ok(());
-        }
-        let len = self.ranges.len();
-        for i in 0..len {
-            let range = self.ranges[i];
-            if let Err(err) = range.case_fold_simple(&mut self.ranges) {
-                self.canonicalize();
-                return Err(err);
-            }
-        }
-        self.canonicalize();
-        self.folded = true;
-        Ok(())
-    }
-
-    /// Union this set with the given set, in place.
-    pub fn union(&mut self, other: &IntervalSet<I>) {
-        if other.ranges.is_empty() || self.ranges == other.ranges {
-            return;
-        }
-        // This could almost certainly be done more efficiently.
-        self.ranges.extend(&other.ranges);
-        self.canonicalize();
-        self.folded = self.folded && other.folded;
-    }
-
-    /// Intersect this set with the given set, in place.
-    pub fn intersect(&mut self, other: &IntervalSet<I>) {
-        if self.ranges.is_empty() {
-            return;
-        }
-        if other.ranges.is_empty() {
-            self.ranges.clear();
-            // An empty set is case folded.
-            self.folded = true;
-            return;
-        }
-
-        // There should be a way to do this in-place with constant memory,
-        // but I couldn't figure out a simple way to do it. So just append
-        // the intersection to the end of this range, and then drain it before
-        // we're done.
-        let drain_end = self.ranges.len();
-
-        let mut ita = 0..drain_end;
-        let mut itb = 0..other.ranges.len();
-        let mut a = ita.next().unwrap();
-        let mut b = itb.next().unwrap();
-        loop {
-            if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) {
-                self.ranges.push(ab);
-            }
-            let (it, aorb) =
-                if self.ranges[a].upper() < other.ranges[b].upper() {
-                    (&mut ita, &mut a)
-                } else {
-                    (&mut itb, &mut b)
-                };
-            match it.next() {
-                Some(v) => *aorb = v,
-                None => break,
-            }
-        }
-        self.ranges.drain(..drain_end);
-        self.folded = self.folded && other.folded;
-    }
-
-    /// Subtract the given set from this set, in place.
-    pub fn difference(&mut self, other: &IntervalSet<I>) {
-        if self.ranges.is_empty() || other.ranges.is_empty() {
-            return;
-        }
-
-        // This algorithm is (to me) surprisingly complex. A search of the
-        // interwebs indicate that this is a potentially interesting problem.
-        // Folks seem to suggest interval or segment trees, but I'd like to
-        // avoid the overhead (both runtime and conceptual) of that.
-        //
-        // The following is basically my Shitty First Draft. Therefore, in
-        // order to grok it, you probably need to read each line carefully.
-        // Simplifications are most welcome!
-        //
-        // Remember, we can assume the canonical format invariant here, which
-        // says that all ranges are sorted, not overlapping and not adjacent in
-        // each class.
-        let drain_end = self.ranges.len();
-        let (mut a, mut b) = (0, 0);
-        'LOOP: while a < drain_end && b < other.ranges.len() {
-            // Basically, the easy cases are when neither range overlaps with
-            // each other. If the `b` range is less than our current `a`
-            // range, then we can skip it and move on.
-            if other.ranges[b].upper() < self.ranges[a].lower() {
-                b += 1;
-                continue;
-            }
-            // ... similarly for the `a` range. If it's less than the smallest
-            // `b` range, then we can add it as-is.
-            if self.ranges[a].upper() < other.ranges[b].lower() {
-                let range = self.ranges[a];
-                self.ranges.push(range);
-                a += 1;
-                continue;
-            }
-            // Otherwise, we have overlapping ranges.
-            assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b]));
-
-            // This part is tricky and was non-obvious to me without looking
-            // at explicit examples (see the tests). The trickiness stems from
-            // two things: 1) subtracting a range from another range could
-            // yield two ranges and 2) after subtracting a range, it's possible
-            // that future ranges can have an impact. The loop below advances
-            // the `b` ranges until they can't possible impact the current
-            // range.
-            //
-            // For example, if our `a` range is `a-t` and our next three `b`
-            // ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply
-            // subtraction three times before moving on to the next `a` range.
-            let mut range = self.ranges[a];
-            while b < other.ranges.len()
-                && !range.is_intersection_empty(&other.ranges[b])
-            {
-                let old_range = range;
-                range = match range.difference(&other.ranges[b]) {
-                    (None, None) => {
-                        // We lost the entire range, so move on to the next
-                        // without adding this one.
-                        a += 1;
-                        continue 'LOOP;
-                    }
-                    (Some(range1), None) | (None, Some(range1)) => range1,
-                    (Some(range1), Some(range2)) => {
-                        self.ranges.push(range1);
-                        range2
-                    }
-                };
-                // It's possible that the `b` range has more to contribute
-                // here. In particular, if it is greater than the original
-                // range, then it might impact the next `a` range *and* it
-                // has impacted the current `a` range as much as possible,
-                // so we can quit. We don't bump `b` so that the next `a`
-                // range can apply it.
-                if other.ranges[b].upper() > old_range.upper() {
-                    break;
-                }
-                // Otherwise, the next `b` range might apply to the current
-                // `a` range.
-                b += 1;
-            }
-            self.ranges.push(range);
-            a += 1;
-        }
-        while a < drain_end {
-            let range = self.ranges[a];
-            self.ranges.push(range);
-            a += 1;
-        }
-        self.ranges.drain(..drain_end);
-        self.folded = self.folded && other.folded;
-    }
-
-    /// Compute the symmetric difference of the two sets, in place.
-    ///
-    /// This computes the symmetric difference of two interval sets. This
-    /// removes all elements in this set that are also in the given set,
-    /// but also adds all elements from the given set that aren't in this
-    /// set. That is, the set will contain all elements in either set,
-    /// but will not contain any elements that are in both sets.
-    pub fn symmetric_difference(&mut self, other: &IntervalSet<I>) {
-        // TODO(burntsushi): Fix this so that it amortizes allocation.
-        let mut intersection = self.clone();
-        intersection.intersect(other);
-        self.union(other);
-        self.difference(&intersection);
-    }
-
-    /// Negate this interval set.
-    ///
-    /// For all `x` where `x` is any element, if `x` was in this set, then it
-    /// will not be in this set after negation.
-    pub fn negate(&mut self) {
-        if self.ranges.is_empty() {
-            let (min, max) = (I::Bound::min_value(), I::Bound::max_value());
-            self.ranges.push(I::create(min, max));
-            // The set containing everything must case folded.
-            self.folded = true;
-            return;
-        }
-
-        // There should be a way to do this in-place with constant memory,
-        // but I couldn't figure out a simple way to do it. So just append
-        // the negation to the end of this range, and then drain it before
-        // we're done.
-        let drain_end = self.ranges.len();
-
-        // We do checked arithmetic below because of the canonical ordering
-        // invariant.
-        if self.ranges[0].lower() > I::Bound::min_value() {
-            let upper = self.ranges[0].lower().decrement();
-            self.ranges.push(I::create(I::Bound::min_value(), upper));
-        }
-        for i in 1..drain_end {
-            let lower = self.ranges[i - 1].upper().increment();
-            let upper = self.ranges[i].lower().decrement();
-            self.ranges.push(I::create(lower, upper));
-        }
-        if self.ranges[drain_end - 1].upper() < I::Bound::max_value() {
-            let lower = self.ranges[drain_end - 1].upper().increment();
-            self.ranges.push(I::create(lower, I::Bound::max_value()));
-        }
-        self.ranges.drain(..drain_end);
-        // We don't need to update whether this set is folded or not, because
-        // it is conservatively preserved through negation. Namely, if a set
-        // is not folded, then it is possible that its negation is folded, for
-        // example, [^☃]. But we're fine with assuming that the set is not
-        // folded in that case. (`folded` permits false negatives but not false
-        // positives.)
-        //
-        // But what about when a set is folded, is its negation also
-        // necessarily folded? Yes. Because if a set is folded, then for every
-        // character in the set, it necessarily included its equivalence class
-        // of case folded characters. Negating it in turn means that all
-        // equivalence classes in the set are negated, and any equivalence
-        // class that was previously not in the set is now entirely in the set.
-    }
-
-    /// Converts this set into a canonical ordering.
-    fn canonicalize(&mut self) {
-        if self.is_canonical() {
-            return;
-        }
-        self.ranges.sort();
-        assert!(!self.ranges.is_empty());
-
-        // Is there a way to do this in-place with constant memory? I couldn't
-        // figure out a way to do it. So just append the canonicalization to
-        // the end of this range, and then drain it before we're done.
-        let drain_end = self.ranges.len();
-        for oldi in 0..drain_end {
-            // If we've added at least one new range, then check if we can
-            // merge this range in the previously added range.
-            if self.ranges.len() > drain_end {
-                let (last, rest) = self.ranges.split_last_mut().unwrap();
-                if let Some(union) = last.union(&rest[oldi]) {
-                    *last = union;
-                    continue;
-                }
-            }
-            let range = self.ranges[oldi];
-            self.ranges.push(range);
-        }
-        self.ranges.drain(..drain_end);
-    }
-
-    /// Returns true if and only if this class is in a canonical ordering.
-    fn is_canonical(&self) -> bool {
-        for pair in self.ranges.windows(2) {
-            if pair[0] >= pair[1] {
-                return false;
-            }
-            if pair[0].is_contiguous(&pair[1]) {
-                return false;
-            }
-        }
-        true
-    }
-}
-
-/// An iterator over intervals.
-#[derive(Debug)]
-pub struct IntervalSetIter<'a, I>(slice::Iter<'a, I>);
-
-impl<'a, I> Iterator for IntervalSetIter<'a, I> {
-    type Item = &'a I;
-
-    fn next(&mut self) -> Option<&'a I> {
-        self.0.next()
-    }
-}
-
-pub trait Interval:
-    Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord
-{
-    type Bound: Bound;
-
-    fn lower(&self) -> Self::Bound;
-    fn upper(&self) -> Self::Bound;
-    fn set_lower(&mut self, bound: Self::Bound);
-    fn set_upper(&mut self, bound: Self::Bound);
-    fn case_fold_simple(
-        &self,
-        intervals: &mut Vec<Self>,
-    ) -> Result<(), unicode::CaseFoldError>;
-
-    /// Create a new interval.
-    fn create(lower: Self::Bound, upper: Self::Bound) -> Self {
-        let mut int = Self::default();
-        if lower <= upper {
-            int.set_lower(lower);
-            int.set_upper(upper);
-        } else {
-            int.set_lower(upper);
-            int.set_upper(lower);
-        }
-        int
-    }
-
-    /// Union the given overlapping range into this range.
-    ///
-    /// If the two ranges aren't contiguous, then this returns `None`.
-    fn union(&self, other: &Self) -> Option<Self> {
-        if !self.is_contiguous(other) {
-            return None;
-        }
-        let lower = cmp::min(self.lower(), other.lower());
-        let upper = cmp::max(self.upper(), other.upper());
-        Some(Self::create(lower, upper))
-    }
-
-    /// Intersect this range with the given range and return the result.
-    ///
-    /// If the intersection is empty, then this returns `None`.
-    fn intersect(&self, other: &Self) -> Option<Self> {
-        let lower = cmp::max(self.lower(), other.lower());
-        let upper = cmp::min(self.upper(), other.upper());
-        if lower <= upper {
-            Some(Self::create(lower, upper))
-        } else {
-            None
-        }
-    }
-
-    /// Subtract the given range from this range and return the resulting
-    /// ranges.
-    ///
-    /// If subtraction would result in an empty range, then no ranges are
-    /// returned.
-    fn difference(&self, other: &Self) -> (Option<Self>, Option<Self>) {
-        if self.is_subset(other) {
-            return (None, None);
-        }
-        if self.is_intersection_empty(other) {
-            return (Some(self.clone()), None);
-        }
-        let add_lower = other.lower() > self.lower();
-        let add_upper = other.upper() < self.upper();
-        // We know this because !self.is_subset(other) and the ranges have
-        // a non-empty intersection.
-        assert!(add_lower || add_upper);
-        let mut ret = (None, None);
-        if add_lower {
-            let upper = other.lower().decrement();
-            ret.0 = Some(Self::create(self.lower(), upper));
-        }
-        if add_upper {
-            let lower = other.upper().increment();
-            let range = Self::create(lower, self.upper());
-            if ret.0.is_none() {
-                ret.0 = Some(range);
-            } else {
-                ret.1 = Some(range);
-            }
-        }
-        ret
-    }
-
-    /// Returns true if and only if the two ranges are contiguous. Two ranges
-    /// are contiguous if and only if the ranges are either overlapping or
-    /// adjacent.
-    fn is_contiguous(&self, other: &Self) -> bool {
-        let lower1 = self.lower().as_u32();
-        let upper1 = self.upper().as_u32();
-        let lower2 = other.lower().as_u32();
-        let upper2 = other.upper().as_u32();
-        cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1)
-    }
-
-    /// Returns true if and only if the intersection of this range and the
-    /// other range is empty.
-    fn is_intersection_empty(&self, other: &Self) -> bool {
-        let (lower1, upper1) = (self.lower(), self.upper());
-        let (lower2, upper2) = (other.lower(), other.upper());
-        cmp::max(lower1, lower2) > cmp::min(upper1, upper2)
-    }
-
-    /// Returns true if and only if this range is a subset of the other range.
-    fn is_subset(&self, other: &Self) -> bool {
-        let (lower1, upper1) = (self.lower(), self.upper());
-        let (lower2, upper2) = (other.lower(), other.upper());
-        (lower2 <= lower1 && lower1 <= upper2)
-            && (lower2 <= upper1 && upper1 <= upper2)
-    }
-}
-
-pub trait Bound:
-    Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord
-{
-    fn min_value() -> Self;
-    fn max_value() -> Self;
-    fn as_u32(self) -> u32;
-    fn increment(self) -> Self;
-    fn decrement(self) -> Self;
-}
-
-impl Bound for u8 {
-    fn min_value() -> Self {
-        u8::MIN
-    }
-    fn max_value() -> Self {
-        u8::MAX
-    }
-    fn as_u32(self) -> u32 {
-        u32::from(self)
-    }
-    fn increment(self) -> Self {
-        self.checked_add(1).unwrap()
-    }
-    fn decrement(self) -> Self {
-        self.checked_sub(1).unwrap()
-    }
-}
-
-impl Bound for char {
-    fn min_value() -> Self {
-        '\x00'
-    }
-    fn max_value() -> Self {
-        '\u{10FFFF}'
-    }
-    fn as_u32(self) -> u32 {
-        u32::from(self)
-    }
-
-    fn increment(self) -> Self {
-        match self {
-            '\u{D7FF}' => '\u{E000}',
-            c => char::from_u32(u32::from(c).checked_add(1).unwrap()).unwrap(),
-        }
-    }
-
-    fn decrement(self) -> Self {
-        match self {
-            '\u{E000}' => '\u{D7FF}',
-            c => char::from_u32(u32::from(c).checked_sub(1).unwrap()).unwrap(),
-        }
-    }
-}
-
-// Tests for interval sets are written in src/hir.rs against the public API.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/literal.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/literal.rs
deleted file mode 100644
index a5a3737..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/literal.rs
+++ /dev/null
@@ -1,3214 +0,0 @@
-/*!
-Provides literal extraction from `Hir` expressions.
-
-An [`Extractor`] pulls literals out of [`Hir`] expressions and returns a
-[`Seq`] of [`Literal`]s.
-
-The purpose of literal extraction is generally to provide avenues for
-optimizing regex searches. The main idea is that substring searches can be an
-order of magnitude faster than a regex search. Therefore, if one can execute
-a substring search to find candidate match locations and only run the regex
-search at those locations, then it is possible for huge improvements in
-performance to be realized.
-
-With that said, literal optimizations are generally a black art because even
-though substring search is generally faster, if the number of candidates
-produced is high, then it can create a lot of overhead by ping-ponging between
-the substring search and the regex search.
-
-Here are some heuristics that might be used to help increase the chances of
-effective literal optimizations:
-
-* Stick to small [`Seq`]s. If you search for too many literals, it's likely
-to lead to substring search that is only a little faster than a regex search,
-and thus the overhead of using literal optimizations in the first place might
-make things slower overall.
-* The literals in your [`Seq`] shouldn't be too short. In general, longer is
-better. A sequence corresponding to single bytes that occur frequently in the
-haystack, for example, is probably a bad literal optimization because it's
-likely to produce many false positive candidates. Longer literals are less
-likely to match, and thus probably produce fewer false positives.
-* If it's possible to estimate the approximate frequency of each byte according
-to some pre-computed background distribution, it is possible to compute a score
-of how "good" a `Seq` is. If a `Seq` isn't good enough, you might consider
-skipping the literal optimization and just use the regex engine.
-
-(It should be noted that there are always pathological cases that can make
-any kind of literal optimization be a net slower result. This is why it
-might be a good idea to be conservative, or to even provide a means for
-literal optimizations to be dynamically disabled if they are determined to be
-ineffective according to some measure.)
-
-You're encouraged to explore the methods on [`Seq`], which permit shrinking
-the size of sequences in a preference-order preserving fashion.
-
-Finally, note that it isn't strictly necessary to use an [`Extractor`]. Namely,
-an `Extractor` only uses public APIs of the [`Seq`] and [`Literal`] types,
-so it is possible to implement your own extractor. For example, for n-grams
-or "inner" literals (i.e., not prefix or suffix literals). The `Extractor`
-is mostly responsible for the case analysis over `Hir` expressions. Much of
-the "trickier" parts are how to combine literal sequences, and that is all
-implemented on [`Seq`].
-*/
-
-use core::{cmp, mem, num::NonZeroUsize};
-
-use alloc::{vec, vec::Vec};
-
-use crate::hir::{self, Hir};
-
-/// Extracts prefix or suffix literal sequences from [`Hir`] expressions.
-///
-/// Literal extraction is based on the following observations:
-///
-/// * Many regexes start with one or a small number of literals.
-/// * Substring search for literals is often much faster (sometimes by an order
-/// of magnitude) than a regex search.
-///
-/// Thus, in many cases, one can search for literals to find candidate starting
-/// locations of a match, and then only run the full regex engine at each such
-/// location instead of over the full haystack.
-///
-/// The main downside of literal extraction is that it can wind up causing a
-/// search to be slower overall. For example, if there are many matches or if
-/// there are many candidates that don't ultimately lead to a match, then a
-/// lot of overhead will be spent in shuffing back-and-forth between substring
-/// search and the regex engine. This is the fundamental reason why literal
-/// optimizations for regex patterns is sometimes considered a "black art."
-///
-/// # Look-around assertions
-///
-/// Literal extraction treats all look-around assertions as-if they match every
-/// empty string. So for example, the regex `\bquux\b` will yield a sequence
-/// containing a single exact literal `quux`. However, not all occurrences
-/// of `quux` correspond to a match a of the regex. For example, `\bquux\b`
-/// does not match `ZquuxZ` anywhere because `quux` does not fall on a word
-/// boundary.
-///
-/// In effect, if your regex contains look-around assertions, then a match of
-/// an exact literal does not necessarily mean the regex overall matches. So
-/// you may still need to run the regex engine in such cases to confirm the
-/// match.
-///
-/// The precise guarantee you get from a literal sequence is: if every literal
-/// in the sequence is exact and the original regex contains zero look-around
-/// assertions, then a preference-order multi-substring search of those
-/// literals will precisely match a preference-order search of the original
-/// regex.
-///
-/// # Example
-///
-/// This shows how to extract prefixes:
-///
-/// ```
-/// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse};
-///
-/// let hir = parse(r"(a|b|c)(x|y|z)[A-Z]+foo")?;
-/// let got = Extractor::new().extract(&hir);
-/// // All literals returned are "inexact" because none of them reach the
-/// // match state.
-/// let expected = Seq::from_iter([
-///     Literal::inexact("ax"),
-///     Literal::inexact("ay"),
-///     Literal::inexact("az"),
-///     Literal::inexact("bx"),
-///     Literal::inexact("by"),
-///     Literal::inexact("bz"),
-///     Literal::inexact("cx"),
-///     Literal::inexact("cy"),
-///     Literal::inexact("cz"),
-/// ]);
-/// assert_eq!(expected, got);
-///
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-///
-/// This shows how to extract suffixes:
-///
-/// ```
-/// use regex_syntax::{
-///     hir::literal::{Extractor, ExtractKind, Literal, Seq},
-///     parse,
-/// };
-///
-/// let hir = parse(r"foo|[A-Z]+bar")?;
-/// let got = Extractor::new().kind(ExtractKind::Suffix).extract(&hir);
-/// // Since 'foo' gets to a match state, it is considered exact. But 'bar'
-/// // does not because of the '[A-Z]+', and thus is marked inexact.
-/// let expected = Seq::from_iter([
-///     Literal::exact("foo"),
-///     Literal::inexact("bar"),
-/// ]);
-/// assert_eq!(expected, got);
-///
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-#[derive(Clone, Debug)]
-pub struct Extractor {
-    kind: ExtractKind,
-    limit_class: usize,
-    limit_repeat: usize,
-    limit_literal_len: usize,
-    limit_total: usize,
-}
-
-impl Extractor {
-    /// Create a new extractor with a default configuration.
-    ///
-    /// The extractor can be optionally configured before calling
-    /// [`Extractor::extract`] to get a literal sequence.
-    pub fn new() -> Extractor {
-        Extractor {
-            kind: ExtractKind::Prefix,
-            limit_class: 10,
-            limit_repeat: 10,
-            limit_literal_len: 100,
-            limit_total: 250,
-        }
-    }
-
-    /// Execute the extractor and return a sequence of literals.
-    pub fn extract(&self, hir: &Hir) -> Seq {
-        use crate::hir::HirKind::*;
-
-        match *hir.kind() {
-            Empty | Look(_) => Seq::singleton(self::Literal::exact(vec![])),
-            Literal(hir::Literal(ref bytes)) => {
-                let mut seq =
-                    Seq::singleton(self::Literal::exact(bytes.to_vec()));
-                self.enforce_literal_len(&mut seq);
-                seq
-            }
-            Class(hir::Class::Unicode(ref cls)) => {
-                self.extract_class_unicode(cls)
-            }
-            Class(hir::Class::Bytes(ref cls)) => self.extract_class_bytes(cls),
-            Repetition(ref rep) => self.extract_repetition(rep),
-            Capture(hir::Capture { ref sub, .. }) => self.extract(sub),
-            Concat(ref hirs) => match self.kind {
-                ExtractKind::Prefix => self.extract_concat(hirs.iter()),
-                ExtractKind::Suffix => self.extract_concat(hirs.iter().rev()),
-            },
-            Alternation(ref hirs) => {
-                // Unlike concat, we always union starting from the beginning,
-                // since the beginning corresponds to the highest preference,
-                // which doesn't change based on forwards vs reverse.
-                self.extract_alternation(hirs.iter())
-            }
-        }
-    }
-
-    /// Set the kind of literal sequence to extract from an [`Hir`] expression.
-    ///
-    /// The default is to extract prefixes, but suffixes can be selected
-    /// instead. The contract for prefixes is that every match of the
-    /// corresponding `Hir` must start with one of the literals in the sequence
-    /// returned. Moreover, the _order_ of the sequence returned corresponds to
-    /// the preference order.
-    ///
-    /// Suffixes satisfy a similar contract in that every match of the
-    /// corresponding `Hir` must end with one of the literals in the sequence
-    /// returned. However, there is no guarantee that the literals are in
-    /// preference order.
-    ///
-    /// Remember that a sequence can be infinite. For example, unless the
-    /// limits are configured to be impractically large, attempting to extract
-    /// prefixes (or suffixes) for the pattern `[A-Z]` will return an infinite
-    /// sequence. Generally speaking, if the sequence returned is infinite,
-    /// then it is presumed to be unwise to do prefix (or suffix) optimizations
-    /// for the pattern.
-    pub fn kind(&mut self, kind: ExtractKind) -> &mut Extractor {
-        self.kind = kind;
-        self
-    }
-
-    /// Configure a limit on the length of the sequence that is permitted for
-    /// a character class. If a character class exceeds this limit, then the
-    /// sequence returned for it is infinite.
-    ///
-    /// This prevents classes like `[A-Z]` or `\pL` from getting turned into
-    /// huge and likely unproductive sequences of literals.
-    ///
-    /// # Example
-    ///
-    /// This example shows how this limit can be lowered to decrease the tolerance
-    /// for character classes being turned into literal sequences.
-    ///
-    /// ```
-    /// use regex_syntax::{hir::literal::{Extractor, Seq}, parse};
-    ///
-    /// let hir = parse(r"[0-9]")?;
-    ///
-    /// let got = Extractor::new().extract(&hir);
-    /// let expected = Seq::new([
-    ///     "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
-    /// ]);
-    /// assert_eq!(expected, got);
-    ///
-    /// // Now let's shrink the limit and see how that changes things.
-    /// let got = Extractor::new().limit_class(4).extract(&hir);
-    /// let expected = Seq::infinite();
-    /// assert_eq!(expected, got);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn limit_class(&mut self, limit: usize) -> &mut Extractor {
-        self.limit_class = limit;
-        self
-    }
-
-    /// Configure a limit on the total number of repetitions that is permitted
-    /// before literal extraction is stopped.
-    ///
-    /// This is useful for limiting things like `(abcde){50}`, or more
-    /// insidiously, `(?:){1000000000}`. This limit prevents any one single
-    /// repetition from adding too much to a literal sequence.
-    ///
-    /// With this limit set, repetitions that exceed it will be stopped and any
-    /// literals extracted up to that point will be made inexact.
-    ///
-    /// # Example
-    ///
-    /// This shows how to decrease the limit and compares it with the default.
-    ///
-    /// ```
-    /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse};
-    ///
-    /// let hir = parse(r"(abc){8}")?;
-    ///
-    /// let got = Extractor::new().extract(&hir);
-    /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]);
-    /// assert_eq!(expected, got);
-    ///
-    /// // Now let's shrink the limit and see how that changes things.
-    /// let got = Extractor::new().limit_repeat(4).extract(&hir);
-    /// let expected = Seq::from_iter([
-    ///     Literal::inexact("abcabcabcabc"),
-    /// ]);
-    /// assert_eq!(expected, got);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn limit_repeat(&mut self, limit: usize) -> &mut Extractor {
-        self.limit_repeat = limit;
-        self
-    }
-
-    /// Configure a limit on the maximum length of any literal in a sequence.
-    ///
-    /// This is useful for limiting things like `(abcde){5}{5}{5}{5}`. While
-    /// each repetition or literal in that regex is small, when all the
-    /// repetitions are applied, one ends up with a literal of length `5^4 =
-    /// 625`.
-    ///
-    /// With this limit set, literals that exceed it will be made inexact and
-    /// thus prevented from growing.
-    ///
-    /// # Example
-    ///
-    /// This shows how to decrease the limit and compares it with the default.
-    ///
-    /// ```
-    /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse};
-    ///
-    /// let hir = parse(r"(abc){2}{2}{2}")?;
-    ///
-    /// let got = Extractor::new().extract(&hir);
-    /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]);
-    /// assert_eq!(expected, got);
-    ///
-    /// // Now let's shrink the limit and see how that changes things.
-    /// let got = Extractor::new().limit_literal_len(14).extract(&hir);
-    /// let expected = Seq::from_iter([
-    ///     Literal::inexact("abcabcabcabcab"),
-    /// ]);
-    /// assert_eq!(expected, got);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn limit_literal_len(&mut self, limit: usize) -> &mut Extractor {
-        self.limit_literal_len = limit;
-        self
-    }
-
-    /// Configure a limit on the total number of literals that will be
-    /// returned.
-    ///
-    /// This is useful as a practical measure for avoiding the creation of
-    /// large sequences of literals. While the extractor will automatically
-    /// handle local creations of large sequences (for example, `[A-Z]` yields
-    /// an infinite sequence by default), large sequences can be created
-    /// through non-local means as well.
-    ///
-    /// For example, `[ab]{3}{3}` would yield a sequence of length `512 = 2^9`
-    /// despite each of the repetitions being small on their own. This limit
-    /// thus represents a "catch all" for avoiding locally small sequences from
-    /// combining into large sequences.
-    ///
-    /// # Example
-    ///
-    /// This example shows how reducing the limit will change the literal
-    /// sequence returned.
-    ///
-    /// ```
-    /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse};
-    ///
-    /// let hir = parse(r"[ab]{2}{2}")?;
-    ///
-    /// let got = Extractor::new().extract(&hir);
-    /// let expected = Seq::new([
-    ///     "aaaa", "aaab", "aaba", "aabb",
-    ///     "abaa", "abab", "abba", "abbb",
-    ///     "baaa", "baab", "baba", "babb",
-    ///     "bbaa", "bbab", "bbba", "bbbb",
-    /// ]);
-    /// assert_eq!(expected, got);
-    ///
-    /// // The default limit is not too big, but big enough to extract all
-    /// // literals from '[ab]{2}{2}'. If we shrink the limit to less than 16,
-    /// // then we'll get a truncated set. Notice that it returns a sequence of
-    /// // length 4 even though our limit was 10. This is because the sequence
-    /// // is difficult to increase without blowing the limit. Notice also
-    /// // that every literal in the sequence is now inexact because they were
-    /// // stripped of some suffix.
-    /// let got = Extractor::new().limit_total(10).extract(&hir);
-    /// let expected = Seq::from_iter([
-    ///     Literal::inexact("aa"),
-    ///     Literal::inexact("ab"),
-    ///     Literal::inexact("ba"),
-    ///     Literal::inexact("bb"),
-    /// ]);
-    /// assert_eq!(expected, got);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn limit_total(&mut self, limit: usize) -> &mut Extractor {
-        self.limit_total = limit;
-        self
-    }
-
-    /// Extract a sequence from the given concatenation. Sequences from each of
-    /// the child HIR expressions are combined via cross product.
-    ///
-    /// This short circuits once the cross product turns into a sequence
-    /// containing only inexact literals.
-    fn extract_concat<'a, I: Iterator<Item = &'a Hir>>(&self, it: I) -> Seq {
-        let mut seq = Seq::singleton(self::Literal::exact(vec![]));
-        for hir in it {
-            // If every element in the sequence is inexact, then a cross
-            // product will always be a no-op. Thus, there is nothing else we
-            // can add to it and can quit early. Note that this also includes
-            // infinite sequences.
-            if seq.is_inexact() {
-                break;
-            }
-            // Note that 'cross' also dispatches based on whether we're
-            // extracting prefixes or suffixes.
-            seq = self.cross(seq, &mut self.extract(hir));
-        }
-        seq
-    }
-
-    /// Extract a sequence from the given alternation.
-    ///
-    /// This short circuits once the union turns into an infinite sequence.
-    fn extract_alternation<'a, I: Iterator<Item = &'a Hir>>(
-        &self,
-        it: I,
-    ) -> Seq {
-        let mut seq = Seq::empty();
-        for hir in it {
-            // Once our 'seq' is infinite, every subsequent union
-            // operation on it will itself always result in an
-            // infinite sequence. Thus, it can never change and we can
-            // short-circuit.
-            if !seq.is_finite() {
-                break;
-            }
-            seq = self.union(seq, &mut self.extract(hir));
-        }
-        seq
-    }
-
-    /// Extract a sequence of literals from the given repetition. We do our
-    /// best, Some examples:
-    ///
-    ///   'a*'    => [inexact(a), exact("")]
-    ///   'a*?'   => [exact(""), inexact(a)]
-    ///   'a+'    => [inexact(a)]
-    ///   'a{3}'  => [exact(aaa)]
-    ///   'a{3,5} => [inexact(aaa)]
-    ///
-    /// The key here really is making sure we get the 'inexact' vs 'exact'
-    /// attributes correct on each of the literals we add. For example, the
-    /// fact that 'a*' gives us an inexact 'a' and an exact empty string means
-    /// that a regex like 'ab*c' will result in [inexact(ab), exact(ac)]
-    /// literals being extracted, which might actually be a better prefilter
-    /// than just 'a'.
-    fn extract_repetition(&self, rep: &hir::Repetition) -> Seq {
-        let mut subseq = self.extract(&rep.sub);
-        match *rep {
-            hir::Repetition { min: 0, max, greedy, .. } => {
-                // When 'max=1', we can retain exactness, since 'a?' is
-                // equivalent to 'a|'. Similarly below, 'a??' is equivalent to
-                // '|a'.
-                if max != Some(1) {
-                    subseq.make_inexact();
-                }
-                let mut empty = Seq::singleton(Literal::exact(vec![]));
-                if !greedy {
-                    mem::swap(&mut subseq, &mut empty);
-                }
-                self.union(subseq, &mut empty)
-            }
-            hir::Repetition { min, max: Some(max), .. } if min == max => {
-                assert!(min > 0); // handled above
-                let limit =
-                    u32::try_from(self.limit_repeat).unwrap_or(u32::MAX);
-                let mut seq = Seq::singleton(Literal::exact(vec![]));
-                for _ in 0..cmp::min(min, limit) {
-                    if seq.is_inexact() {
-                        break;
-                    }
-                    seq = self.cross(seq, &mut subseq.clone());
-                }
-                if usize::try_from(min).is_err() || min > limit {
-                    seq.make_inexact();
-                }
-                seq
-            }
-            hir::Repetition { min, .. } => {
-                assert!(min > 0); // handled above
-                let limit =
-                    u32::try_from(self.limit_repeat).unwrap_or(u32::MAX);
-                let mut seq = Seq::singleton(Literal::exact(vec![]));
-                for _ in 0..cmp::min(min, limit) {
-                    if seq.is_inexact() {
-                        break;
-                    }
-                    seq = self.cross(seq, &mut subseq.clone());
-                }
-                seq.make_inexact();
-                seq
-            }
-        }
-    }
-
-    /// Convert the given Unicode class into a sequence of literals if the
-    /// class is small enough. If the class is too big, return an infinite
-    /// sequence.
-    fn extract_class_unicode(&self, cls: &hir::ClassUnicode) -> Seq {
-        if self.class_over_limit_unicode(cls) {
-            return Seq::infinite();
-        }
-        let mut seq = Seq::empty();
-        for r in cls.iter() {
-            for ch in r.start()..=r.end() {
-                seq.push(Literal::from(ch));
-            }
-        }
-        self.enforce_literal_len(&mut seq);
-        seq
-    }
-
-    /// Convert the given byte class into a sequence of literals if the class
-    /// is small enough. If the class is too big, return an infinite sequence.
-    fn extract_class_bytes(&self, cls: &hir::ClassBytes) -> Seq {
-        if self.class_over_limit_bytes(cls) {
-            return Seq::infinite();
-        }
-        let mut seq = Seq::empty();
-        for r in cls.iter() {
-            for b in r.start()..=r.end() {
-                seq.push(Literal::from(b));
-            }
-        }
-        self.enforce_literal_len(&mut seq);
-        seq
-    }
-
-    /// Returns true if the given Unicode class exceeds the configured limits
-    /// on this extractor.
-    fn class_over_limit_unicode(&self, cls: &hir::ClassUnicode) -> bool {
-        let mut count = 0;
-        for r in cls.iter() {
-            if count > self.limit_class {
-                return true;
-            }
-            count += r.len();
-        }
-        count > self.limit_class
-    }
-
-    /// Returns true if the given byte class exceeds the configured limits on
-    /// this extractor.
-    fn class_over_limit_bytes(&self, cls: &hir::ClassBytes) -> bool {
-        let mut count = 0;
-        for r in cls.iter() {
-            if count > self.limit_class {
-                return true;
-            }
-            count += r.len();
-        }
-        count > self.limit_class
-    }
-
-    /// Compute the cross product of the two sequences if the result would be
-    /// within configured limits. Otherwise, make `seq2` infinite and cross the
-    /// infinite sequence with `seq1`.
-    fn cross(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq {
-        if seq1.max_cross_len(seq2).map_or(false, |len| len > self.limit_total)
-        {
-            seq2.make_infinite();
-        }
-        if let ExtractKind::Suffix = self.kind {
-            seq1.cross_reverse(seq2);
-        } else {
-            seq1.cross_forward(seq2);
-        }
-        assert!(seq1.len().map_or(true, |x| x <= self.limit_total));
-        self.enforce_literal_len(&mut seq1);
-        seq1
-    }
-
-    /// Union the two sequences if the result would be within configured
-    /// limits. Otherwise, make `seq2` infinite and union the infinite sequence
-    /// with `seq1`.
-    fn union(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq {
-        if seq1.max_union_len(seq2).map_or(false, |len| len > self.limit_total)
-        {
-            // We try to trim our literal sequences to see if we can make
-            // room for more literals. The idea is that we'd rather trim down
-            // literals already in our sequence if it means we can add a few
-            // more and retain a finite sequence. Otherwise, we'll union with
-            // an infinite sequence and that infects everything and effectively
-            // stops literal extraction in its tracks.
-            //
-            // We do we keep 4 bytes here? Well, it's a bit of an abstraction
-            // leakage. Downstream, the literals may wind up getting fed to
-            // the Teddy algorithm, which supports searching literals up to
-            // length 4. So that's why we pick that number here. Arguably this
-            // should be a tuneable parameter, but it seems a little tricky to
-            // describe. And I'm still unsure if this is the right way to go
-            // about culling literal sequences.
-            match self.kind {
-                ExtractKind::Prefix => {
-                    seq1.keep_first_bytes(4);
-                    seq2.keep_first_bytes(4);
-                }
-                ExtractKind::Suffix => {
-                    seq1.keep_last_bytes(4);
-                    seq2.keep_last_bytes(4);
-                }
-            }
-            seq1.dedup();
-            seq2.dedup();
-            if seq1
-                .max_union_len(seq2)
-                .map_or(false, |len| len > self.limit_total)
-            {
-                seq2.make_infinite();
-            }
-        }
-        seq1.union(seq2);
-        assert!(seq1.len().map_or(true, |x| x <= self.limit_total));
-        seq1
-    }
-
-    /// Applies the literal length limit to the given sequence. If none of the
-    /// literals in the sequence exceed the limit, then this is a no-op.
-    fn enforce_literal_len(&self, seq: &mut Seq) {
-        let len = self.limit_literal_len;
-        match self.kind {
-            ExtractKind::Prefix => seq.keep_first_bytes(len),
-            ExtractKind::Suffix => seq.keep_last_bytes(len),
-        }
-    }
-}
-
-impl Default for Extractor {
-    fn default() -> Extractor {
-        Extractor::new()
-    }
-}
-
-/// The kind of literals to extract from an [`Hir`] expression.
-///
-/// The default extraction kind is `Prefix`.
-#[non_exhaustive]
-#[derive(Clone, Debug)]
-pub enum ExtractKind {
-    /// Extracts only prefix literals from a regex.
-    Prefix,
-    /// Extracts only suffix literals from a regex.
-    ///
-    /// Note that the sequence returned by suffix literals currently may
-    /// not correctly represent leftmost-first or "preference" order match
-    /// semantics.
-    Suffix,
-}
-
-impl ExtractKind {
-    /// Returns true if this kind is the `Prefix` variant.
-    pub fn is_prefix(&self) -> bool {
-        matches!(*self, ExtractKind::Prefix)
-    }
-
-    /// Returns true if this kind is the `Suffix` variant.
-    pub fn is_suffix(&self) -> bool {
-        matches!(*self, ExtractKind::Suffix)
-    }
-}
-
-impl Default for ExtractKind {
-    fn default() -> ExtractKind {
-        ExtractKind::Prefix
-    }
-}
-
-/// A sequence of literals.
-///
-/// A `Seq` is very much like a set in that it represents a union of its
-/// members. That is, it corresponds to a set of literals where at least one
-/// must match in order for a particular [`Hir`] expression to match. (Whether
-/// this corresponds to the entire `Hir` expression, a prefix of it or a suffix
-/// of it depends on how the `Seq` was extracted from the `Hir`.)
-///
-/// It is also unlike a set in that multiple identical literals may appear,
-/// and that the order of the literals in the `Seq` matters. For example, if
-/// the sequence is `[sam, samwise]` and leftmost-first matching is used, then
-/// `samwise` can never match and the sequence is equivalent to `[sam]`.
-///
-/// # States of a sequence
-///
-/// A `Seq` has a few different logical states to consider:
-///
-/// * The sequence can represent "any" literal. When this happens, the set does
-/// not have a finite size. The purpose of this state is to inhibit callers
-/// from making assumptions about what literals are required in order to match
-/// a particular [`Hir`] expression. Generally speaking, when a set is in this
-/// state, literal optimizations are inhibited. A good example of a regex that
-/// will cause this sort of set to appear is `[A-Za-z]`. The character class
-/// is just too big (and also too narrow) to be usefully expanded into 52
-/// different literals. (Note that the decision for when a seq should become
-/// infinite is determined by the caller. A seq itself has no hard-coded
-/// limits.)
-/// * The sequence can be empty, in which case, it is an affirmative statement
-/// that there are no literals that can match the corresponding `Hir`.
-/// Consequently, the `Hir` never matches any input. For example, `[a&&b]`.
-/// * The sequence can be non-empty, in which case, at least one of the
-/// literals must match in order for the corresponding `Hir` to match.
-///
-/// # Example
-///
-/// This example shows how literal sequences can be simplified by stripping
-/// suffixes and minimizing while maintaining preference order.
-///
-/// ```
-/// use regex_syntax::hir::literal::{Literal, Seq};
-///
-/// let mut seq = Seq::new(&[
-///     "farm",
-///     "appliance",
-///     "faraway",
-///     "apple",
-///     "fare",
-///     "gap",
-///     "applicant",
-///     "applaud",
-/// ]);
-/// seq.keep_first_bytes(3);
-/// seq.minimize_by_preference();
-/// // Notice that 'far' comes before 'app', which matches the order in the
-/// // original sequence. This guarantees that leftmost-first semantics are
-/// // not altered by simplifying the set.
-/// let expected = Seq::from_iter([
-///     Literal::inexact("far"),
-///     Literal::inexact("app"),
-///     Literal::exact("gap"),
-/// ]);
-/// assert_eq!(expected, seq);
-/// ```
-#[derive(Clone, Eq, PartialEq)]
-pub struct Seq {
-    /// The members of this seq.
-    ///
-    /// When `None`, the seq represents all possible literals. That is, it
-    /// prevents one from making assumptions about specific literals in the
-    /// seq, and forces one to treat it as if any literal might be in the seq.
-    ///
-    /// Note that `Some(vec![])` is valid and corresponds to the empty seq of
-    /// literals, i.e., a regex that can never match. For example, `[a&&b]`.
-    /// It is distinct from `Some(vec![""])`, which corresponds to the seq
-    /// containing an empty string, which matches at every position.
-    literals: Option<Vec<Literal>>,
-}
-
-impl Seq {
-    /// Returns an empty sequence.
-    ///
-    /// An empty sequence matches zero literals, and thus corresponds to a
-    /// regex that itself can never match.
-    #[inline]
-    pub fn empty() -> Seq {
-        Seq { literals: Some(vec![]) }
-    }
-
-    /// Returns a sequence of literals without a finite size and may contain
-    /// any literal.
-    ///
-    /// A sequence without finite size does not reveal anything about the
-    /// characteristics of the literals in its set. There are no fixed prefixes
-    /// or suffixes, nor are lower or upper bounds on the length of the literals
-    /// in the set known.
-    ///
-    /// This is useful to represent constructs in a regex that are "too big"
-    /// to useful represent as a sequence of literals. For example, `[A-Za-z]`.
-    /// When sequences get too big, they lose their discriminating nature and
-    /// are more likely to produce false positives, which in turn makes them
-    /// less likely to speed up searches.
-    ///
-    /// More pragmatically, for many regexes, enumerating all possible literals
-    /// is itself not possible or might otherwise use too many resources. So
-    /// constraining the size of sets during extraction is a practical trade
-    /// off to make.
-    #[inline]
-    pub fn infinite() -> Seq {
-        Seq { literals: None }
-    }
-
-    /// Returns a sequence containing a single literal.
-    #[inline]
-    pub fn singleton(lit: Literal) -> Seq {
-        Seq { literals: Some(vec![lit]) }
-    }
-
-    /// Returns a sequence of exact literals from the given byte strings.
-    #[inline]
-    pub fn new<I, B>(it: I) -> Seq
-    where
-        I: IntoIterator<Item = B>,
-        B: AsRef<[u8]>,
-    {
-        it.into_iter().map(|b| Literal::exact(b.as_ref())).collect()
-    }
-
-    /// If this is a finite sequence, return its members as a slice of
-    /// literals.
-    ///
-    /// The slice returned may be empty, in which case, there are no literals
-    /// that can match this sequence.
-    #[inline]
-    pub fn literals(&self) -> Option<&[Literal]> {
-        self.literals.as_deref()
-    }
-
-    /// Push a literal to the end of this sequence.
-    ///
-    /// If this sequence is not finite, then this is a no-op.
-    ///
-    /// Similarly, if the most recently added item of this sequence is
-    /// equivalent to the literal given, then it is not added. This reflects
-    /// a `Seq`'s "set like" behavior, and represents a practical trade off.
-    /// Namely, there is never any need to have two adjacent and equivalent
-    /// literals in the same sequence, _and_ it is easy to detect in some
-    /// cases.
-    #[inline]
-    pub fn push(&mut self, lit: Literal) {
-        let lits = match self.literals {
-            None => return,
-            Some(ref mut lits) => lits,
-        };
-        if lits.last().map_or(false, |m| m == &lit) {
-            return;
-        }
-        lits.push(lit);
-    }
-
-    /// Make all of the literals in this sequence inexact.
-    ///
-    /// This is a no-op if this sequence is not finite.
-    #[inline]
-    pub fn make_inexact(&mut self) {
-        let lits = match self.literals {
-            None => return,
-            Some(ref mut lits) => lits,
-        };
-        for lit in lits.iter_mut() {
-            lit.make_inexact();
-        }
-    }
-
-    /// Converts this sequence to an infinite sequence.
-    ///
-    /// This is a no-op if the sequence is already infinite.
-    #[inline]
-    pub fn make_infinite(&mut self) {
-        self.literals = None;
-    }
-
-    /// Modify this sequence to contain the cross product between it and the
-    /// sequence given.
-    ///
-    /// The cross product only considers literals in this sequence that are
-    /// exact. That is, inexact literals are not extended.
-    ///
-    /// The literals are always drained from `other`, even if none are used.
-    /// This permits callers to reuse the sequence allocation elsewhere.
-    ///
-    /// If this sequence is infinite, then this is a no-op, regardless of what
-    /// `other` contains (and in this case, the literals are still drained from
-    /// `other`). If `other` is infinite and this sequence is finite, then this
-    /// is a no-op, unless this sequence contains a zero-length literal. In
-    /// which case, the infiniteness of `other` infects this sequence, and this
-    /// sequence is itself made infinite.
-    ///
-    /// Like [`Seq::union`], this may attempt to deduplicate literals. See
-    /// [`Seq::dedup`] for how deduplication deals with exact and inexact
-    /// literals.
-    ///
-    /// # Example
-    ///
-    /// This example shows basic usage and how exact and inexact literals
-    /// interact.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// let mut seq2 = Seq::from_iter([
-    ///     Literal::inexact("quux"),
-    ///     Literal::exact("baz"),
-    /// ]);
-    /// seq1.cross_forward(&mut seq2);
-    ///
-    /// // The literals are pulled out of seq2.
-    /// assert_eq!(Some(0), seq2.len());
-    ///
-    /// let expected = Seq::from_iter([
-    ///     Literal::inexact("fooquux"),
-    ///     Literal::exact("foobaz"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// assert_eq!(expected, seq1);
-    /// ```
-    ///
-    /// This example shows the behavior of when `other` is an infinite
-    /// sequence.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// let mut seq2 = Seq::infinite();
-    /// seq1.cross_forward(&mut seq2);
-    ///
-    /// // When seq2 is infinite, cross product doesn't add anything, but
-    /// // ensures all members of seq1 are inexact.
-    /// let expected = Seq::from_iter([
-    ///     Literal::inexact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// assert_eq!(expected, seq1);
-    /// ```
-    ///
-    /// This example is like the one above, but shows what happens when this
-    /// sequence contains an empty string. In this case, an infinite `other`
-    /// sequence infects this sequence (because the empty string means that
-    /// there are no finite prefixes):
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::exact(""), // inexact provokes same behavior
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// let mut seq2 = Seq::infinite();
-    /// seq1.cross_forward(&mut seq2);
-    ///
-    /// // seq1 is now infinite!
-    /// assert!(!seq1.is_finite());
-    /// ```
-    ///
-    /// This example shows the behavior of this sequence is infinite.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::infinite();
-    /// let mut seq2 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// seq1.cross_forward(&mut seq2);
-    ///
-    /// // seq1 remains unchanged.
-    /// assert!(!seq1.is_finite());
-    /// // Even though the literals in seq2 weren't used, it was still drained.
-    /// assert_eq!(Some(0), seq2.len());
-    /// ```
-    #[inline]
-    pub fn cross_forward(&mut self, other: &mut Seq) {
-        let (lits1, lits2) = match self.cross_preamble(other) {
-            None => return,
-            Some((lits1, lits2)) => (lits1, lits2),
-        };
-        let newcap = lits1.len().saturating_mul(lits2.len());
-        for selflit in mem::replace(lits1, Vec::with_capacity(newcap)) {
-            if !selflit.is_exact() {
-                lits1.push(selflit);
-                continue;
-            }
-            for otherlit in lits2.iter() {
-                let mut newlit = Literal::exact(Vec::with_capacity(
-                    selflit.len() + otherlit.len(),
-                ));
-                newlit.extend(&selflit);
-                newlit.extend(&otherlit);
-                if !otherlit.is_exact() {
-                    newlit.make_inexact();
-                }
-                lits1.push(newlit);
-            }
-        }
-        lits2.drain(..);
-        self.dedup();
-    }
-
-    /// Modify this sequence to contain the cross product between it and
-    /// the sequence given, where the sequences are treated as suffixes
-    /// instead of prefixes. Namely, the sequence `other` is *prepended*
-    /// to `self` (as opposed to `other` being *appended* to `self` in
-    /// [`Seq::cross_forward`]).
-    ///
-    /// The cross product only considers literals in this sequence that are
-    /// exact. That is, inexact literals are not extended.
-    ///
-    /// The literals are always drained from `other`, even if none are used.
-    /// This permits callers to reuse the sequence allocation elsewhere.
-    ///
-    /// If this sequence is infinite, then this is a no-op, regardless of what
-    /// `other` contains (and in this case, the literals are still drained from
-    /// `other`). If `other` is infinite and this sequence is finite, then this
-    /// is a no-op, unless this sequence contains a zero-length literal. In
-    /// which case, the infiniteness of `other` infects this sequence, and this
-    /// sequence is itself made infinite.
-    ///
-    /// Like [`Seq::union`], this may attempt to deduplicate literals. See
-    /// [`Seq::dedup`] for how deduplication deals with exact and inexact
-    /// literals.
-    ///
-    /// # Example
-    ///
-    /// This example shows basic usage and how exact and inexact literals
-    /// interact.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// let mut seq2 = Seq::from_iter([
-    ///     Literal::inexact("quux"),
-    ///     Literal::exact("baz"),
-    /// ]);
-    /// seq1.cross_reverse(&mut seq2);
-    ///
-    /// // The literals are pulled out of seq2.
-    /// assert_eq!(Some(0), seq2.len());
-    ///
-    /// let expected = Seq::from_iter([
-    ///     Literal::inexact("quuxfoo"),
-    ///     Literal::inexact("bar"),
-    ///     Literal::exact("bazfoo"),
-    /// ]);
-    /// assert_eq!(expected, seq1);
-    /// ```
-    ///
-    /// This example shows the behavior of when `other` is an infinite
-    /// sequence.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// let mut seq2 = Seq::infinite();
-    /// seq1.cross_reverse(&mut seq2);
-    ///
-    /// // When seq2 is infinite, cross product doesn't add anything, but
-    /// // ensures all members of seq1 are inexact.
-    /// let expected = Seq::from_iter([
-    ///     Literal::inexact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// assert_eq!(expected, seq1);
-    /// ```
-    ///
-    /// This example is like the one above, but shows what happens when this
-    /// sequence contains an empty string. In this case, an infinite `other`
-    /// sequence infects this sequence (because the empty string means that
-    /// there are no finite suffixes):
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::exact(""), // inexact provokes same behavior
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// let mut seq2 = Seq::infinite();
-    /// seq1.cross_reverse(&mut seq2);
-    ///
-    /// // seq1 is now infinite!
-    /// assert!(!seq1.is_finite());
-    /// ```
-    ///
-    /// This example shows the behavior when this sequence is infinite.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq1 = Seq::infinite();
-    /// let mut seq2 = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::inexact("bar"),
-    /// ]);
-    /// seq1.cross_reverse(&mut seq2);
-    ///
-    /// // seq1 remains unchanged.
-    /// assert!(!seq1.is_finite());
-    /// // Even though the literals in seq2 weren't used, it was still drained.
-    /// assert_eq!(Some(0), seq2.len());
-    /// ```
-    #[inline]
-    pub fn cross_reverse(&mut self, other: &mut Seq) {
-        let (lits1, lits2) = match self.cross_preamble(other) {
-            None => return,
-            Some((lits1, lits2)) => (lits1, lits2),
-        };
-        // We basically proceed as we do in 'cross_forward' at this point,
-        // except that the outer loop is now 'other' and the inner loop is now
-        // 'self'. That's because 'self' corresponds to suffixes and 'other'
-        // corresponds to the sequence we want to *prepend* to the suffixes.
-        let newcap = lits1.len().saturating_mul(lits2.len());
-        let selflits = mem::replace(lits1, Vec::with_capacity(newcap));
-        for (i, otherlit) in lits2.drain(..).enumerate() {
-            for selflit in selflits.iter() {
-                if !selflit.is_exact() {
-                    // If the suffix isn't exact, then we can't prepend
-                    // anything to it. However, we still want to keep it. But
-                    // we only want to keep one of them, to avoid duplication.
-                    // (The duplication is okay from a correctness perspective,
-                    // but wasteful.)
-                    if i == 0 {
-                        lits1.push(selflit.clone());
-                    }
-                    continue;
-                }
-                let mut newlit = Literal::exact(Vec::with_capacity(
-                    otherlit.len() + selflit.len(),
-                ));
-                newlit.extend(&otherlit);
-                newlit.extend(&selflit);
-                if !otherlit.is_exact() {
-                    newlit.make_inexact();
-                }
-                lits1.push(newlit);
-            }
-        }
-        self.dedup();
-    }
-
-    /// A helper function the corresponds to the subtle preamble for both
-    /// `cross_forward` and `cross_reverse`. In effect, it handles the cases
-    /// of infinite sequences for both `self` and `other`, as well as ensuring
-    /// that literals from `other` are drained even if they aren't used.
-    fn cross_preamble<'a>(
-        &'a mut self,
-        other: &'a mut Seq,
-    ) -> Option<(&'a mut Vec<Literal>, &'a mut Vec<Literal>)> {
-        let lits2 = match other.literals {
-            None => {
-                // If our current seq contains the empty string and the seq
-                // we're adding matches any literal, then it follows that the
-                // current seq must now also match any literal.
-                //
-                // Otherwise, we just have to make sure everything in this
-                // sequence is inexact.
-                if self.min_literal_len() == Some(0) {
-                    *self = Seq::infinite();
-                } else {
-                    self.make_inexact();
-                }
-                return None;
-            }
-            Some(ref mut lits) => lits,
-        };
-        let lits1 = match self.literals {
-            None => {
-                // If we aren't going to make it to the end of this routine
-                // where lits2 is drained, then we need to do it now.
-                lits2.drain(..);
-                return None;
-            }
-            Some(ref mut lits) => lits,
-        };
-        Some((lits1, lits2))
-    }
-
-    /// Unions the `other` sequence into this one.
-    ///
-    /// The literals are always drained out of the given `other` sequence,
-    /// even if they are being unioned into an infinite sequence. This permits
-    /// the caller to reuse the `other` sequence in another context.
-    ///
-    /// Some literal deduping may be performed. If any deduping happens,
-    /// any leftmost-first or "preference" order match semantics will be
-    /// preserved.
-    ///
-    /// # Example
-    ///
-    /// This example shows basic usage.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let mut seq1 = Seq::new(&["foo", "bar"]);
-    /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]);
-    /// seq1.union(&mut seq2);
-    ///
-    /// // The literals are pulled out of seq2.
-    /// assert_eq!(Some(0), seq2.len());
-    ///
-    /// // Adjacent literals are deduped, but non-adjacent literals may not be.
-    /// assert_eq!(Seq::new(&["foo", "bar", "quux", "foo"]), seq1);
-    /// ```
-    ///
-    /// This example shows that literals are drained from `other` even when
-    /// they aren't necessarily used.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let mut seq1 = Seq::infinite();
-    /// // Infinite sequences have no finite length.
-    /// assert_eq!(None, seq1.len());
-    ///
-    /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]);
-    /// seq1.union(&mut seq2);
-    ///
-    /// // seq1 is still infinite and seq2 has been drained.
-    /// assert_eq!(None, seq1.len());
-    /// assert_eq!(Some(0), seq2.len());
-    /// ```
-    #[inline]
-    pub fn union(&mut self, other: &mut Seq) {
-        let lits2 = match other.literals {
-            None => {
-                // Unioning with an infinite sequence always results in an
-                // infinite sequence.
-                self.make_infinite();
-                return;
-            }
-            Some(ref mut lits) => lits.drain(..),
-        };
-        let lits1 = match self.literals {
-            None => return,
-            Some(ref mut lits) => lits,
-        };
-        lits1.extend(lits2);
-        self.dedup();
-    }
-
-    /// Unions the `other` sequence into this one by splice the `other`
-    /// sequence at the position of the first zero-length literal.
-    ///
-    /// This is useful for preserving preference order semantics when combining
-    /// two literal sequences. For example, in the regex `(a||f)+foo`, the
-    /// correct preference order prefix sequence is `[a, foo, f]`.
-    ///
-    /// The literals are always drained out of the given `other` sequence,
-    /// even if they are being unioned into an infinite sequence. This permits
-    /// the caller to reuse the `other` sequence in another context. Note that
-    /// the literals are drained even if no union is performed as well, i.e.,
-    /// when this sequence does not contain a zero-length literal.
-    ///
-    /// Some literal deduping may be performed. If any deduping happens,
-    /// any leftmost-first or "preference" order match semantics will be
-    /// preserved.
-    ///
-    /// # Example
-    ///
-    /// This example shows basic usage.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let mut seq1 = Seq::new(&["a", "", "f", ""]);
-    /// let mut seq2 = Seq::new(&["foo"]);
-    /// seq1.union_into_empty(&mut seq2);
-    ///
-    /// // The literals are pulled out of seq2.
-    /// assert_eq!(Some(0), seq2.len());
-    /// // 'foo' gets spliced into seq1 where the first empty string occurs.
-    /// assert_eq!(Seq::new(&["a", "foo", "f"]), seq1);
-    /// ```
-    ///
-    /// This example shows that literals are drained from `other` even when
-    /// they aren't necessarily used.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let mut seq1 = Seq::new(&["foo", "bar"]);
-    /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]);
-    /// seq1.union_into_empty(&mut seq2);
-    ///
-    /// // seq1 has no zero length literals, so no splicing happens.
-    /// assert_eq!(Seq::new(&["foo", "bar"]), seq1);
-    /// // Even though no splicing happens, seq2 is still drained.
-    /// assert_eq!(Some(0), seq2.len());
-    /// ```
-    #[inline]
-    pub fn union_into_empty(&mut self, other: &mut Seq) {
-        let lits2 = other.literals.as_mut().map(|lits| lits.drain(..));
-        let lits1 = match self.literals {
-            None => return,
-            Some(ref mut lits) => lits,
-        };
-        let first_empty = match lits1.iter().position(|m| m.is_empty()) {
-            None => return,
-            Some(i) => i,
-        };
-        let lits2 = match lits2 {
-            None => {
-                // Note that we are only here if we've found an empty literal,
-                // which implies that an infinite sequence infects this seq and
-                // also turns it into an infinite sequence.
-                self.literals = None;
-                return;
-            }
-            Some(lits) => lits,
-        };
-        // Clearing out the empties needs to come before the splice because
-        // the splice might add more empties that we don't want to get rid
-        // of. Since we're splicing into the position of the first empty, the
-        // 'first_empty' position computed above is still correct.
-        lits1.retain(|m| !m.is_empty());
-        lits1.splice(first_empty..first_empty, lits2);
-        self.dedup();
-    }
-
-    /// Deduplicate adjacent equivalent literals in this sequence.
-    ///
-    /// If adjacent literals are equivalent strings but one is exact and the
-    /// other inexact, the inexact literal is kept and the exact one is
-    /// removed.
-    ///
-    /// Deduping an infinite sequence is a no-op.
-    ///
-    /// # Example
-    ///
-    /// This example shows how literals that are duplicate byte strings but
-    /// are not equivalent with respect to exactness are resolved.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::inexact("foo"),
-    /// ]);
-    /// seq.dedup();
-    ///
-    /// assert_eq!(Seq::from_iter([Literal::inexact("foo")]), seq);
-    /// ```
-    #[inline]
-    pub fn dedup(&mut self) {
-        if let Some(ref mut lits) = self.literals {
-            lits.dedup_by(|lit1, lit2| {
-                if lit1.as_bytes() != lit2.as_bytes() {
-                    return false;
-                }
-                if lit1.is_exact() != lit2.is_exact() {
-                    lit1.make_inexact();
-                    lit2.make_inexact();
-                }
-                true
-            });
-        }
-    }
-
-    /// Sorts this sequence of literals lexicographically.
-    ///
-    /// Note that if, before sorting, if a literal that is a prefix of another
-    /// literal appears after it, then after sorting, the sequence will not
-    /// represent the same preference order match semantics. For example,
-    /// sorting the sequence `[samwise, sam]` yields the sequence `[sam,
-    /// samwise]`. Under preference order semantics, the latter sequence will
-    /// never match `samwise` where as the first sequence can.
-    ///
-    /// # Example
-    ///
-    /// This example shows basic usage.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let mut seq = Seq::new(&["foo", "quux", "bar"]);
-    /// seq.sort();
-    ///
-    /// assert_eq!(Seq::new(&["bar", "foo", "quux"]), seq);
-    /// ```
-    #[inline]
-    pub fn sort(&mut self) {
-        if let Some(ref mut lits) = self.literals {
-            lits.sort();
-        }
-    }
-
-    /// Reverses all of the literals in this sequence.
-    ///
-    /// The order of the sequence itself is preserved.
-    ///
-    /// # Example
-    ///
-    /// This example shows basic usage.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let mut seq = Seq::new(&["oof", "rab"]);
-    /// seq.reverse_literals();
-    /// assert_eq!(Seq::new(&["foo", "bar"]), seq);
-    /// ```
-    #[inline]
-    pub fn reverse_literals(&mut self) {
-        if let Some(ref mut lits) = self.literals {
-            for lit in lits.iter_mut() {
-                lit.reverse();
-            }
-        }
-    }
-
-    /// Shrinks this seq to its minimal size while respecting the preference
-    /// order of its literals.
-    ///
-    /// While this routine will remove duplicate literals from this seq, it
-    /// will also remove literals that can never match in a leftmost-first or
-    /// "preference order" search. Similar to [`Seq::dedup`], if a literal is
-    /// deduped, then the one that remains is made inexact.
-    ///
-    /// This is a no-op on seqs that are empty or not finite.
-    ///
-    /// # Example
-    ///
-    /// This example shows the difference between `{sam, samwise}` and
-    /// `{samwise, sam}`.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// // If 'sam' comes before 'samwise' and a preference order search is
-    /// // executed, then 'samwise' can never match.
-    /// let mut seq = Seq::new(&["sam", "samwise"]);
-    /// seq.minimize_by_preference();
-    /// assert_eq!(Seq::from_iter([Literal::inexact("sam")]), seq);
-    ///
-    /// // But if they are reversed, then it's possible for 'samwise' to match
-    /// // since it is given higher preference.
-    /// let mut seq = Seq::new(&["samwise", "sam"]);
-    /// seq.minimize_by_preference();
-    /// assert_eq!(Seq::new(&["samwise", "sam"]), seq);
-    /// ```
-    ///
-    /// This example shows that if an empty string is in this seq, then
-    /// anything that comes after it can never match.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// // An empty string is a prefix of all strings, so it automatically
-    /// // inhibits any subsequent strings from matching.
-    /// let mut seq = Seq::new(&["foo", "bar", "", "quux", "fox"]);
-    /// seq.minimize_by_preference();
-    /// let expected = Seq::from_iter([
-    ///     Literal::exact("foo"),
-    ///     Literal::exact("bar"),
-    ///     Literal::inexact(""),
-    /// ]);
-    /// assert_eq!(expected, seq);
-    ///
-    /// // And of course, if it's at the beginning, then it makes it impossible
-    /// // for anything else to match.
-    /// let mut seq = Seq::new(&["", "foo", "quux", "fox"]);
-    /// seq.minimize_by_preference();
-    /// assert_eq!(Seq::from_iter([Literal::inexact("")]), seq);
-    /// ```
-    #[inline]
-    pub fn minimize_by_preference(&mut self) {
-        if let Some(ref mut lits) = self.literals {
-            PreferenceTrie::minimize(lits, false);
-        }
-    }
-
-    /// Trims all literals in this seq such that only the first `len` bytes
-    /// remain. If a literal has less than or equal to `len` bytes, then it
-    /// remains unchanged. Otherwise, it is trimmed and made inexact.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq = Seq::new(&["a", "foo", "quux"]);
-    /// seq.keep_first_bytes(2);
-    ///
-    /// let expected = Seq::from_iter([
-    ///     Literal::exact("a"),
-    ///     Literal::inexact("fo"),
-    ///     Literal::inexact("qu"),
-    /// ]);
-    /// assert_eq!(expected, seq);
-    /// ```
-    #[inline]
-    pub fn keep_first_bytes(&mut self, len: usize) {
-        if let Some(ref mut lits) = self.literals {
-            for m in lits.iter_mut() {
-                m.keep_first_bytes(len);
-            }
-        }
-    }
-
-    /// Trims all literals in this seq such that only the last `len` bytes
-    /// remain. If a literal has less than or equal to `len` bytes, then it
-    /// remains unchanged. Otherwise, it is trimmed and made inexact.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Literal, Seq};
-    ///
-    /// let mut seq = Seq::new(&["a", "foo", "quux"]);
-    /// seq.keep_last_bytes(2);
-    ///
-    /// let expected = Seq::from_iter([
-    ///     Literal::exact("a"),
-    ///     Literal::inexact("oo"),
-    ///     Literal::inexact("ux"),
-    /// ]);
-    /// assert_eq!(expected, seq);
-    /// ```
-    #[inline]
-    pub fn keep_last_bytes(&mut self, len: usize) {
-        if let Some(ref mut lits) = self.literals {
-            for m in lits.iter_mut() {
-                m.keep_last_bytes(len);
-            }
-        }
-    }
-
-    /// Returns true if this sequence is finite.
-    ///
-    /// When false, this sequence is infinite and must be treated as if it
-    /// contains every possible literal.
-    #[inline]
-    pub fn is_finite(&self) -> bool {
-        self.literals.is_some()
-    }
-
-    /// Returns true if and only if this sequence is finite and empty.
-    ///
-    /// An empty sequence never matches anything. It can only be produced by
-    /// literal extraction when the corresponding regex itself cannot match.
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.len() == Some(0)
-    }
-
-    /// Returns the number of literals in this sequence if the sequence is
-    /// finite. If the sequence is infinite, then `None` is returned.
-    #[inline]
-    pub fn len(&self) -> Option<usize> {
-        self.literals.as_ref().map(|lits| lits.len())
-    }
-
-    /// Returns true if and only if all literals in this sequence are exact.
-    ///
-    /// This returns false if the sequence is infinite.
-    #[inline]
-    pub fn is_exact(&self) -> bool {
-        self.literals().map_or(false, |lits| lits.iter().all(|x| x.is_exact()))
-    }
-
-    /// Returns true if and only if all literals in this sequence are inexact.
-    ///
-    /// This returns true if the sequence is infinite.
-    #[inline]
-    pub fn is_inexact(&self) -> bool {
-        self.literals().map_or(true, |lits| lits.iter().all(|x| !x.is_exact()))
-    }
-
-    /// Return the maximum length of the sequence that would result from
-    /// unioning `self` with `other`. If either set is infinite, then this
-    /// returns `None`.
-    #[inline]
-    pub fn max_union_len(&self, other: &Seq) -> Option<usize> {
-        let len1 = self.len()?;
-        let len2 = other.len()?;
-        Some(len1.saturating_add(len2))
-    }
-
-    /// Return the maximum length of the sequence that would result from the
-    /// cross product of `self` with `other`. If either set is infinite, then
-    /// this returns `None`.
-    #[inline]
-    pub fn max_cross_len(&self, other: &Seq) -> Option<usize> {
-        let len1 = self.len()?;
-        let len2 = other.len()?;
-        Some(len1.saturating_mul(len2))
-    }
-
-    /// Returns the length of the shortest literal in this sequence.
-    ///
-    /// If the sequence is infinite or empty, then this returns `None`.
-    #[inline]
-    pub fn min_literal_len(&self) -> Option<usize> {
-        self.literals.as_ref()?.iter().map(|x| x.len()).min()
-    }
-
-    /// Returns the length of the longest literal in this sequence.
-    ///
-    /// If the sequence is infinite or empty, then this returns `None`.
-    #[inline]
-    pub fn max_literal_len(&self) -> Option<usize> {
-        self.literals.as_ref()?.iter().map(|x| x.len()).max()
-    }
-
-    /// Returns the longest common prefix from this seq.
-    ///
-    /// If the seq matches any literal or other contains no literals, then
-    /// there is no meaningful prefix and this returns `None`.
-    ///
-    /// # Example
-    ///
-    /// This shows some example seqs and their longest common prefix.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let seq = Seq::new(&["foo", "foobar", "fo"]);
-    /// assert_eq!(Some(&b"fo"[..]), seq.longest_common_prefix());
-    /// let seq = Seq::new(&["foo", "foo"]);
-    /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_prefix());
-    /// let seq = Seq::new(&["foo", "bar"]);
-    /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix());
-    /// let seq = Seq::new(&[""]);
-    /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix());
-    ///
-    /// let seq = Seq::infinite();
-    /// assert_eq!(None, seq.longest_common_prefix());
-    /// let seq = Seq::empty();
-    /// assert_eq!(None, seq.longest_common_prefix());
-    /// ```
-    #[inline]
-    pub fn longest_common_prefix(&self) -> Option<&[u8]> {
-        // If we match everything or match nothing, then there's no meaningful
-        // longest common prefix.
-        let lits = match self.literals {
-            None => return None,
-            Some(ref lits) => lits,
-        };
-        if lits.len() == 0 {
-            return None;
-        }
-        let base = lits[0].as_bytes();
-        let mut len = base.len();
-        for m in lits.iter().skip(1) {
-            len = m
-                .as_bytes()
-                .iter()
-                .zip(base[..len].iter())
-                .take_while(|&(a, b)| a == b)
-                .count();
-            if len == 0 {
-                return Some(&[]);
-            }
-        }
-        Some(&base[..len])
-    }
-
-    /// Returns the longest common suffix from this seq.
-    ///
-    /// If the seq matches any literal or other contains no literals, then
-    /// there is no meaningful suffix and this returns `None`.
-    ///
-    /// # Example
-    ///
-    /// This shows some example seqs and their longest common suffix.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::Seq;
-    ///
-    /// let seq = Seq::new(&["oof", "raboof", "of"]);
-    /// assert_eq!(Some(&b"of"[..]), seq.longest_common_suffix());
-    /// let seq = Seq::new(&["foo", "foo"]);
-    /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_suffix());
-    /// let seq = Seq::new(&["foo", "bar"]);
-    /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix());
-    /// let seq = Seq::new(&[""]);
-    /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix());
-    ///
-    /// let seq = Seq::infinite();
-    /// assert_eq!(None, seq.longest_common_suffix());
-    /// let seq = Seq::empty();
-    /// assert_eq!(None, seq.longest_common_suffix());
-    /// ```
-    #[inline]
-    pub fn longest_common_suffix(&self) -> Option<&[u8]> {
-        // If we match everything or match nothing, then there's no meaningful
-        // longest common suffix.
-        let lits = match self.literals {
-            None => return None,
-            Some(ref lits) => lits,
-        };
-        if lits.len() == 0 {
-            return None;
-        }
-        let base = lits[0].as_bytes();
-        let mut len = base.len();
-        for m in lits.iter().skip(1) {
-            len = m
-                .as_bytes()
-                .iter()
-                .rev()
-                .zip(base[base.len() - len..].iter().rev())
-                .take_while(|&(a, b)| a == b)
-                .count();
-            if len == 0 {
-                return Some(&[]);
-            }
-        }
-        Some(&base[base.len() - len..])
-    }
-
-    /// Optimizes this seq while treating its literals as prefixes and
-    /// respecting the preference order of its literals.
-    ///
-    /// The specific way "optimization" works is meant to be an implementation
-    /// detail, as it essentially represents a set of heuristics. The goal
-    /// that optimization tries to accomplish is to make the literals in this
-    /// set reflect inputs that will result in a more effective prefilter.
-    /// Principally by reducing the false positive rate of candidates found by
-    /// the literals in this sequence. That is, when a match of a literal is
-    /// found, we would like it to be a strong predictor of the overall match
-    /// of the regex. If it isn't, then much time will be spent starting and
-    /// stopping the prefilter search and attempting to confirm the match only
-    /// to have it fail.
-    ///
-    /// Some of those heuristics might be:
-    ///
-    /// * Identifying a common prefix from a larger sequence of literals, and
-    /// shrinking the sequence down to that single common prefix.
-    /// * Rejecting the sequence entirely if it is believed to result in very
-    /// high false positive rate. When this happens, the sequence is made
-    /// infinite.
-    /// * Shrinking the sequence to a smaller number of literals representing
-    /// prefixes, but not shrinking it so much as to make literals too short.
-    /// (A sequence with very short literals, of 1 or 2 bytes, will typically
-    /// result in a higher false positive rate.)
-    ///
-    /// Optimization should only be run once extraction is complete. Namely,
-    /// optimization may make assumptions that do not compose with other
-    /// operations in the middle of extraction. For example, optimization will
-    /// reduce `[E(sam), E(samwise)]` to `[E(sam)]`, but such a transformation
-    /// is only valid if no other extraction will occur. If other extraction
-    /// may occur, then the correct transformation would be to `[I(sam)]`.
-    ///
-    /// The [`Seq::optimize_for_suffix_by_preference`] does the same thing, but
-    /// for suffixes.
-    ///
-    /// # Example
-    ///
-    /// This shows how optimization might transform a sequence. Note that
-    /// the specific behavior is not a documented guarantee. The heuristics
-    /// used are an implementation detail and may change over time in semver
-    /// compatible releases.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Seq, Literal};
-    ///
-    /// let mut seq = Seq::new(&[
-    ///     "samantha",
-    ///     "sam",
-    ///     "samwise",
-    ///     "frodo",
-    /// ]);
-    /// seq.optimize_for_prefix_by_preference();
-    /// assert_eq!(Seq::from_iter([
-    ///     Literal::exact("samantha"),
-    ///     // Kept exact even though 'samwise' got pruned
-    ///     // because optimization assumes literal extraction
-    ///     // has finished.
-    ///     Literal::exact("sam"),
-    ///     Literal::exact("frodo"),
-    /// ]), seq);
-    /// ```
-    ///
-    /// # Example: optimization may make the sequence infinite
-    ///
-    /// If the heuristics deem that the sequence could cause a very high false
-    /// positive rate, then it may make the sequence infinite, effectively
-    /// disabling its use as a prefilter.
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Seq, Literal};
-    ///
-    /// let mut seq = Seq::new(&[
-    ///     "samantha",
-    ///     // An empty string matches at every position,
-    ///     // thus rendering the prefilter completely
-    ///     // ineffective.
-    ///     "",
-    ///     "sam",
-    ///     "samwise",
-    ///     "frodo",
-    /// ]);
-    /// seq.optimize_for_prefix_by_preference();
-    /// assert!(!seq.is_finite());
-    /// ```
-    ///
-    /// Do note that just because there is a `" "` in the sequence, that
-    /// doesn't mean the sequence will always be made infinite after it is
-    /// optimized. Namely, if the sequence is considered exact (any match
-    /// corresponds to an overall match of the original regex), then any match
-    /// is an overall match, and so the false positive rate is always `0`.
-    ///
-    /// To demonstrate this, we remove `samwise` from our sequence. This
-    /// results in no optimization happening and all literals remain exact.
-    /// Thus the entire sequence is exact, and it is kept as-is, even though
-    /// one is an ASCII space:
-    ///
-    /// ```
-    /// use regex_syntax::hir::literal::{Seq, Literal};
-    ///
-    /// let mut seq = Seq::new(&[
-    ///     "samantha",
-    ///     " ",
-    ///     "sam",
-    ///     "frodo",
-    /// ]);
-    /// seq.optimize_for_prefix_by_preference();
-    /// assert!(seq.is_finite());
-    /// ```
-    #[inline]
-    pub fn optimize_for_prefix_by_preference(&mut self) {
-        self.optimize_by_preference(true);
-    }
-
-    /// Optimizes this seq while treating its literals as suffixes and
-    /// respecting the preference order of its literals.
-    ///
-    /// Optimization should only be run once extraction is complete.
-    ///
-    /// The [`Seq::optimize_for_prefix_by_preference`] does the same thing, but
-    /// for prefixes. See its documentation for more explanation.
-    #[inline]
-    pub fn optimize_for_suffix_by_preference(&mut self) {
-        self.optimize_by_preference(false);
-    }
-
-    fn optimize_by_preference(&mut self, prefix: bool) {
-        let origlen = match self.len() {
-            None => return,
-            Some(len) => len,
-        };
-        // Just give up now if our sequence contains an empty string.
-        if self.min_literal_len().map_or(false, |len| len == 0) {
-            // We squash the sequence so that nobody else gets any bright
-            // ideas to try and use it. An empty string implies a match at
-            // every position. A prefilter cannot help you here.
-            self.make_infinite();
-            return;
-        }
-        // Make sure we start with the smallest sequence possible. We use a
-        // special version of preference minimization that retains exactness.
-        // This is legal because optimization is only expected to occur once
-        // extraction is complete.
-        if prefix {
-            if let Some(ref mut lits) = self.literals {
-                PreferenceTrie::minimize(lits, true);
-            }
-        }
-
-        // Look for a common prefix (or suffix). If we found one of those and
-        // it's long enough, then it's a good bet that it will be our fastest
-        // possible prefilter since single-substring search is so fast.
-        let fix = if prefix {
-            self.longest_common_prefix()
-        } else {
-            self.longest_common_suffix()
-        };
-        if let Some(fix) = fix {
-            // As a special case, if we have a common prefix and the leading
-            // byte of that prefix is one that we think probably occurs rarely,
-            // then strip everything down to just that single byte. This should
-            // promote the use of memchr.
-            //
-            // ... we only do this though if our sequence has more than one
-            // literal. Otherwise, we'd rather just stick with a single literal
-            // scan. That is, using memchr is probably better than looking
-            // for 2 or more literals, but probably not as good as a straight
-            // memmem search.
-            //
-            // ... and also only do this when the prefix is short and probably
-            // not too discriminatory anyway. If it's longer, then it's
-            // probably quite discriminatory and thus is likely to have a low
-            // false positive rate.
-            if prefix
-                && origlen > 1
-                && fix.len() >= 1
-                && fix.len() <= 3
-                && rank(fix[0]) < 200
-            {
-                self.keep_first_bytes(1);
-                self.dedup();
-                return;
-            }
-            // We only strip down to the common prefix/suffix if we think
-            // the existing set of literals isn't great, or if the common
-            // prefix/suffix is expected to be particularly discriminatory.
-            let isfast =
-                self.is_exact() && self.len().map_or(false, |len| len <= 16);
-            let usefix = fix.len() > 4 || (fix.len() > 1 && !isfast);
-            if usefix {
-                // If we keep exactly the number of bytes equal to the length
-                // of the prefix (or suffix), then by the definition of a
-                // prefix, every literal in the sequence will be equivalent.
-                // Thus, 'dedup' will leave us with one literal.
-                //
-                // We do it this way to avoid an alloc, but also to make sure
-                // the exactness of literals is kept (or not).
-                if prefix {
-                    self.keep_first_bytes(fix.len());
-                } else {
-                    self.keep_last_bytes(fix.len());
-                }
-                self.dedup();
-                assert_eq!(Some(1), self.len());
-                // We still fall through here. In particular, we want our
-                // longest common prefix to be subject to the poison check.
-            }
-        }
-        // If we have an exact sequence, we *probably* just want to keep it
-        // as-is. But there are some cases where we don't. So we save a copy of
-        // the exact sequence now, and then try to do some more optimizations
-        // below. If those don't work out, we go back to this exact sequence.
-        //
-        // The specific motivation for this is that we sometimes wind up with
-        // an exact sequence with a hefty number of literals. Say, 100. If we
-        // stuck with that, it would be too big for Teddy and would result in
-        // using Aho-Corasick. Which is fine... but the lazy DFA is plenty
-        // suitable in such cases. The real issue is that we will wind up not
-        // using a fast prefilter at all. So in cases like this, even though
-        // we have an exact sequence, it would be better to try and shrink the
-        // sequence (which we do below) and use it as a prefilter that can
-        // produce false positive matches.
-        //
-        // But if the shrinking below results in a sequence that "sucks," then
-        // we don't want to use that because we already have an exact sequence
-        // in hand.
-        let exact: Option<Seq> =
-            if self.is_exact() { Some(self.clone()) } else { None };
-        // Now we attempt to shorten the sequence. The idea here is that we
-        // don't want to look for too many literals, but we want to shorten
-        // our sequence enough to improve our odds of using better algorithms
-        // downstream (such as Teddy).
-        //
-        // The pair of numbers in this list corresponds to the maximal prefix
-        // (in bytes) to keep for all literals and the length of the sequence
-        // at which to do it.
-        //
-        // So for example, the pair (3, 500) would mean, "if we have more than
-        // 500 literals in our sequence, then truncate all of our literals
-        // such that they are at most 3 bytes in length and the minimize the
-        // sequence."
-        const ATTEMPTS: [(usize, usize); 5] =
-            [(5, 10), (4, 10), (3, 64), (2, 64), (1, 10)];
-        for (keep, limit) in ATTEMPTS {
-            let len = match self.len() {
-                None => break,
-                Some(len) => len,
-            };
-            if len <= limit {
-                break;
-            }
-            if prefix {
-                self.keep_first_bytes(keep);
-            } else {
-                self.keep_last_bytes(keep);
-            }
-            if prefix {
-                if let Some(ref mut lits) = self.literals {
-                    PreferenceTrie::minimize(lits, true);
-                }
-            }
-        }
-        // Check for a poison literal. A poison literal is one that is short
-        // and is believed to have a very high match count. These poisons
-        // generally lead to a prefilter with a very high false positive rate,
-        // and thus overall worse performance.
-        //
-        // We do this last because we could have gone from a non-poisonous
-        // sequence to a poisonous one. Perhaps we should add some code to
-        // prevent such transitions in the first place, but then again, we
-        // likely only made the transition in the first place if the sequence
-        // was itself huge. And huge sequences are themselves poisonous. So...
-        if let Some(lits) = self.literals() {
-            if lits.iter().any(|lit| lit.is_poisonous()) {
-                self.make_infinite();
-            }
-        }
-        // OK, if we had an exact sequence before attempting more optimizations
-        // above and our post-optimized sequence sucks for some reason or
-        // another, then we go back to the exact sequence.
-        if let Some(exact) = exact {
-            // If optimizing resulted in dropping our literals, then certainly
-            // backup and use the exact sequence that we had.
-            if !self.is_finite() {
-                *self = exact;
-                return;
-            }
-            // If our optimized sequence contains a short literal, then it's
-            // *probably* not so great. So throw it away and revert to the
-            // exact sequence.
-            if self.min_literal_len().map_or(true, |len| len <= 2) {
-                *self = exact;
-                return;
-            }
-            // Finally, if our optimized sequence is "big" (i.e., can't use
-            // Teddy), then also don't use it and rely on the exact sequence.
-            if self.len().map_or(true, |len| len > 64) {
-                *self = exact;
-                return;
-            }
-        }
-    }
-}
-
-impl core::fmt::Debug for Seq {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        write!(f, "Seq")?;
-        if let Some(lits) = self.literals() {
-            f.debug_list().entries(lits.iter()).finish()
-        } else {
-            write!(f, "[∞]")
-        }
-    }
-}
-
-impl FromIterator<Literal> for Seq {
-    fn from_iter<T: IntoIterator<Item = Literal>>(it: T) -> Seq {
-        let mut seq = Seq::empty();
-        for literal in it {
-            seq.push(literal);
-        }
-        seq
-    }
-}
-
-/// A single literal extracted from an [`Hir`] expression.
-///
-/// A literal is composed of two things:
-///
-/// * A sequence of bytes. No guarantees with respect to UTF-8 are provided.
-/// In particular, even if the regex a literal is extracted from is UTF-8, the
-/// literal extracted may not be valid UTF-8. (For example, if an [`Extractor`]
-/// limit resulted in trimming a literal in a way that splits a codepoint.)
-/// * Whether the literal is "exact" or not. An "exact" literal means that it
-/// has not been trimmed, and may continue to be extended. If a literal is
-/// "exact" after visiting the entire `Hir` expression, then this implies that
-/// the literal leads to a match state. (Although it doesn't necessarily imply
-/// all occurrences of the literal correspond to a match of the regex, since
-/// literal extraction ignores look-around assertions.)
-#[derive(Clone, Eq, PartialEq, PartialOrd, Ord)]
-pub struct Literal {
-    bytes: Vec<u8>,
-    exact: bool,
-}
-
-impl Literal {
-    /// Returns a new exact literal containing the bytes given.
-    #[inline]
-    pub fn exact<B: Into<Vec<u8>>>(bytes: B) -> Literal {
-        Literal { bytes: bytes.into(), exact: true }
-    }
-
-    /// Returns a new inexact literal containing the bytes given.
-    #[inline]
-    pub fn inexact<B: Into<Vec<u8>>>(bytes: B) -> Literal {
-        Literal { bytes: bytes.into(), exact: false }
-    }
-
-    /// Returns the bytes in this literal.
-    #[inline]
-    pub fn as_bytes(&self) -> &[u8] {
-        &self.bytes
-    }
-
-    /// Yields ownership of the bytes inside this literal.
-    ///
-    /// Note that this throws away whether the literal is "exact" or not.
-    #[inline]
-    pub fn into_bytes(self) -> Vec<u8> {
-        self.bytes
-    }
-
-    /// Returns the length of this literal in bytes.
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.as_bytes().len()
-    }
-
-    /// Returns true if and only if this literal has zero bytes.
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Returns true if and only if this literal is exact.
-    #[inline]
-    pub fn is_exact(&self) -> bool {
-        self.exact
-    }
-
-    /// Marks this literal as inexact.
-    ///
-    /// Inexact literals can never be extended. For example,
-    /// [`Seq::cross_forward`] will not extend inexact literals.
-    #[inline]
-    pub fn make_inexact(&mut self) {
-        self.exact = false;
-    }
-
-    /// Reverse the bytes in this literal.
-    #[inline]
-    pub fn reverse(&mut self) {
-        self.bytes.reverse();
-    }
-
-    /// Extend this literal with the literal given.
-    ///
-    /// If this literal is inexact, then this is a no-op.
-    #[inline]
-    pub fn extend(&mut self, lit: &Literal) {
-        if !self.is_exact() {
-            return;
-        }
-        self.bytes.extend_from_slice(&lit.bytes);
-    }
-
-    /// Trims this literal such that only the first `len` bytes remain. If
-    /// this literal has fewer than `len` bytes, then it remains unchanged.
-    /// Otherwise, the literal is marked as inexact.
-    #[inline]
-    pub fn keep_first_bytes(&mut self, len: usize) {
-        if len >= self.len() {
-            return;
-        }
-        self.make_inexact();
-        self.bytes.truncate(len);
-    }
-
-    /// Trims this literal such that only the last `len` bytes remain. If this
-    /// literal has fewer than `len` bytes, then it remains unchanged.
-    /// Otherwise, the literal is marked as inexact.
-    #[inline]
-    pub fn keep_last_bytes(&mut self, len: usize) {
-        if len >= self.len() {
-            return;
-        }
-        self.make_inexact();
-        self.bytes.drain(..self.len() - len);
-    }
-
-    /// Returns true if it is believe that this literal is likely to match very
-    /// frequently, and is thus not a good candidate for a prefilter.
-    fn is_poisonous(&self) -> bool {
-        self.is_empty() || (self.len() == 1 && rank(self.as_bytes()[0]) >= 250)
-    }
-}
-
-impl From<u8> for Literal {
-    fn from(byte: u8) -> Literal {
-        Literal::exact(vec![byte])
-    }
-}
-
-impl From<char> for Literal {
-    fn from(ch: char) -> Literal {
-        use alloc::string::ToString;
-        Literal::exact(ch.encode_utf8(&mut [0; 4]).to_string())
-    }
-}
-
-impl AsRef<[u8]> for Literal {
-    fn as_ref(&self) -> &[u8] {
-        self.as_bytes()
-    }
-}
-
-impl core::fmt::Debug for Literal {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        let tag = if self.exact { "E" } else { "I" };
-        f.debug_tuple(tag)
-            .field(&crate::debug::Bytes(self.as_bytes()))
-            .finish()
-    }
-}
-
-/// A "preference" trie that rejects literals that will never match when
-/// executing a leftmost first or "preference" search.
-///
-/// For example, if 'sam' is inserted, then trying to insert 'samwise' will be
-/// rejected because 'samwise' can never match since 'sam' will always take
-/// priority. However, if 'samwise' is inserted first, then inserting 'sam'
-/// after it is accepted. In this case, either 'samwise' or 'sam' can match in
-/// a "preference" search.
-///
-/// Note that we only use this trie as a "set." That is, given a sequence of
-/// literals, we insert each one in order. An `insert` will reject a literal
-/// if a prefix of that literal already exists in the trie. Thus, to rebuild
-/// the "minimal" sequence, we simply only keep literals that were successfully
-/// inserted. (Since we don't need traversal, one wonders whether we can make
-/// some simplifications here, but I haven't given it a ton of thought and I've
-/// never seen this show up on a profile. Because of the heuristic limits
-/// imposed on literal extractions, the size of the inputs here is usually
-/// very small.)
-#[derive(Debug)]
-struct PreferenceTrie {
-    /// The states in this trie. The index of a state in this vector is its ID.
-    states: Vec<State>,
-    /// This vec indicates which states are match states. It always has
-    /// the same length as `states` and is indexed by the same state ID.
-    /// A state with identifier `sid` is a match state if and only if
-    /// `matches[sid].is_some()`. The option contains the index of the literal
-    /// corresponding to the match. The index is offset by 1 so that it fits in
-    /// a NonZeroUsize.
-    matches: Vec<Option<NonZeroUsize>>,
-    /// The index to allocate to the next literal added to this trie. Starts at
-    /// 1 and increments by 1 for every literal successfully added to the trie.
-    next_literal_index: usize,
-}
-
-/// A single state in a trie. Uses a sparse representation for its transitions.
-#[derive(Debug, Default)]
-struct State {
-    /// Sparse representation of the transitions out of this state. Transitions
-    /// are sorted by byte. There is at most one such transition for any
-    /// particular byte.
-    trans: Vec<(u8, usize)>,
-}
-
-impl PreferenceTrie {
-    /// Minimizes the given sequence of literals while preserving preference
-    /// order semantics.
-    ///
-    /// When `keep_exact` is true, the exactness of every literal retained is
-    /// kept. This is useful when dealing with a fully extracted `Seq` that
-    /// only contains exact literals. In that case, we can keep all retained
-    /// literals as exact because we know we'll never need to match anything
-    /// after them and because any removed literals are guaranteed to never
-    /// match.
-    fn minimize(literals: &mut Vec<Literal>, keep_exact: bool) {
-        let mut trie = PreferenceTrie {
-            states: vec![],
-            matches: vec![],
-            next_literal_index: 1,
-        };
-        let mut make_inexact = vec![];
-        literals.retain_mut(|lit| match trie.insert(lit.as_bytes()) {
-            Ok(_) => true,
-            Err(i) => {
-                if !keep_exact {
-                    make_inexact.push(i.checked_sub(1).unwrap());
-                }
-                false
-            }
-        });
-        for i in make_inexact {
-            literals[i].make_inexact();
-        }
-    }
-
-    /// Returns `Ok` if the given byte string is accepted into this trie and
-    /// `Err` otherwise. The index for the success case corresponds to the
-    /// index of the literal added. The index for the error case corresponds to
-    /// the index of the literal already in the trie that prevented the given
-    /// byte string from being added. (Which implies it is a prefix of the one
-    /// given.)
-    ///
-    /// In short, the byte string given is accepted into the trie if and only
-    /// if it is possible for it to match when executing a preference order
-    /// search.
-    fn insert(&mut self, bytes: &[u8]) -> Result<usize, usize> {
-        let mut prev = self.root();
-        if let Some(idx) = self.matches[prev] {
-            return Err(idx.get());
-        }
-        for &b in bytes.iter() {
-            match self.states[prev].trans.binary_search_by_key(&b, |t| t.0) {
-                Ok(i) => {
-                    prev = self.states[prev].trans[i].1;
-                    if let Some(idx) = self.matches[prev] {
-                        return Err(idx.get());
-                    }
-                }
-                Err(i) => {
-                    let next = self.create_state();
-                    self.states[prev].trans.insert(i, (b, next));
-                    prev = next;
-                }
-            }
-        }
-        let idx = self.next_literal_index;
-        self.next_literal_index += 1;
-        self.matches[prev] = NonZeroUsize::new(idx);
-        Ok(idx)
-    }
-
-    /// Returns the root state ID, and if it doesn't exist, creates it.
-    fn root(&mut self) -> usize {
-        if !self.states.is_empty() {
-            0
-        } else {
-            self.create_state()
-        }
-    }
-
-    /// Creates a new empty state and returns its ID.
-    fn create_state(&mut self) -> usize {
-        let id = self.states.len();
-        self.states.push(State::default());
-        self.matches.push(None);
-        id
-    }
-}
-
-/// Returns the "rank" of the given byte.
-///
-/// The minimum rank value is `0` and the maximum rank value is `255`.
-///
-/// The rank of a byte is derived from a heuristic background distribution of
-/// relative frequencies of bytes. The heuristic says that lower the rank of a
-/// byte, the less likely that byte is to appear in any arbitrary haystack.
-pub fn rank(byte: u8) -> u8 {
-    crate::rank::BYTE_FREQUENCIES[usize::from(byte)]
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    fn parse(pattern: &str) -> Hir {
-        crate::ParserBuilder::new().utf8(false).build().parse(pattern).unwrap()
-    }
-
-    fn prefixes(pattern: &str) -> Seq {
-        Extractor::new().kind(ExtractKind::Prefix).extract(&parse(pattern))
-    }
-
-    fn suffixes(pattern: &str) -> Seq {
-        Extractor::new().kind(ExtractKind::Suffix).extract(&parse(pattern))
-    }
-
-    fn e(pattern: &str) -> (Seq, Seq) {
-        (prefixes(pattern), suffixes(pattern))
-    }
-
-    #[allow(non_snake_case)]
-    fn E(x: &str) -> Literal {
-        Literal::exact(x.as_bytes())
-    }
-
-    #[allow(non_snake_case)]
-    fn I(x: &str) -> Literal {
-        Literal::inexact(x.as_bytes())
-    }
-
-    fn seq<I: IntoIterator<Item = Literal>>(it: I) -> Seq {
-        Seq::from_iter(it)
-    }
-
-    fn infinite() -> (Seq, Seq) {
-        (Seq::infinite(), Seq::infinite())
-    }
-
-    fn inexact<I1, I2>(it1: I1, it2: I2) -> (Seq, Seq)
-    where
-        I1: IntoIterator<Item = Literal>,
-        I2: IntoIterator<Item = Literal>,
-    {
-        (Seq::from_iter(it1), Seq::from_iter(it2))
-    }
-
-    fn exact<B: AsRef<[u8]>, I: IntoIterator<Item = B>>(it: I) -> (Seq, Seq) {
-        let s1 = Seq::new(it);
-        let s2 = s1.clone();
-        (s1, s2)
-    }
-
-    fn opt<B: AsRef<[u8]>, I: IntoIterator<Item = B>>(it: I) -> (Seq, Seq) {
-        let (mut p, mut s) = exact(it);
-        p.optimize_for_prefix_by_preference();
-        s.optimize_for_suffix_by_preference();
-        (p, s)
-    }
-
-    #[test]
-    fn literal() {
-        assert_eq!(exact(["a"]), e("a"));
-        assert_eq!(exact(["aaaaa"]), e("aaaaa"));
-        assert_eq!(exact(["A", "a"]), e("(?i-u)a"));
-        assert_eq!(exact(["AB", "Ab", "aB", "ab"]), e("(?i-u)ab"));
-        assert_eq!(exact(["abC", "abc"]), e("ab(?i-u)c"));
-
-        assert_eq!(exact([b"\xFF"]), e(r"(?-u:\xFF)"));
-
-        #[cfg(feature = "unicode-case")]
-        {
-            assert_eq!(exact(["☃"]), e("☃"));
-            assert_eq!(exact(["☃"]), e("(?i)☃"));
-            assert_eq!(exact(["☃☃☃☃☃"]), e("☃☃☃☃☃"));
-
-            assert_eq!(exact(["Δ"]), e("Δ"));
-            assert_eq!(exact(["δ"]), e("δ"));
-            assert_eq!(exact(["Δ", "δ"]), e("(?i)Δ"));
-            assert_eq!(exact(["Δ", "δ"]), e("(?i)δ"));
-
-            assert_eq!(exact(["S", "s", "Ćż"]), e("(?i)S"));
-            assert_eq!(exact(["S", "s", "Ćż"]), e("(?i)s"));
-            assert_eq!(exact(["S", "s", "Ćż"]), e("(?i)Ćż"));
-        }
-
-        let letters = "ͱͳͷΐΏέΟίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋ";
-        assert_eq!(exact([letters]), e(letters));
-    }
-
-    #[test]
-    fn class() {
-        assert_eq!(exact(["a", "b", "c"]), e("[abc]"));
-        assert_eq!(exact(["a1b", "a2b", "a3b"]), e("a[123]b"));
-        assert_eq!(exact(["δ", "ε"]), e("[εδ]"));
-        #[cfg(feature = "unicode-case")]
-        {
-            assert_eq!(exact(["Δ", "Ε", "δ", "ε", "Ï”"]), e(r"(?i)[εδ]"));
-        }
-    }
-
-    #[test]
-    fn look() {
-        assert_eq!(exact(["ab"]), e(r"a\Ab"));
-        assert_eq!(exact(["ab"]), e(r"a\zb"));
-        assert_eq!(exact(["ab"]), e(r"a(?m:^)b"));
-        assert_eq!(exact(["ab"]), e(r"a(?m:$)b"));
-        assert_eq!(exact(["ab"]), e(r"a\bb"));
-        assert_eq!(exact(["ab"]), e(r"a\Bb"));
-        assert_eq!(exact(["ab"]), e(r"a(?-u:\b)b"));
-        assert_eq!(exact(["ab"]), e(r"a(?-u:\B)b"));
-
-        assert_eq!(exact(["ab"]), e(r"^ab"));
-        assert_eq!(exact(["ab"]), e(r"$ab"));
-        assert_eq!(exact(["ab"]), e(r"(?m:^)ab"));
-        assert_eq!(exact(["ab"]), e(r"(?m:$)ab"));
-        assert_eq!(exact(["ab"]), e(r"\bab"));
-        assert_eq!(exact(["ab"]), e(r"\Bab"));
-        assert_eq!(exact(["ab"]), e(r"(?-u:\b)ab"));
-        assert_eq!(exact(["ab"]), e(r"(?-u:\B)ab"));
-
-        assert_eq!(exact(["ab"]), e(r"ab^"));
-        assert_eq!(exact(["ab"]), e(r"ab$"));
-        assert_eq!(exact(["ab"]), e(r"ab(?m:^)"));
-        assert_eq!(exact(["ab"]), e(r"ab(?m:$)"));
-        assert_eq!(exact(["ab"]), e(r"ab\b"));
-        assert_eq!(exact(["ab"]), e(r"ab\B"));
-        assert_eq!(exact(["ab"]), e(r"ab(?-u:\b)"));
-        assert_eq!(exact(["ab"]), e(r"ab(?-u:\B)"));
-
-        let expected = (seq([I("aZ"), E("ab")]), seq([I("Zb"), E("ab")]));
-        assert_eq!(expected, e(r"^aZ*b"));
-    }
-
-    #[test]
-    fn repetition() {
-        assert_eq!(exact(["a", ""]), e(r"a?"));
-        assert_eq!(exact(["", "a"]), e(r"a??"));
-        assert_eq!(inexact([I("a"), E("")], [I("a"), E("")]), e(r"a*"));
-        assert_eq!(inexact([E(""), I("a")], [E(""), I("a")]), e(r"a*?"));
-        assert_eq!(inexact([I("a")], [I("a")]), e(r"a+"));
-        assert_eq!(inexact([I("a")], [I("a")]), e(r"(a+)+"));
-
-        assert_eq!(exact(["ab"]), e(r"aZ{0}b"));
-        assert_eq!(exact(["aZb", "ab"]), e(r"aZ?b"));
-        assert_eq!(exact(["ab", "aZb"]), e(r"aZ??b"));
-        assert_eq!(
-            inexact([I("aZ"), E("ab")], [I("Zb"), E("ab")]),
-            e(r"aZ*b")
-        );
-        assert_eq!(
-            inexact([E("ab"), I("aZ")], [E("ab"), I("Zb")]),
-            e(r"aZ*?b")
-        );
-        assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+b"));
-        assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+?b"));
-
-        assert_eq!(exact(["aZZb"]), e(r"aZ{2}b"));
-        assert_eq!(inexact([I("aZZ")], [I("ZZb")]), e(r"aZ{2,3}b"));
-
-        assert_eq!(exact(["abc", ""]), e(r"(abc)?"));
-        assert_eq!(exact(["", "abc"]), e(r"(abc)??"));
-
-        assert_eq!(inexact([I("a"), E("b")], [I("ab"), E("b")]), e(r"a*b"));
-        assert_eq!(inexact([E("b"), I("a")], [E("b"), I("ab")]), e(r"a*?b"));
-        assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+"));
-        assert_eq!(inexact([I("a"), I("b")], [I("b")]), e(r"a*b+"));
-
-        // FIXME: The suffixes for this don't look quite right to me. I think
-        // the right suffixes would be: [I(ac), I(bc), E(c)]. The main issue I
-        // think is that suffixes are computed by iterating over concatenations
-        // in reverse, and then [bc, ac, c] ordering is indeed correct from
-        // that perspective. We also test a few more equivalent regexes, and
-        // we get the same result, so it is consistent at least I suppose.
-        //
-        // The reason why this isn't an issue is that it only messes up
-        // preference order, and currently, suffixes are never used in a
-        // context where preference order matters. For prefixes it matters
-        // because we sometimes want to use prefilters without confirmation
-        // when all of the literals are exact (and there's no look-around). But
-        // we never do that for suffixes. Any time we use suffixes, we always
-        // include a confirmation step. If that ever changes, then it's likely
-        // this bug will need to be fixed, but last time I looked, it appears
-        // hard to do so.
-        assert_eq!(
-            inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]),
-            e(r"a*b*c")
-        );
-        assert_eq!(
-            inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]),
-            e(r"(a+)?(b+)?c")
-        );
-        assert_eq!(
-            inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]),
-            e(r"(a+|)(b+|)c")
-        );
-        // A few more similarish but not identical regexes. These may have a
-        // similar problem as above.
-        assert_eq!(
-            inexact(
-                [I("a"), I("b"), I("c"), E("")],
-                [I("c"), I("b"), I("a"), E("")]
-            ),
-            e(r"a*b*c*")
-        );
-        assert_eq!(inexact([I("a"), I("b"), I("c")], [I("c")]), e(r"a*b*c+"));
-        assert_eq!(inexact([I("a"), I("b")], [I("bc")]), e(r"a*b+c"));
-        assert_eq!(inexact([I("a"), I("b")], [I("c"), I("b")]), e(r"a*b+c*"));
-        assert_eq!(inexact([I("ab"), E("a")], [I("b"), E("a")]), e(r"ab*"));
-        assert_eq!(
-            inexact([I("ab"), E("ac")], [I("bc"), E("ac")]),
-            e(r"ab*c")
-        );
-        assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+"));
-        assert_eq!(inexact([I("ab")], [I("bc")]), e(r"ab+c"));
-
-        assert_eq!(
-            inexact([I("z"), E("azb")], [I("zazb"), E("azb")]),
-            e(r"z*azb")
-        );
-
-        let expected =
-            exact(["aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"]);
-        assert_eq!(expected, e(r"[ab]{3}"));
-        let expected = inexact(
-            [
-                I("aaa"),
-                I("aab"),
-                I("aba"),
-                I("abb"),
-                I("baa"),
-                I("bab"),
-                I("bba"),
-                I("bbb"),
-            ],
-            [
-                I("aaa"),
-                I("aab"),
-                I("aba"),
-                I("abb"),
-                I("baa"),
-                I("bab"),
-                I("bba"),
-                I("bbb"),
-            ],
-        );
-        assert_eq!(expected, e(r"[ab]{3,4}"));
-    }
-
-    #[test]
-    fn concat() {
-        let empty: [&str; 0] = [];
-
-        assert_eq!(exact(["abcxyz"]), e(r"abc()xyz"));
-        assert_eq!(exact(["abcxyz"]), e(r"(abc)(xyz)"));
-        assert_eq!(exact(["abcmnoxyz"]), e(r"abc()mno()xyz"));
-        assert_eq!(exact(empty), e(r"abc[a&&b]xyz"));
-        assert_eq!(exact(["abcxyz"]), e(r"abc[a&&b]*xyz"));
-    }
-
-    #[test]
-    fn alternation() {
-        assert_eq!(exact(["abc", "mno", "xyz"]), e(r"abc|mno|xyz"));
-        assert_eq!(
-            inexact(
-                [E("abc"), I("mZ"), E("mo"), E("xyz")],
-                [E("abc"), I("Zo"), E("mo"), E("xyz")]
-            ),
-            e(r"abc|mZ*o|xyz")
-        );
-        assert_eq!(exact(["abc", "xyz"]), e(r"abc|M[a&&b]N|xyz"));
-        assert_eq!(exact(["abc", "MN", "xyz"]), e(r"abc|M[a&&b]*N|xyz"));
-
-        assert_eq!(exact(["aaa", "aaaaa"]), e(r"(?:|aa)aaa"));
-        assert_eq!(
-            inexact(
-                [I("aaa"), E(""), I("aaaaa"), E("aa")],
-                [I("aaa"), E(""), E("aa")]
-            ),
-            e(r"(?:|aa)(?:aaa)*")
-        );
-        assert_eq!(
-            inexact(
-                [E(""), I("aaa"), E("aa"), I("aaaaa")],
-                [E(""), I("aaa"), E("aa")]
-            ),
-            e(r"(?:|aa)(?:aaa)*?")
-        );
-
-        assert_eq!(
-            inexact([E("a"), I("b"), E("")], [E("a"), I("b"), E("")]),
-            e(r"a|b*")
-        );
-        assert_eq!(inexact([E("a"), I("b")], [E("a"), I("b")]), e(r"a|b+"));
-
-        assert_eq!(
-            inexact([I("a"), E("b"), E("c")], [I("ab"), E("b"), E("c")]),
-            e(r"a*b|c")
-        );
-
-        assert_eq!(
-            inexact(
-                [E("a"), E("b"), I("c"), E("")],
-                [E("a"), E("b"), I("c"), E("")]
-            ),
-            e(r"a|(?:b|c*)")
-        );
-
-        assert_eq!(
-            inexact(
-                [I("a"), I("b"), E("c"), I("a"), I("ab"), E("c")],
-                [I("ac"), I("bc"), E("c"), I("ac"), I("abc"), E("c")],
-            ),
-            e(r"(a|b)*c|(a|ab)*c")
-        );
-
-        assert_eq!(
-            exact(["abef", "abgh", "cdef", "cdgh"]),
-            e(r"(ab|cd)(ef|gh)")
-        );
-        assert_eq!(
-            exact([
-                "abefij", "abefkl", "abghij", "abghkl", "cdefij", "cdefkl",
-                "cdghij", "cdghkl",
-            ]),
-            e(r"(ab|cd)(ef|gh)(ij|kl)")
-        );
-
-        assert_eq!(inexact([E("abab")], [E("abab")]), e(r"(ab){2}"));
-
-        assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,3}"));
-
-        assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,}"));
-    }
-
-    #[test]
-    fn impossible() {
-        let empty: [&str; 0] = [];
-
-        assert_eq!(exact(empty), e(r"[a&&b]"));
-        assert_eq!(exact(empty), e(r"a[a&&b]"));
-        assert_eq!(exact(empty), e(r"[a&&b]b"));
-        assert_eq!(exact(empty), e(r"a[a&&b]b"));
-        assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]|b"));
-        assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]|b"));
-        assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]d|b"));
-        assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]d|b"));
-        assert_eq!(exact([""]), e(r"[a&&b]*"));
-        assert_eq!(exact(["MN"]), e(r"M[a&&b]*N"));
-    }
-
-    // This tests patterns that contain something that defeats literal
-    // detection, usually because it would blow some limit on the total number
-    // of literals that can be returned.
-    //
-    // The main idea is that when literal extraction sees something that
-    // it knows will blow a limit, it replaces it with a marker that says
-    // "any literal will match here." While not necessarily true, the
-    // over-estimation is just fine for the purposes of literal extraction,
-    // because the imprecision doesn't matter: too big is too big.
-    //
-    // This is one of the trickier parts of literal extraction, since we need
-    // to make sure all of our literal extraction operations correctly compose
-    // with the markers.
-    #[test]
-    fn anything() {
-        assert_eq!(infinite(), e(r"."));
-        assert_eq!(infinite(), e(r"(?s)."));
-        assert_eq!(infinite(), e(r"[A-Za-z]"));
-        assert_eq!(infinite(), e(r"[A-Z]"));
-        assert_eq!(exact([""]), e(r"[A-Z]{0}"));
-        assert_eq!(infinite(), e(r"[A-Z]?"));
-        assert_eq!(infinite(), e(r"[A-Z]*"));
-        assert_eq!(infinite(), e(r"[A-Z]+"));
-        assert_eq!((seq([I("1")]), Seq::infinite()), e(r"1[A-Z]"));
-        assert_eq!((seq([I("1")]), seq([I("2")])), e(r"1[A-Z]2"));
-        assert_eq!((Seq::infinite(), seq([I("123")])), e(r"[A-Z]+123"));
-        assert_eq!(infinite(), e(r"[A-Z]+123[A-Z]+"));
-        assert_eq!(infinite(), e(r"1|[A-Z]|3"));
-        assert_eq!(
-            (seq([E("1"), I("2"), E("3")]), Seq::infinite()),
-            e(r"1|2[A-Z]|3"),
-        );
-        assert_eq!(
-            (Seq::infinite(), seq([E("1"), I("2"), E("3")])),
-            e(r"1|[A-Z]2|3"),
-        );
-        assert_eq!(
-            (seq([E("1"), I("2"), E("4")]), seq([E("1"), I("3"), E("4")])),
-            e(r"1|2[A-Z]3|4"),
-        );
-        assert_eq!((Seq::infinite(), seq([I("2")])), e(r"(?:|1)[A-Z]2"));
-        assert_eq!(inexact([I("a")], [I("z")]), e(r"a.z"));
-    }
-
-    // Like the 'anything' test, but it uses smaller limits in order to test
-    // the logic for effectively aborting literal extraction when the seqs get
-    // too big.
-    #[test]
-    fn anything_small_limits() {
-        fn prefixes(pattern: &str) -> Seq {
-            Extractor::new()
-                .kind(ExtractKind::Prefix)
-                .limit_total(10)
-                .extract(&parse(pattern))
-        }
-
-        fn suffixes(pattern: &str) -> Seq {
-            Extractor::new()
-                .kind(ExtractKind::Suffix)
-                .limit_total(10)
-                .extract(&parse(pattern))
-        }
-
-        fn e(pattern: &str) -> (Seq, Seq) {
-            (prefixes(pattern), suffixes(pattern))
-        }
-
-        assert_eq!(
-            (
-                seq([
-                    I("aaa"),
-                    I("aab"),
-                    I("aba"),
-                    I("abb"),
-                    I("baa"),
-                    I("bab"),
-                    I("bba"),
-                    I("bbb")
-                ]),
-                seq([
-                    I("aaa"),
-                    I("aab"),
-                    I("aba"),
-                    I("abb"),
-                    I("baa"),
-                    I("bab"),
-                    I("bba"),
-                    I("bbb")
-                ])
-            ),
-            e(r"[ab]{3}{3}")
-        );
-
-        assert_eq!(infinite(), e(r"ab|cd|ef|gh|ij|kl|mn|op|qr|st|uv|wx|yz"));
-    }
-
-    #[test]
-    fn empty() {
-        assert_eq!(exact([""]), e(r""));
-        assert_eq!(exact([""]), e(r"^"));
-        assert_eq!(exact([""]), e(r"$"));
-        assert_eq!(exact([""]), e(r"(?m:^)"));
-        assert_eq!(exact([""]), e(r"(?m:$)"));
-        assert_eq!(exact([""]), e(r"\b"));
-        assert_eq!(exact([""]), e(r"\B"));
-        assert_eq!(exact([""]), e(r"(?-u:\b)"));
-        assert_eq!(exact([""]), e(r"(?-u:\B)"));
-    }
-
-    #[test]
-    fn odds_and_ends() {
-        assert_eq!((Seq::infinite(), seq([I("a")])), e(r".a"));
-        assert_eq!((seq([I("a")]), Seq::infinite()), e(r"a."));
-        assert_eq!(infinite(), e(r"a|."));
-        assert_eq!(infinite(), e(r".|a"));
-
-        let pat = r"M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]";
-        let expected = inexact(
-            ["Mo'am", "Moam", "Mu'am", "Muam"].map(I),
-            [
-                "ddafi", "ddafy", "dhafi", "dhafy", "dzafi", "dzafy", "dafi",
-                "dafy", "tdafi", "tdafy", "thafi", "thafy", "tzafi", "tzafy",
-                "tafi", "tafy", "zdafi", "zdafy", "zhafi", "zhafy", "zzafi",
-                "zzafy", "zafi", "zafy",
-            ]
-            .map(I),
-        );
-        assert_eq!(expected, e(pat));
-
-        assert_eq!(
-            (seq(["fn is_", "fn as_"].map(I)), Seq::infinite()),
-            e(r"fn is_([A-Z]+)|fn as_([A-Z]+)"),
-        );
-        assert_eq!(
-            inexact([I("foo")], [I("quux")]),
-            e(r"foo[A-Z]+bar[A-Z]+quux")
-        );
-        assert_eq!(infinite(), e(r"[A-Z]+bar[A-Z]+"));
-        assert_eq!(
-            exact(["Sherlock Holmes"]),
-            e(r"(?m)^Sherlock Holmes|Sherlock Holmes$")
-        );
-
-        assert_eq!(exact(["sa", "sb"]), e(r"\bs(?:[ab])"));
-    }
-
-    // This tests a specific regex along with some heuristic steps to reduce
-    // the sequences extracted. This is meant to roughly correspond to the
-    // types of heuristics used to shrink literal sets in practice. (Shrinking
-    // is done because you want to balance "spend too much work looking for
-    // too many literals" and "spend too much work processing false positive
-    // matches from short literals.")
-    #[test]
-    #[cfg(feature = "unicode-case")]
-    fn holmes() {
-        let expected = inexact(
-            ["HOL", "HOl", "HoL", "Hol", "hOL", "hOl", "hoL", "hol"].map(I),
-            [
-                "MES", "MEs", "EĆż", "MeS", "Mes", "eĆż", "mES", "mEs", "meS",
-                "mes",
-            ]
-            .map(I),
-        );
-        let (mut prefixes, mut suffixes) = e(r"(?i)Holmes");
-        prefixes.keep_first_bytes(3);
-        suffixes.keep_last_bytes(3);
-        prefixes.minimize_by_preference();
-        suffixes.minimize_by_preference();
-        assert_eq!(expected, (prefixes, suffixes));
-    }
-
-    // This tests that we get some kind of literals extracted for a beefier
-    // alternation with case insensitive mode enabled. At one point during
-    // development, this returned nothing, and motivated some special case
-    // code in Extractor::union to try and trim down the literal sequences
-    // if the union would blow the limits set.
-    #[test]
-    #[cfg(feature = "unicode-case")]
-    fn holmes_alt() {
-        let mut pre =
-            prefixes(r"(?i)Sherlock|Holmes|Watson|Irene|Adler|John|Baker");
-        assert!(pre.len().unwrap() > 0);
-        pre.optimize_for_prefix_by_preference();
-        assert!(pre.len().unwrap() > 0);
-    }
-
-    // See: https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8
-    // See: CVE-2022-24713
-    //
-    // We test this here to ensure literal extraction completes in reasonable
-    // time and isn't materially impacted by these sorts of pathological
-    // repeats.
-    #[test]
-    fn crazy_repeats() {
-        assert_eq!(inexact([E("")], [E("")]), e(r"(?:){4294967295}"));
-        assert_eq!(
-            inexact([E("")], [E("")]),
-            e(r"(?:){64}{64}{64}{64}{64}{64}")
-        );
-        assert_eq!(inexact([E("")], [E("")]), e(r"x{0}{4294967295}"));
-        assert_eq!(inexact([E("")], [E("")]), e(r"(?:|){4294967295}"));
-
-        assert_eq!(
-            inexact([E("")], [E("")]),
-            e(r"(?:){8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}")
-        );
-        let repa = "a".repeat(100);
-        assert_eq!(
-            inexact([I(&repa)], [I(&repa)]),
-            e(r"a{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}")
-        );
-    }
-
-    #[test]
-    fn huge() {
-        let pat = r#"(?-u)
-        2(?:
-          [45]\d{3}|
-          7(?:
-            1[0-267]|
-            2[0-289]|
-            3[0-29]|
-            4[01]|
-            5[1-3]|
-            6[013]|
-            7[0178]|
-            91
-          )|
-          8(?:
-            0[125]|
-            [139][1-6]|
-            2[0157-9]|
-            41|
-            6[1-35]|
-            7[1-5]|
-            8[1-8]|
-            90
-          )|
-          9(?:
-            0[0-2]|
-            1[0-4]|
-            2[568]|
-            3[3-6]|
-            5[5-7]|
-            6[0167]|
-            7[15]|
-            8[0146-9]
-          )
-        )\d{4}|
-        3(?:
-          12?[5-7]\d{2}|
-          0(?:
-            2(?:
-              [025-79]\d|
-              [348]\d{1,2}
-            )|
-            3(?:
-              [2-4]\d|
-              [56]\d?
-            )
-          )|
-          2(?:
-            1\d{2}|
-            2(?:
-              [12]\d|
-              [35]\d{1,2}|
-              4\d?
-            )
-          )|
-          3(?:
-            1\d{2}|
-            2(?:
-              [2356]\d|
-              4\d{1,2}
-            )
-          )|
-          4(?:
-            1\d{2}|
-            2(?:
-              2\d{1,2}|
-              [47]|
-              5\d{2}
-            )
-          )|
-          5(?:
-            1\d{2}|
-            29
-          )|
-          [67]1\d{2}|
-          8(?:
-            1\d{2}|
-            2(?:
-              2\d{2}|
-              3|
-              4\d
-            )
-          )
-        )\d{3}|
-        4(?:
-          0(?:
-            2(?:
-              [09]\d|
-              7
-            )|
-            33\d{2}
-          )|
-          1\d{3}|
-          2(?:
-            1\d{2}|
-            2(?:
-              [25]\d?|
-              [348]\d|
-              [67]\d{1,2}
-            )
-          )|
-          3(?:
-            1\d{2}(?:
-              \d{2}
-            )?|
-            2(?:
-              [045]\d|
-              [236-9]\d{1,2}
-            )|
-            32\d{2}
-          )|
-          4(?:
-            [18]\d{2}|
-            2(?:
-              [2-46]\d{2}|
-              3
-            )|
-            5[25]\d{2}
-          )|
-          5(?:
-            1\d{2}|
-            2(?:
-              3\d|
-              5
-            )
-          )|
-          6(?:
-            [18]\d{2}|
-            2(?:
-              3(?:
-                \d{2}
-              )?|
-              [46]\d{1,2}|
-              5\d{2}|
-              7\d
-            )|
-            5(?:
-              3\d?|
-              4\d|
-              [57]\d{1,2}|
-              6\d{2}|
-              8
-            )
-          )|
-          71\d{2}|
-          8(?:
-            [18]\d{2}|
-            23\d{2}|
-            54\d{2}
-          )|
-          9(?:
-            [18]\d{2}|
-            2[2-5]\d{2}|
-            53\d{1,2}
-          )
-        )\d{3}|
-        5(?:
-          02[03489]\d{2}|
-          1\d{2}|
-          2(?:
-            1\d{2}|
-            2(?:
-              2(?:
-                \d{2}
-              )?|
-              [457]\d{2}
-            )
-          )|
-          3(?:
-            1\d{2}|
-            2(?:
-              [37](?:
-                \d{2}
-              )?|
-              [569]\d{2}
-            )
-          )|
-          4(?:
-            1\d{2}|
-            2[46]\d{2}
-          )|
-          5(?:
-            1\d{2}|
-            26\d{1,2}
-          )|
-          6(?:
-            [18]\d{2}|
-            2|
-            53\d{2}
-          )|
-          7(?:
-            1|
-            24
-          )\d{2}|
-          8(?:
-            1|
-            26
-          )\d{2}|
-          91\d{2}
-        )\d{3}|
-        6(?:
-          0(?:
-            1\d{2}|
-            2(?:
-              3\d{2}|
-              4\d{1,2}
-            )
-          )|
-          2(?:
-            2[2-5]\d{2}|
-            5(?:
-              [3-5]\d{2}|
-              7
-            )|
-            8\d{2}
-          )|
-          3(?:
-            1|
-            2[3478]
-          )\d{2}|
-          4(?:
-            1|
-            2[34]
-          )\d{2}|
-          5(?:
-            1|
-            2[47]
-          )\d{2}|
-          6(?:
-            [18]\d{2}|
-            6(?:
-              2(?:
-                2\d|
-                [34]\d{2}
-              )|
-              5(?:
-                [24]\d{2}|
-                3\d|
-                5\d{1,2}
-              )
-            )
-          )|
-          72[2-5]\d{2}|
-          8(?:
-            1\d{2}|
-            2[2-5]\d{2}
-          )|
-          9(?:
-            1\d{2}|
-            2[2-6]\d{2}
-          )
-        )\d{3}|
-        7(?:
-          (?:
-            02|
-            [3-589]1|
-            6[12]|
-            72[24]
-          )\d{2}|
-          21\d{3}|
-          32
-        )\d{3}|
-        8(?:
-          (?:
-            4[12]|
-            [5-7]2|
-            1\d?
-          )|
-          (?:
-            0|
-            3[12]|
-            [5-7]1|
-            217
-          )\d
-        )\d{4}|
-        9(?:
-          [35]1|
-          (?:
-            [024]2|
-            81
-          )\d|
-          (?:
-            1|
-            [24]1
-          )\d{2}
-        )\d{3}
-        "#;
-        // TODO: This is a good candidate of a seq of literals that could be
-        // shrunk quite a bit and still be very productive with respect to
-        // literal optimizations.
-        let (prefixes, suffixes) = e(pat);
-        assert!(!suffixes.is_finite());
-        assert_eq!(Some(243), prefixes.len());
-    }
-
-    #[test]
-    fn optimize() {
-        // This gets a common prefix that isn't too short.
-        let (p, s) =
-            opt(["foobarfoobar", "foobar", "foobarzfoobar", "foobarfoobar"]);
-        assert_eq!(seq([I("foobar")]), p);
-        assert_eq!(seq([I("foobar")]), s);
-
-        // This also finds a common prefix, but since it's only one byte, it
-        // prefers the multiple literals.
-        let (p, s) = opt(["abba", "akka", "abccba"]);
-        assert_eq!(exact(["abba", "akka", "abccba"]), (p, s));
-
-        let (p, s) = opt(["sam", "samwise"]);
-        assert_eq!((seq([E("sam")]), seq([E("sam"), E("samwise")])), (p, s));
-
-        // The empty string is poisonous, so our seq becomes infinite, even
-        // though all literals are exact.
-        let (p, s) = opt(["foobarfoo", "foo", "", "foozfoo", "foofoo"]);
-        assert!(!p.is_finite());
-        assert!(!s.is_finite());
-
-        // A space is also poisonous, so our seq becomes infinite. But this
-        // only gets triggered when we don't have a completely exact sequence.
-        // When the sequence is exact, spaces are okay, since we presume that
-        // any prefilter will match a space more quickly than the regex engine.
-        // (When the sequence is exact, there's a chance of the prefilter being
-        // used without needing the regex engine at all.)
-        let mut p = seq([E("foobarfoo"), I("foo"), E(" "), E("foofoo")]);
-        p.optimize_for_prefix_by_preference();
-        assert!(!p.is_finite());
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/mod.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/mod.rs
deleted file mode 100644
index 5db7843..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/mod.rs
+++ /dev/null
@@ -1,3873 +0,0 @@
-/*!
-Defines a high-level intermediate (HIR) representation for regular expressions.
-
-The HIR is represented by the [`Hir`] type, and it principally constructed via
-[translation](translate) from an [`Ast`](crate::ast::Ast). Alternatively, users
-may use the smart constructors defined on `Hir` to build their own by hand. The
-smart constructors simultaneously simplify and "optimize" the HIR, and are also
-the same routines used by translation.
-
-Most regex engines only have an HIR like this, and usually construct it
-directly from the concrete syntax. This crate however first parses the
-concrete syntax into an `Ast`, and only then creates the HIR from the `Ast`,
-as mentioned above. It's done this way to facilitate better error reporting,
-and to have a structured representation of a regex that faithfully represents
-its concrete syntax. Namely, while an `Hir` value can be converted back to an
-equivalent regex pattern string, it is unlikely to look like the original due
-to its simplified structure.
-*/
-
-use core::{char, cmp};
-
-use alloc::{
-    boxed::Box,
-    format,
-    string::{String, ToString},
-    vec,
-    vec::Vec,
-};
-
-use crate::{
-    ast::Span,
-    hir::interval::{Interval, IntervalSet, IntervalSetIter},
-    unicode,
-};
-
-pub use crate::{
-    hir::visitor::{visit, Visitor},
-    unicode::CaseFoldError,
-};
-
-mod interval;
-pub mod literal;
-pub mod print;
-pub mod translate;
-mod visitor;
-
-/// An error that can occur while translating an `Ast` to a `Hir`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct Error {
-    /// The kind of error.
-    kind: ErrorKind,
-    /// The original pattern that the translator's Ast was parsed from. Every
-    /// span in an error is a valid range into this string.
-    pattern: String,
-    /// The span of this error, derived from the Ast given to the translator.
-    span: Span,
-}
-
-impl Error {
-    /// Return the type of this error.
-    pub fn kind(&self) -> &ErrorKind {
-        &self.kind
-    }
-
-    /// The original pattern string in which this error occurred.
-    ///
-    /// Every span reported by this error is reported in terms of this string.
-    pub fn pattern(&self) -> &str {
-        &self.pattern
-    }
-
-    /// Return the span at which this error occurred.
-    pub fn span(&self) -> &Span {
-        &self.span
-    }
-}
-
-/// The type of an error that occurred while building an `Hir`.
-///
-/// This error type is marked as `non_exhaustive`. This means that adding a
-/// new variant is not considered a breaking change.
-#[non_exhaustive]
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum ErrorKind {
-    /// This error occurs when a Unicode feature is used when Unicode
-    /// support is disabled. For example `(?-u:\pL)` would trigger this error.
-    UnicodeNotAllowed,
-    /// This error occurs when translating a pattern that could match a byte
-    /// sequence that isn't UTF-8 and `utf8` was enabled.
-    InvalidUtf8,
-    /// This error occurs when one uses a non-ASCII byte for a line terminator,
-    /// but where Unicode mode is enabled and UTF-8 mode is disabled.
-    InvalidLineTerminator,
-    /// This occurs when an unrecognized Unicode property name could not
-    /// be found.
-    UnicodePropertyNotFound,
-    /// This occurs when an unrecognized Unicode property value could not
-    /// be found.
-    UnicodePropertyValueNotFound,
-    /// This occurs when a Unicode-aware Perl character class (`\w`, `\s` or
-    /// `\d`) could not be found. This can occur when the `unicode-perl`
-    /// crate feature is not enabled.
-    UnicodePerlClassNotFound,
-    /// This occurs when the Unicode simple case mapping tables are not
-    /// available, and the regular expression required Unicode aware case
-    /// insensitivity.
-    UnicodeCaseUnavailable,
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for Error {}
-
-impl core::fmt::Display for Error {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        crate::error::Formatter::from(self).fmt(f)
-    }
-}
-
-impl core::fmt::Display for ErrorKind {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        use self::ErrorKind::*;
-
-        let msg = match *self {
-            UnicodeNotAllowed => "Unicode not allowed here",
-            InvalidUtf8 => "pattern can match invalid UTF-8",
-            InvalidLineTerminator => "invalid line terminator, must be ASCII",
-            UnicodePropertyNotFound => "Unicode property not found",
-            UnicodePropertyValueNotFound => "Unicode property value not found",
-            UnicodePerlClassNotFound => {
-                "Unicode-aware Perl class not found \
-                 (make sure the unicode-perl feature is enabled)"
-            }
-            UnicodeCaseUnavailable => {
-                "Unicode-aware case insensitivity matching is not available \
-                 (make sure the unicode-case feature is enabled)"
-            }
-        };
-        f.write_str(msg)
-    }
-}
-
-/// A high-level intermediate representation (HIR) for a regular expression.
-///
-/// An HIR value is a combination of a [`HirKind`] and a set of [`Properties`].
-/// An `HirKind` indicates what kind of regular expression it is (a literal,
-/// a repetition, a look-around assertion, etc.), where as a `Properties`
-/// describes various facts about the regular expression. For example, whether
-/// it matches UTF-8 or if it matches the empty string.
-///
-/// The HIR of a regular expression represents an intermediate step between
-/// its abstract syntax (a structured description of the concrete syntax) and
-/// an actual regex matcher. The purpose of HIR is to make regular expressions
-/// easier to analyze. In particular, the AST is much more complex than the
-/// HIR. For example, while an AST supports arbitrarily nested character
-/// classes, the HIR will flatten all nested classes into a single set. The HIR
-/// will also "compile away" every flag present in the concrete syntax. For
-/// example, users of HIR expressions never need to worry about case folding;
-/// it is handled automatically by the translator (e.g., by translating
-/// `(?i:A)` to `[aA]`).
-///
-/// The specific type of an HIR expression can be accessed via its `kind`
-/// or `into_kind` methods. This extra level of indirection exists for two
-/// reasons:
-///
-/// 1. Construction of an HIR expression *must* use the constructor methods on
-/// this `Hir` type instead of building the `HirKind` values directly. This
-/// permits construction to enforce invariants like "concatenations always
-/// consist of two or more sub-expressions."
-/// 2. Every HIR expression contains attributes that are defined inductively,
-/// and can be computed cheaply during the construction process. For example,
-/// one such attribute is whether the expression must match at the beginning of
-/// the haystack.
-///
-/// In particular, if you have an `HirKind` value, then there is intentionally
-/// no way to build an `Hir` value from it. You instead need to do case
-/// analysis on the `HirKind` value and build the `Hir` value using its smart
-/// constructors.
-///
-/// # UTF-8
-///
-/// If the HIR was produced by a translator with
-/// [`TranslatorBuilder::utf8`](translate::TranslatorBuilder::utf8) enabled,
-/// then the HIR is guaranteed to match UTF-8 exclusively for all non-empty
-/// matches.
-///
-/// For empty matches, those can occur at any position. It is the
-/// responsibility of the regex engine to determine whether empty matches are
-/// permitted between the code units of a single codepoint.
-///
-/// # Stack space
-///
-/// This type defines its own destructor that uses constant stack space and
-/// heap space proportional to the size of the HIR.
-///
-/// Also, an `Hir`'s `fmt::Display` implementation prints an HIR as a regular
-/// expression pattern string, and uses constant stack space and heap space
-/// proportional to the size of the `Hir`. The regex it prints is guaranteed to
-/// be _semantically_ equivalent to the original concrete syntax, but it may
-/// look very different. (And potentially not practically readable by a human.)
-///
-/// An `Hir`'s `fmt::Debug` implementation currently does not use constant
-/// stack space. The implementation will also suppress some details (such as
-/// the `Properties` inlined into every `Hir` value to make it less noisy).
-#[derive(Clone, Eq, PartialEq)]
-pub struct Hir {
-    /// The underlying HIR kind.
-    kind: HirKind,
-    /// Analysis info about this HIR, computed during construction.
-    props: Properties,
-}
-
-/// Methods for accessing the underlying `HirKind` and `Properties`.
-impl Hir {
-    /// Returns a reference to the underlying HIR kind.
-    pub fn kind(&self) -> &HirKind {
-        &self.kind
-    }
-
-    /// Consumes ownership of this HIR expression and returns its underlying
-    /// `HirKind`.
-    pub fn into_kind(mut self) -> HirKind {
-        core::mem::replace(&mut self.kind, HirKind::Empty)
-    }
-
-    /// Returns the properties computed for this `Hir`.
-    pub fn properties(&self) -> &Properties {
-        &self.props
-    }
-
-    /// Splits this HIR into its constituent parts.
-    ///
-    /// This is useful because `let Hir { kind, props } = hir;` does not work
-    /// because of `Hir`'s custom `Drop` implementation.
-    fn into_parts(mut self) -> (HirKind, Properties) {
-        (
-            core::mem::replace(&mut self.kind, HirKind::Empty),
-            core::mem::replace(&mut self.props, Properties::empty()),
-        )
-    }
-}
-
-/// Smart constructors for HIR values.
-///
-/// These constructors are called "smart" because they do inductive work or
-/// simplifications. For example, calling `Hir::repetition` with a repetition
-/// like `a{0}` will actually return a `Hir` with a `HirKind::Empty` kind
-/// since it is equivalent to an empty regex. Another example is calling
-/// `Hir::concat(vec![expr])`. Instead of getting a `HirKind::Concat`, you'll
-/// just get back the original `expr` since it's precisely equivalent.
-///
-/// Smart constructors enable maintaining invariants about the HIR data type
-/// while also simulanteously keeping the representation as simple as possible.
-impl Hir {
-    /// Returns an empty HIR expression.
-    ///
-    /// An empty HIR expression always matches, including the empty string.
-    #[inline]
-    pub fn empty() -> Hir {
-        let props = Properties::empty();
-        Hir { kind: HirKind::Empty, props }
-    }
-
-    /// Returns an HIR expression that can never match anything. That is,
-    /// the size of the set of strings in the language described by the HIR
-    /// returned is `0`.
-    ///
-    /// This is distinct from [`Hir::empty`] in that the empty string matches
-    /// the HIR returned by `Hir::empty`. That is, the set of strings in the
-    /// language describe described by `Hir::empty` is non-empty.
-    ///
-    /// Note that currently, the HIR returned uses an empty character class to
-    /// indicate that nothing can match. An equivalent expression that cannot
-    /// match is an empty alternation, but all such "fail" expressions are
-    /// normalized (via smart constructors) to empty character classes. This is
-    /// because empty character classes can be spelled in the concrete syntax
-    /// of a regex (e.g., `\P{any}` or `(?-u:[^\x00-\xFF])` or `[a&&b]`), but
-    /// empty alternations cannot.
-    #[inline]
-    pub fn fail() -> Hir {
-        let class = Class::Bytes(ClassBytes::empty());
-        let props = Properties::class(&class);
-        // We can't just call Hir::class here because it defers to Hir::fail
-        // in order to canonicalize the Hir value used to represent "cannot
-        // match."
-        Hir { kind: HirKind::Class(class), props }
-    }
-
-    /// Creates a literal HIR expression.
-    ///
-    /// This accepts anything that can be converted into a `Box<[u8]>`.
-    ///
-    /// Note that there is no mechanism for storing a `char` or a `Box<str>`
-    /// in an HIR. Everything is "just bytes." Whether a `Literal` (or
-    /// any HIR node) matches valid UTF-8 exclusively can be queried via
-    /// [`Properties::is_utf8`].
-    ///
-    /// # Example
-    ///
-    /// This example shows that concatenations of `Literal` HIR values will
-    /// automatically get flattened and combined together. So for example, even
-    /// if you concat multiple `Literal` values that are themselves not valid
-    /// UTF-8, they might add up to valid UTF-8. This also demonstrates just
-    /// how "smart" Hir's smart constructors are.
-    ///
-    /// ```
-    /// use regex_syntax::hir::{Hir, HirKind, Literal};
-    ///
-    /// let literals = vec![
-    ///     Hir::literal([0xE2]),
-    ///     Hir::literal([0x98]),
-    ///     Hir::literal([0x83]),
-    /// ];
-    /// // Each literal, on its own, is invalid UTF-8.
-    /// assert!(literals.iter().all(|hir| !hir.properties().is_utf8()));
-    ///
-    /// let concat = Hir::concat(literals);
-    /// // But the concatenation is valid UTF-8!
-    /// assert!(concat.properties().is_utf8());
-    ///
-    /// // And also notice that the literals have been concatenated into a
-    /// // single `Literal`, to the point where there is no explicit `Concat`!
-    /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes())));
-    /// assert_eq!(&expected, concat.kind());
-    /// ```
-    ///
-    /// # Example: building a literal from a `char`
-    ///
-    /// This example shows how to build a single `Hir` literal from a `char`
-    /// value. Since a [`Literal`] is just bytes, we just need to UTF-8
-    /// encode a `char` value:
-    ///
-    /// ```
-    /// use regex_syntax::hir::{Hir, HirKind, Literal};
-    ///
-    /// let ch = '☃';
-    /// let got = Hir::literal(ch.encode_utf8(&mut [0; 4]).as_bytes());
-    ///
-    /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes())));
-    /// assert_eq!(&expected, got.kind());
-    /// ```
-    #[inline]
-    pub fn literal<B: Into<Box<[u8]>>>(lit: B) -> Hir {
-        let bytes = lit.into();
-        if bytes.is_empty() {
-            return Hir::empty();
-        }
-
-        let lit = Literal(bytes);
-        let props = Properties::literal(&lit);
-        Hir { kind: HirKind::Literal(lit), props }
-    }
-
-    /// Creates a class HIR expression. The class may either be defined over
-    /// ranges of Unicode codepoints or ranges of raw byte values.
-    ///
-    /// Note that an empty class is permitted. An empty class is equivalent to
-    /// `Hir::fail()`.
-    #[inline]
-    pub fn class(class: Class) -> Hir {
-        if class.is_empty() {
-            return Hir::fail();
-        } else if let Some(bytes) = class.literal() {
-            return Hir::literal(bytes);
-        }
-        let props = Properties::class(&class);
-        Hir { kind: HirKind::Class(class), props }
-    }
-
-    /// Creates a look-around assertion HIR expression.
-    #[inline]
-    pub fn look(look: Look) -> Hir {
-        let props = Properties::look(look);
-        Hir { kind: HirKind::Look(look), props }
-    }
-
-    /// Creates a repetition HIR expression.
-    #[inline]
-    pub fn repetition(mut rep: Repetition) -> Hir {
-        // If the sub-expression of a repetition can only match the empty
-        // string, then we force its maximum to be at most 1.
-        if rep.sub.properties().maximum_len() == Some(0) {
-            rep.min = cmp::min(rep.min, 1);
-            rep.max = rep.max.map(|n| cmp::min(n, 1)).or(Some(1));
-        }
-        // The regex 'a{0}' is always equivalent to the empty regex. This is
-        // true even when 'a' is an expression that never matches anything
-        // (like '\P{any}').
-        //
-        // Additionally, the regex 'a{1}' is always equivalent to 'a'.
-        if rep.min == 0 && rep.max == Some(0) {
-            return Hir::empty();
-        } else if rep.min == 1 && rep.max == Some(1) {
-            return *rep.sub;
-        }
-        let props = Properties::repetition(&rep);
-        Hir { kind: HirKind::Repetition(rep), props }
-    }
-
-    /// Creates a capture HIR expression.
-    ///
-    /// Note that there is no explicit HIR value for a non-capturing group.
-    /// Since a non-capturing group only exists to override precedence in the
-    /// concrete syntax and since an HIR already does its own grouping based on
-    /// what is parsed, there is no need to explicitly represent non-capturing
-    /// groups in the HIR.
-    #[inline]
-    pub fn capture(capture: Capture) -> Hir {
-        let props = Properties::capture(&capture);
-        Hir { kind: HirKind::Capture(capture), props }
-    }
-
-    /// Returns the concatenation of the given expressions.
-    ///
-    /// This attempts to flatten and simplify the concatenation as appropriate.
-    ///
-    /// # Example
-    ///
-    /// This shows a simple example of basic flattening of both concatenations
-    /// and literals.
-    ///
-    /// ```
-    /// use regex_syntax::hir::Hir;
-    ///
-    /// let hir = Hir::concat(vec![
-    ///     Hir::concat(vec![
-    ///         Hir::literal([b'a']),
-    ///         Hir::literal([b'b']),
-    ///         Hir::literal([b'c']),
-    ///     ]),
-    ///     Hir::concat(vec![
-    ///         Hir::literal([b'x']),
-    ///         Hir::literal([b'y']),
-    ///         Hir::literal([b'z']),
-    ///     ]),
-    /// ]);
-    /// let expected = Hir::literal("abcxyz".as_bytes());
-    /// assert_eq!(expected, hir);
-    /// ```
-    pub fn concat(subs: Vec<Hir>) -> Hir {
-        // We rebuild the concatenation by simplifying it. Would be nice to do
-        // it in place, but that seems a little tricky?
-        let mut new = vec![];
-        // This gobbles up any adjacent literals in a concatenation and smushes
-        // them together. Basically, when we see a literal, we add its bytes
-        // to 'prior_lit', and whenever we see anything else, we first take
-        // any bytes in 'prior_lit' and add it to the 'new' concatenation.
-        let mut prior_lit: Option<Vec<u8>> = None;
-        for sub in subs {
-            let (kind, props) = sub.into_parts();
-            match kind {
-                HirKind::Literal(Literal(bytes)) => {
-                    if let Some(ref mut prior_bytes) = prior_lit {
-                        prior_bytes.extend_from_slice(&bytes);
-                    } else {
-                        prior_lit = Some(bytes.to_vec());
-                    }
-                }
-                // We also flatten concats that are direct children of another
-                // concat. We only need to do this one level deep since
-                // Hir::concat is the only way to build concatenations, and so
-                // flattening happens inductively.
-                HirKind::Concat(subs2) => {
-                    for sub2 in subs2 {
-                        let (kind2, props2) = sub2.into_parts();
-                        match kind2 {
-                            HirKind::Literal(Literal(bytes)) => {
-                                if let Some(ref mut prior_bytes) = prior_lit {
-                                    prior_bytes.extend_from_slice(&bytes);
-                                } else {
-                                    prior_lit = Some(bytes.to_vec());
-                                }
-                            }
-                            kind2 => {
-                                if let Some(prior_bytes) = prior_lit.take() {
-                                    new.push(Hir::literal(prior_bytes));
-                                }
-                                new.push(Hir { kind: kind2, props: props2 });
-                            }
-                        }
-                    }
-                }
-                // We can just skip empty HIRs.
-                HirKind::Empty => {}
-                kind => {
-                    if let Some(prior_bytes) = prior_lit.take() {
-                        new.push(Hir::literal(prior_bytes));
-                    }
-                    new.push(Hir { kind, props });
-                }
-            }
-        }
-        if let Some(prior_bytes) = prior_lit.take() {
-            new.push(Hir::literal(prior_bytes));
-        }
-        if new.is_empty() {
-            return Hir::empty();
-        } else if new.len() == 1 {
-            return new.pop().unwrap();
-        }
-        let props = Properties::concat(&new);
-        Hir { kind: HirKind::Concat(new), props }
-    }
-
-    /// Returns the alternation of the given expressions.
-    ///
-    /// This flattens and simplifies the alternation as appropriate. This may
-    /// include factoring out common prefixes or even rewriting the alternation
-    /// as a character class.
-    ///
-    /// Note that an empty alternation is equivalent to `Hir::fail()`. (It
-    /// is not possible for one to write an empty alternation, or even an
-    /// alternation with a single sub-expression, in the concrete syntax of a
-    /// regex.)
-    ///
-    /// # Example
-    ///
-    /// This is a simple example showing how an alternation might get
-    /// simplified.
-    ///
-    /// ```
-    /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange};
-    ///
-    /// let hir = Hir::alternation(vec![
-    ///     Hir::literal([b'a']),
-    ///     Hir::literal([b'b']),
-    ///     Hir::literal([b'c']),
-    ///     Hir::literal([b'd']),
-    ///     Hir::literal([b'e']),
-    ///     Hir::literal([b'f']),
-    /// ]);
-    /// let expected = Hir::class(Class::Unicode(ClassUnicode::new([
-    ///     ClassUnicodeRange::new('a', 'f'),
-    /// ])));
-    /// assert_eq!(expected, hir);
-    /// ```
-    ///
-    /// And another example showing how common prefixes might get factored
-    /// out.
-    ///
-    /// ```
-    /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange};
-    ///
-    /// let hir = Hir::alternation(vec![
-    ///     Hir::concat(vec![
-    ///         Hir::literal("abc".as_bytes()),
-    ///         Hir::class(Class::Unicode(ClassUnicode::new([
-    ///             ClassUnicodeRange::new('A', 'Z'),
-    ///         ]))),
-    ///     ]),
-    ///     Hir::concat(vec![
-    ///         Hir::literal("abc".as_bytes()),
-    ///         Hir::class(Class::Unicode(ClassUnicode::new([
-    ///             ClassUnicodeRange::new('a', 'z'),
-    ///         ]))),
-    ///     ]),
-    /// ]);
-    /// let expected = Hir::concat(vec![
-    ///     Hir::literal("abc".as_bytes()),
-    ///     Hir::alternation(vec![
-    ///         Hir::class(Class::Unicode(ClassUnicode::new([
-    ///             ClassUnicodeRange::new('A', 'Z'),
-    ///         ]))),
-    ///         Hir::class(Class::Unicode(ClassUnicode::new([
-    ///             ClassUnicodeRange::new('a', 'z'),
-    ///         ]))),
-    ///     ]),
-    /// ]);
-    /// assert_eq!(expected, hir);
-    /// ```
-    ///
-    /// Note that these sorts of simplifications are not guaranteed.
-    pub fn alternation(subs: Vec<Hir>) -> Hir {
-        // We rebuild the alternation by simplifying it. We proceed similarly
-        // as the concatenation case. But in this case, there's no literal
-        // simplification happening. We're just flattening alternations.
-        let mut new = Vec::with_capacity(subs.len());
-        for sub in subs {
-            let (kind, props) = sub.into_parts();
-            match kind {
-                HirKind::Alternation(subs2) => {
-                    new.extend(subs2);
-                }
-                kind => {
-                    new.push(Hir { kind, props });
-                }
-            }
-        }
-        if new.is_empty() {
-            return Hir::fail();
-        } else if new.len() == 1 {
-            return new.pop().unwrap();
-        }
-        // Now that it's completely flattened, look for the special case of
-        // 'char1|char2|...|charN' and collapse that into a class. Note that
-        // we look for 'char' first and then bytes. The issue here is that if
-        // we find both non-ASCII codepoints and non-ASCII singleton bytes,
-        // then it isn't actually possible to smush them into a single class.
-        // (Because classes are either "all codepoints" or "all bytes." You
-        // can have a class that both matches non-ASCII but valid UTF-8 and
-        // invalid UTF-8.) So we look for all chars and then all bytes, and
-        // don't handle anything else.
-        if let Some(singletons) = singleton_chars(&new) {
-            let it = singletons
-                .into_iter()
-                .map(|ch| ClassUnicodeRange { start: ch, end: ch });
-            return Hir::class(Class::Unicode(ClassUnicode::new(it)));
-        }
-        if let Some(singletons) = singleton_bytes(&new) {
-            let it = singletons
-                .into_iter()
-                .map(|b| ClassBytesRange { start: b, end: b });
-            return Hir::class(Class::Bytes(ClassBytes::new(it)));
-        }
-        // Similar to singleton chars, we can also look for alternations of
-        // classes. Those can be smushed into a single class.
-        if let Some(cls) = class_chars(&new) {
-            return Hir::class(cls);
-        }
-        if let Some(cls) = class_bytes(&new) {
-            return Hir::class(cls);
-        }
-        // Factor out a common prefix if we can, which might potentially
-        // simplify the expression and unlock other optimizations downstream.
-        // It also might generally make NFA matching and DFA construction
-        // faster by reducing the scope of branching in the regex.
-        new = match lift_common_prefix(new) {
-            Ok(hir) => return hir,
-            Err(unchanged) => unchanged,
-        };
-        let props = Properties::alternation(&new);
-        Hir { kind: HirKind::Alternation(new), props }
-    }
-
-    /// Returns an HIR expression for `.`.
-    ///
-    /// * [`Dot::AnyChar`] maps to `(?su-R:.)`.
-    /// * [`Dot::AnyByte`] maps to `(?s-Ru:.)`.
-    /// * [`Dot::AnyCharExceptLF`] maps to `(?u-Rs:.)`.
-    /// * [`Dot::AnyCharExceptCRLF`] maps to `(?Ru-s:.)`.
-    /// * [`Dot::AnyByteExceptLF`] maps to `(?-Rsu:.)`.
-    /// * [`Dot::AnyByteExceptCRLF`] maps to `(?R-su:.)`.
-    ///
-    /// # Example
-    ///
-    /// Note that this is a convenience routine for constructing the correct
-    /// character class based on the value of `Dot`. There is no explicit "dot"
-    /// HIR value. It is just an abbreviation for a common character class.
-    ///
-    /// ```
-    /// use regex_syntax::hir::{Hir, Dot, Class, ClassBytes, ClassBytesRange};
-    ///
-    /// let hir = Hir::dot(Dot::AnyByte);
-    /// let expected = Hir::class(Class::Bytes(ClassBytes::new([
-    ///     ClassBytesRange::new(0x00, 0xFF),
-    /// ])));
-    /// assert_eq!(expected, hir);
-    /// ```
-    #[inline]
-    pub fn dot(dot: Dot) -> Hir {
-        match dot {
-            Dot::AnyChar => Hir::class(Class::Unicode(ClassUnicode::new([
-                ClassUnicodeRange::new('\0', '\u{10FFFF}'),
-            ]))),
-            Dot::AnyByte => Hir::class(Class::Bytes(ClassBytes::new([
-                ClassBytesRange::new(b'\0', b'\xFF'),
-            ]))),
-            Dot::AnyCharExcept(ch) => {
-                let mut cls =
-                    ClassUnicode::new([ClassUnicodeRange::new(ch, ch)]);
-                cls.negate();
-                Hir::class(Class::Unicode(cls))
-            }
-            Dot::AnyCharExceptLF => {
-                Hir::class(Class::Unicode(ClassUnicode::new([
-                    ClassUnicodeRange::new('\0', '\x09'),
-                    ClassUnicodeRange::new('\x0B', '\u{10FFFF}'),
-                ])))
-            }
-            Dot::AnyCharExceptCRLF => {
-                Hir::class(Class::Unicode(ClassUnicode::new([
-                    ClassUnicodeRange::new('\0', '\x09'),
-                    ClassUnicodeRange::new('\x0B', '\x0C'),
-                    ClassUnicodeRange::new('\x0E', '\u{10FFFF}'),
-                ])))
-            }
-            Dot::AnyByteExcept(byte) => {
-                let mut cls =
-                    ClassBytes::new([ClassBytesRange::new(byte, byte)]);
-                cls.negate();
-                Hir::class(Class::Bytes(cls))
-            }
-            Dot::AnyByteExceptLF => {
-                Hir::class(Class::Bytes(ClassBytes::new([
-                    ClassBytesRange::new(b'\0', b'\x09'),
-                    ClassBytesRange::new(b'\x0B', b'\xFF'),
-                ])))
-            }
-            Dot::AnyByteExceptCRLF => {
-                Hir::class(Class::Bytes(ClassBytes::new([
-                    ClassBytesRange::new(b'\0', b'\x09'),
-                    ClassBytesRange::new(b'\x0B', b'\x0C'),
-                    ClassBytesRange::new(b'\x0E', b'\xFF'),
-                ])))
-            }
-        }
-    }
-}
-
-/// The underlying kind of an arbitrary [`Hir`] expression.
-///
-/// An `HirKind` is principally useful for doing case analysis on the type
-/// of a regular expression. If you're looking to build new `Hir` values,
-/// then you _must_ use the smart constructors defined on `Hir`, like
-/// [`Hir::repetition`], to build new `Hir` values. The API intentionally does
-/// not expose any way of building an `Hir` directly from an `HirKind`.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum HirKind {
-    /// The empty regular expression, which matches everything, including the
-    /// empty string.
-    Empty,
-    /// A literalstring that matches exactly these bytes.
-    Literal(Literal),
-    /// A single character class that matches any of the characters in the
-    /// class. A class can either consist of Unicode scalar values as
-    /// characters, or it can use bytes.
-    ///
-    /// A class may be empty. In which case, it matches nothing.
-    Class(Class),
-    /// A look-around assertion. A look-around match always has zero length.
-    Look(Look),
-    /// A repetition operation applied to a sub-expression.
-    Repetition(Repetition),
-    /// A capturing group, which contains a sub-expression.
-    Capture(Capture),
-    /// A concatenation of expressions.
-    ///
-    /// A concatenation matches only if each of its sub-expressions match one
-    /// after the other.
-    ///
-    /// Concatenations are guaranteed by `Hir`'s smart constructors to always
-    /// have at least two sub-expressions.
-    Concat(Vec<Hir>),
-    /// An alternation of expressions.
-    ///
-    /// An alternation matches only if at least one of its sub-expressions
-    /// match. If multiple sub-expressions match, then the leftmost is
-    /// preferred.
-    ///
-    /// Alternations are guaranteed by `Hir`'s smart constructors to always
-    /// have at least two sub-expressions.
-    Alternation(Vec<Hir>),
-}
-
-impl HirKind {
-    /// Returns a slice of this kind's sub-expressions, if any.
-    pub fn subs(&self) -> &[Hir] {
-        use core::slice::from_ref;
-
-        match *self {
-            HirKind::Empty
-            | HirKind::Literal(_)
-            | HirKind::Class(_)
-            | HirKind::Look(_) => &[],
-            HirKind::Repetition(Repetition { ref sub, .. }) => from_ref(sub),
-            HirKind::Capture(Capture { ref sub, .. }) => from_ref(sub),
-            HirKind::Concat(ref subs) => subs,
-            HirKind::Alternation(ref subs) => subs,
-        }
-    }
-}
-
-impl core::fmt::Debug for Hir {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        self.kind.fmt(f)
-    }
-}
-
-/// Print a display representation of this Hir.
-///
-/// The result of this is a valid regular expression pattern string.
-///
-/// This implementation uses constant stack space and heap space proportional
-/// to the size of the `Hir`.
-impl core::fmt::Display for Hir {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        crate::hir::print::Printer::new().print(self, f)
-    }
-}
-
-/// The high-level intermediate representation of a literal.
-///
-/// A literal corresponds to `0` or more bytes that should be matched
-/// literally. The smart constructors defined on `Hir` will automatically
-/// concatenate adjacent literals into one literal, and will even automatically
-/// replace empty literals with `Hir::empty()`.
-///
-/// Note that despite a literal being represented by a sequence of bytes, its
-/// `Debug` implementation will attempt to print it as a normal string. (That
-/// is, not a sequence of decimal numbers.)
-#[derive(Clone, Eq, PartialEq)]
-pub struct Literal(pub Box<[u8]>);
-
-impl core::fmt::Debug for Literal {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        crate::debug::Bytes(&self.0).fmt(f)
-    }
-}
-
-/// The high-level intermediate representation of a character class.
-///
-/// A character class corresponds to a set of characters. A character is either
-/// defined by a Unicode scalar value or a byte.
-///
-/// A character class, regardless of its character type, is represented by a
-/// sequence of non-overlapping non-adjacent ranges of characters.
-///
-/// There are no guarantees about which class variant is used. Generally
-/// speaking, the Unicode variat is used whenever a class needs to contain
-/// non-ASCII Unicode scalar values. But the Unicode variant can be used even
-/// when Unicode mode is disabled. For example, at the time of writing, the
-/// regex `(?-u:a|\xc2\xa0)` will compile down to HIR for the Unicode class
-/// `[a\u00A0]` due to optimizations.
-///
-/// Note that `Bytes` variant may be produced even when it exclusively matches
-/// valid UTF-8. This is because a `Bytes` variant represents an intention by
-/// the author of the regular expression to disable Unicode mode, which in turn
-/// impacts the semantics of case insensitive matching. For example, `(?i)k`
-/// and `(?i-u)k` will not match the same set of strings.
-#[derive(Clone, Eq, PartialEq)]
-pub enum Class {
-    /// A set of characters represented by Unicode scalar values.
-    Unicode(ClassUnicode),
-    /// A set of characters represented by arbitrary bytes (one byte per
-    /// character).
-    Bytes(ClassBytes),
-}
-
-impl Class {
-    /// Apply Unicode simple case folding to this character class, in place.
-    /// The character class will be expanded to include all simple case folded
-    /// character variants.
-    ///
-    /// If this is a byte oriented character class, then this will be limited
-    /// to the ASCII ranges `A-Z` and `a-z`.
-    ///
-    /// # Panics
-    ///
-    /// This routine panics when the case mapping data necessary for this
-    /// routine to complete is unavailable. This occurs when the `unicode-case`
-    /// feature is not enabled and the underlying class is Unicode oriented.
-    ///
-    /// Callers should prefer using `try_case_fold_simple` instead, which will
-    /// return an error instead of panicking.
-    pub fn case_fold_simple(&mut self) {
-        match *self {
-            Class::Unicode(ref mut x) => x.case_fold_simple(),
-            Class::Bytes(ref mut x) => x.case_fold_simple(),
-        }
-    }
-
-    /// Apply Unicode simple case folding to this character class, in place.
-    /// The character class will be expanded to include all simple case folded
-    /// character variants.
-    ///
-    /// If this is a byte oriented character class, then this will be limited
-    /// to the ASCII ranges `A-Z` and `a-z`.
-    ///
-    /// # Error
-    ///
-    /// This routine returns an error when the case mapping data necessary
-    /// for this routine to complete is unavailable. This occurs when the
-    /// `unicode-case` feature is not enabled and the underlying class is
-    /// Unicode oriented.
-    pub fn try_case_fold_simple(
-        &mut self,
-    ) -> core::result::Result<(), CaseFoldError> {
-        match *self {
-            Class::Unicode(ref mut x) => x.try_case_fold_simple()?,
-            Class::Bytes(ref mut x) => x.case_fold_simple(),
-        }
-        Ok(())
-    }
-
-    /// Negate this character class in place.
-    ///
-    /// After completion, this character class will contain precisely the
-    /// characters that weren't previously in the class.
-    pub fn negate(&mut self) {
-        match *self {
-            Class::Unicode(ref mut x) => x.negate(),
-            Class::Bytes(ref mut x) => x.negate(),
-        }
-    }
-
-    /// Returns true if and only if this character class will only ever match
-    /// valid UTF-8.
-    ///
-    /// A character class can match invalid UTF-8 only when the following
-    /// conditions are met:
-    ///
-    /// 1. The translator was configured to permit generating an expression
-    ///    that can match invalid UTF-8. (By default, this is disabled.)
-    /// 2. Unicode mode (via the `u` flag) was disabled either in the concrete
-    ///    syntax or in the parser builder. By default, Unicode mode is
-    ///    enabled.
-    pub fn is_utf8(&self) -> bool {
-        match *self {
-            Class::Unicode(_) => true,
-            Class::Bytes(ref x) => x.is_ascii(),
-        }
-    }
-
-    /// Returns the length, in bytes, of the smallest string matched by this
-    /// character class.
-    ///
-    /// For non-empty byte oriented classes, this always returns `1`. For
-    /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or
-    /// `4`. For empty classes, `None` is returned. It is impossible for `0` to
-    /// be returned.
-    ///
-    /// # Example
-    ///
-    /// This example shows some examples of regexes and their corresponding
-    /// minimum length, if any.
-    ///
-    /// ```
-    /// use regex_syntax::{hir::Properties, parse};
-    ///
-    /// // The empty string has a min length of 0.
-    /// let hir = parse(r"")?;
-    /// assert_eq!(Some(0), hir.properties().minimum_len());
-    /// // As do other types of regexes that only match the empty string.
-    /// let hir = parse(r"^$\b\B")?;
-    /// assert_eq!(Some(0), hir.properties().minimum_len());
-    /// // A regex that can match the empty string but match more is still 0.
-    /// let hir = parse(r"a*")?;
-    /// assert_eq!(Some(0), hir.properties().minimum_len());
-    /// // A regex that matches nothing has no minimum defined.
-    /// let hir = parse(r"[a&&b]")?;
-    /// assert_eq!(None, hir.properties().minimum_len());
-    /// // Character classes usually have a minimum length of 1.
-    /// let hir = parse(r"\w")?;
-    /// assert_eq!(Some(1), hir.properties().minimum_len());
-    /// // But sometimes Unicode classes might be bigger!
-    /// let hir = parse(r"\p{Cyrillic}")?;
-    /// assert_eq!(Some(2), hir.properties().minimum_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn minimum_len(&self) -> Option<usize> {
-        match *self {
-            Class::Unicode(ref x) => x.minimum_len(),
-            Class::Bytes(ref x) => x.minimum_len(),
-        }
-    }
-
-    /// Returns the length, in bytes, of the longest string matched by this
-    /// character class.
-    ///
-    /// For non-empty byte oriented classes, this always returns `1`. For
-    /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or
-    /// `4`. For empty classes, `None` is returned. It is impossible for `0` to
-    /// be returned.
-    ///
-    /// # Example
-    ///
-    /// This example shows some examples of regexes and their corresponding
-    /// maximum length, if any.
-    ///
-    /// ```
-    /// use regex_syntax::{hir::Properties, parse};
-    ///
-    /// // The empty string has a max length of 0.
-    /// let hir = parse(r"")?;
-    /// assert_eq!(Some(0), hir.properties().maximum_len());
-    /// // As do other types of regexes that only match the empty string.
-    /// let hir = parse(r"^$\b\B")?;
-    /// assert_eq!(Some(0), hir.properties().maximum_len());
-    /// // A regex that matches nothing has no maximum defined.
-    /// let hir = parse(r"[a&&b]")?;
-    /// assert_eq!(None, hir.properties().maximum_len());
-    /// // Bounded repeats work as you expect.
-    /// let hir = parse(r"x{2,10}")?;
-    /// assert_eq!(Some(10), hir.properties().maximum_len());
-    /// // An unbounded repeat means there is no maximum.
-    /// let hir = parse(r"x{2,}")?;
-    /// assert_eq!(None, hir.properties().maximum_len());
-    /// // With Unicode enabled, \w can match up to 4 bytes!
-    /// let hir = parse(r"\w")?;
-    /// assert_eq!(Some(4), hir.properties().maximum_len());
-    /// // Without Unicode enabled, \w matches at most 1 byte.
-    /// let hir = parse(r"(?-u)\w")?;
-    /// assert_eq!(Some(1), hir.properties().maximum_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn maximum_len(&self) -> Option<usize> {
-        match *self {
-            Class::Unicode(ref x) => x.maximum_len(),
-            Class::Bytes(ref x) => x.maximum_len(),
-        }
-    }
-
-    /// Returns true if and only if this character class is empty. That is,
-    /// it has no elements.
-    ///
-    /// An empty character can never match anything, including an empty string.
-    pub fn is_empty(&self) -> bool {
-        match *self {
-            Class::Unicode(ref x) => x.ranges().is_empty(),
-            Class::Bytes(ref x) => x.ranges().is_empty(),
-        }
-    }
-
-    /// If this class consists of exactly one element (whether a codepoint or a
-    /// byte), then return it as a literal byte string.
-    ///
-    /// If this class is empty or contains more than one element, then `None`
-    /// is returned.
-    pub fn literal(&self) -> Option<Vec<u8>> {
-        match *self {
-            Class::Unicode(ref x) => x.literal(),
-            Class::Bytes(ref x) => x.literal(),
-        }
-    }
-}
-
-impl core::fmt::Debug for Class {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        use crate::debug::Byte;
-
-        let mut fmter = f.debug_set();
-        match *self {
-            Class::Unicode(ref cls) => {
-                for r in cls.ranges().iter() {
-                    fmter.entry(&(r.start..=r.end));
-                }
-            }
-            Class::Bytes(ref cls) => {
-                for r in cls.ranges().iter() {
-                    fmter.entry(&(Byte(r.start)..=Byte(r.end)));
-                }
-            }
-        }
-        fmter.finish()
-    }
-}
-
-/// A set of characters represented by Unicode scalar values.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct ClassUnicode {
-    set: IntervalSet<ClassUnicodeRange>,
-}
-
-impl ClassUnicode {
-    /// Create a new class from a sequence of ranges.
-    ///
-    /// The given ranges do not need to be in any specific order, and ranges
-    /// may overlap. Ranges will automatically be sorted into a canonical
-    /// non-overlapping order.
-    pub fn new<I>(ranges: I) -> ClassUnicode
-    where
-        I: IntoIterator<Item = ClassUnicodeRange>,
-    {
-        ClassUnicode { set: IntervalSet::new(ranges) }
-    }
-
-    /// Create a new class with no ranges.
-    ///
-    /// An empty class matches nothing. That is, it is equivalent to
-    /// [`Hir::fail`].
-    pub fn empty() -> ClassUnicode {
-        ClassUnicode::new(vec![])
-    }
-
-    /// Add a new range to this set.
-    pub fn push(&mut self, range: ClassUnicodeRange) {
-        self.set.push(range);
-    }
-
-    /// Return an iterator over all ranges in this class.
-    ///
-    /// The iterator yields ranges in ascending order.
-    pub fn iter(&self) -> ClassUnicodeIter<'_> {
-        ClassUnicodeIter(self.set.iter())
-    }
-
-    /// Return the underlying ranges as a slice.
-    pub fn ranges(&self) -> &[ClassUnicodeRange] {
-        self.set.intervals()
-    }
-
-    /// Expand this character class such that it contains all case folded
-    /// characters, according to Unicode's "simple" mapping. For example, if
-    /// this class consists of the range `a-z`, then applying case folding will
-    /// result in the class containing both the ranges `a-z` and `A-Z`.
-    ///
-    /// # Panics
-    ///
-    /// This routine panics when the case mapping data necessary for this
-    /// routine to complete is unavailable. This occurs when the `unicode-case`
-    /// feature is not enabled.
-    ///
-    /// Callers should prefer using `try_case_fold_simple` instead, which will
-    /// return an error instead of panicking.
-    pub fn case_fold_simple(&mut self) {
-        self.set
-            .case_fold_simple()
-            .expect("unicode-case feature must be enabled");
-    }
-
-    /// Expand this character class such that it contains all case folded
-    /// characters, according to Unicode's "simple" mapping. For example, if
-    /// this class consists of the range `a-z`, then applying case folding will
-    /// result in the class containing both the ranges `a-z` and `A-Z`.
-    ///
-    /// # Error
-    ///
-    /// This routine returns an error when the case mapping data necessary
-    /// for this routine to complete is unavailable. This occurs when the
-    /// `unicode-case` feature is not enabled.
-    pub fn try_case_fold_simple(
-        &mut self,
-    ) -> core::result::Result<(), CaseFoldError> {
-        self.set.case_fold_simple()
-    }
-
-    /// Negate this character class.
-    ///
-    /// For all `c` where `c` is a Unicode scalar value, if `c` was in this
-    /// set, then it will not be in this set after negation.
-    pub fn negate(&mut self) {
-        self.set.negate();
-    }
-
-    /// Union this character class with the given character class, in place.
-    pub fn union(&mut self, other: &ClassUnicode) {
-        self.set.union(&other.set);
-    }
-
-    /// Intersect this character class with the given character class, in
-    /// place.
-    pub fn intersect(&mut self, other: &ClassUnicode) {
-        self.set.intersect(&other.set);
-    }
-
-    /// Subtract the given character class from this character class, in place.
-    pub fn difference(&mut self, other: &ClassUnicode) {
-        self.set.difference(&other.set);
-    }
-
-    /// Compute the symmetric difference of the given character classes, in
-    /// place.
-    ///
-    /// This computes the symmetric difference of two character classes. This
-    /// removes all elements in this class that are also in the given class,
-    /// but all adds all elements from the given class that aren't in this
-    /// class. That is, the class will contain all elements in either class,
-    /// but will not contain any elements that are in both classes.
-    pub fn symmetric_difference(&mut self, other: &ClassUnicode) {
-        self.set.symmetric_difference(&other.set);
-    }
-
-    /// Returns true if and only if this character class will either match
-    /// nothing or only ASCII bytes. Stated differently, this returns false
-    /// if and only if this class contains a non-ASCII codepoint.
-    pub fn is_ascii(&self) -> bool {
-        self.set.intervals().last().map_or(true, |r| r.end <= '\x7F')
-    }
-
-    /// Returns the length, in bytes, of the smallest string matched by this
-    /// character class.
-    ///
-    /// Returns `None` when the class is empty.
-    pub fn minimum_len(&self) -> Option<usize> {
-        let first = self.ranges().get(0)?;
-        // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8().
-        Some(first.start.len_utf8())
-    }
-
-    /// Returns the length, in bytes, of the longest string matched by this
-    /// character class.
-    ///
-    /// Returns `None` when the class is empty.
-    pub fn maximum_len(&self) -> Option<usize> {
-        let last = self.ranges().last()?;
-        // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8().
-        Some(last.end.len_utf8())
-    }
-
-    /// If this class consists of exactly one codepoint, then return it as
-    /// a literal byte string.
-    ///
-    /// If this class is empty or contains more than one codepoint, then `None`
-    /// is returned.
-    pub fn literal(&self) -> Option<Vec<u8>> {
-        let rs = self.ranges();
-        if rs.len() == 1 && rs[0].start == rs[0].end {
-            Some(rs[0].start.encode_utf8(&mut [0; 4]).to_string().into_bytes())
-        } else {
-            None
-        }
-    }
-
-    /// If this class consists of only ASCII ranges, then return its
-    /// corresponding and equivalent byte class.
-    pub fn to_byte_class(&self) -> Option<ClassBytes> {
-        if !self.is_ascii() {
-            return None;
-        }
-        Some(ClassBytes::new(self.ranges().iter().map(|r| {
-            // Since we are guaranteed that our codepoint range is ASCII, the
-            // 'u8::try_from' calls below are guaranteed to be correct.
-            ClassBytesRange {
-                start: u8::try_from(r.start).unwrap(),
-                end: u8::try_from(r.end).unwrap(),
-            }
-        })))
-    }
-}
-
-/// An iterator over all ranges in a Unicode character class.
-///
-/// The lifetime `'a` refers to the lifetime of the underlying class.
-#[derive(Debug)]
-pub struct ClassUnicodeIter<'a>(IntervalSetIter<'a, ClassUnicodeRange>);
-
-impl<'a> Iterator for ClassUnicodeIter<'a> {
-    type Item = &'a ClassUnicodeRange;
-
-    fn next(&mut self) -> Option<&'a ClassUnicodeRange> {
-        self.0.next()
-    }
-}
-
-/// A single range of characters represented by Unicode scalar values.
-///
-/// The range is closed. That is, the start and end of the range are included
-/// in the range.
-#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)]
-pub struct ClassUnicodeRange {
-    start: char,
-    end: char,
-}
-
-impl core::fmt::Debug for ClassUnicodeRange {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        let start = if !self.start.is_whitespace() && !self.start.is_control()
-        {
-            self.start.to_string()
-        } else {
-            format!("0x{:X}", u32::from(self.start))
-        };
-        let end = if !self.end.is_whitespace() && !self.end.is_control() {
-            self.end.to_string()
-        } else {
-            format!("0x{:X}", u32::from(self.end))
-        };
-        f.debug_struct("ClassUnicodeRange")
-            .field("start", &start)
-            .field("end", &end)
-            .finish()
-    }
-}
-
-impl Interval for ClassUnicodeRange {
-    type Bound = char;
-
-    #[inline]
-    fn lower(&self) -> char {
-        self.start
-    }
-    #[inline]
-    fn upper(&self) -> char {
-        self.end
-    }
-    #[inline]
-    fn set_lower(&mut self, bound: char) {
-        self.start = bound;
-    }
-    #[inline]
-    fn set_upper(&mut self, bound: char) {
-        self.end = bound;
-    }
-
-    /// Apply simple case folding to this Unicode scalar value range.
-    ///
-    /// Additional ranges are appended to the given vector. Canonical ordering
-    /// is *not* maintained in the given vector.
-    fn case_fold_simple(
-        &self,
-        ranges: &mut Vec<ClassUnicodeRange>,
-    ) -> Result<(), unicode::CaseFoldError> {
-        let mut folder = unicode::SimpleCaseFolder::new()?;
-        if !folder.overlaps(self.start, self.end) {
-            return Ok(());
-        }
-        let (start, end) = (u32::from(self.start), u32::from(self.end));
-        for cp in (start..=end).filter_map(char::from_u32) {
-            for &cp_folded in folder.mapping(cp) {
-                ranges.push(ClassUnicodeRange::new(cp_folded, cp_folded));
-            }
-        }
-        Ok(())
-    }
-}
-
-impl ClassUnicodeRange {
-    /// Create a new Unicode scalar value range for a character class.
-    ///
-    /// The returned range is always in a canonical form. That is, the range
-    /// returned always satisfies the invariant that `start <= end`.
-    pub fn new(start: char, end: char) -> ClassUnicodeRange {
-        ClassUnicodeRange::create(start, end)
-    }
-
-    /// Return the start of this range.
-    ///
-    /// The start of a range is always less than or equal to the end of the
-    /// range.
-    pub fn start(&self) -> char {
-        self.start
-    }
-
-    /// Return the end of this range.
-    ///
-    /// The end of a range is always greater than or equal to the start of the
-    /// range.
-    pub fn end(&self) -> char {
-        self.end
-    }
-
-    /// Returns the number of codepoints in this range.
-    pub fn len(&self) -> usize {
-        let diff = 1 + u32::from(self.end) - u32::from(self.start);
-        // This is likely to panic in 16-bit targets since a usize can only fit
-        // 2^16. It's not clear what to do here, other than to return an error
-        // when building a Unicode class that contains a range whose length
-        // overflows usize. (Which, to be honest, is probably quite common on
-        // 16-bit targets. For example, this would imply that '.' and '\p{any}'
-        // would be impossible to build.)
-        usize::try_from(diff).expect("char class len fits in usize")
-    }
-}
-
-/// A set of characters represented by arbitrary bytes.
-///
-/// Each byte corresponds to one character.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct ClassBytes {
-    set: IntervalSet<ClassBytesRange>,
-}
-
-impl ClassBytes {
-    /// Create a new class from a sequence of ranges.
-    ///
-    /// The given ranges do not need to be in any specific order, and ranges
-    /// may overlap. Ranges will automatically be sorted into a canonical
-    /// non-overlapping order.
-    pub fn new<I>(ranges: I) -> ClassBytes
-    where
-        I: IntoIterator<Item = ClassBytesRange>,
-    {
-        ClassBytes { set: IntervalSet::new(ranges) }
-    }
-
-    /// Create a new class with no ranges.
-    ///
-    /// An empty class matches nothing. That is, it is equivalent to
-    /// [`Hir::fail`].
-    pub fn empty() -> ClassBytes {
-        ClassBytes::new(vec![])
-    }
-
-    /// Add a new range to this set.
-    pub fn push(&mut self, range: ClassBytesRange) {
-        self.set.push(range);
-    }
-
-    /// Return an iterator over all ranges in this class.
-    ///
-    /// The iterator yields ranges in ascending order.
-    pub fn iter(&self) -> ClassBytesIter<'_> {
-        ClassBytesIter(self.set.iter())
-    }
-
-    /// Return the underlying ranges as a slice.
-    pub fn ranges(&self) -> &[ClassBytesRange] {
-        self.set.intervals()
-    }
-
-    /// Expand this character class such that it contains all case folded
-    /// characters. For example, if this class consists of the range `a-z`,
-    /// then applying case folding will result in the class containing both the
-    /// ranges `a-z` and `A-Z`.
-    ///
-    /// Note that this only applies ASCII case folding, which is limited to the
-    /// characters `a-z` and `A-Z`.
-    pub fn case_fold_simple(&mut self) {
-        self.set.case_fold_simple().expect("ASCII case folding never fails");
-    }
-
-    /// Negate this byte class.
-    ///
-    /// For all `b` where `b` is a any byte, if `b` was in this set, then it
-    /// will not be in this set after negation.
-    pub fn negate(&mut self) {
-        self.set.negate();
-    }
-
-    /// Union this byte class with the given byte class, in place.
-    pub fn union(&mut self, other: &ClassBytes) {
-        self.set.union(&other.set);
-    }
-
-    /// Intersect this byte class with the given byte class, in place.
-    pub fn intersect(&mut self, other: &ClassBytes) {
-        self.set.intersect(&other.set);
-    }
-
-    /// Subtract the given byte class from this byte class, in place.
-    pub fn difference(&mut self, other: &ClassBytes) {
-        self.set.difference(&other.set);
-    }
-
-    /// Compute the symmetric difference of the given byte classes, in place.
-    ///
-    /// This computes the symmetric difference of two byte classes. This
-    /// removes all elements in this class that are also in the given class,
-    /// but all adds all elements from the given class that aren't in this
-    /// class. That is, the class will contain all elements in either class,
-    /// but will not contain any elements that are in both classes.
-    pub fn symmetric_difference(&mut self, other: &ClassBytes) {
-        self.set.symmetric_difference(&other.set);
-    }
-
-    /// Returns true if and only if this character class will either match
-    /// nothing or only ASCII bytes. Stated differently, this returns false
-    /// if and only if this class contains a non-ASCII byte.
-    pub fn is_ascii(&self) -> bool {
-        self.set.intervals().last().map_or(true, |r| r.end <= 0x7F)
-    }
-
-    /// Returns the length, in bytes, of the smallest string matched by this
-    /// character class.
-    ///
-    /// Returns `None` when the class is empty.
-    pub fn minimum_len(&self) -> Option<usize> {
-        if self.ranges().is_empty() {
-            None
-        } else {
-            Some(1)
-        }
-    }
-
-    /// Returns the length, in bytes, of the longest string matched by this
-    /// character class.
-    ///
-    /// Returns `None` when the class is empty.
-    pub fn maximum_len(&self) -> Option<usize> {
-        if self.ranges().is_empty() {
-            None
-        } else {
-            Some(1)
-        }
-    }
-
-    /// If this class consists of exactly one byte, then return it as
-    /// a literal byte string.
-    ///
-    /// If this class is empty or contains more than one byte, then `None`
-    /// is returned.
-    pub fn literal(&self) -> Option<Vec<u8>> {
-        let rs = self.ranges();
-        if rs.len() == 1 && rs[0].start == rs[0].end {
-            Some(vec![rs[0].start])
-        } else {
-            None
-        }
-    }
-
-    /// If this class consists of only ASCII ranges, then return its
-    /// corresponding and equivalent Unicode class.
-    pub fn to_unicode_class(&self) -> Option<ClassUnicode> {
-        if !self.is_ascii() {
-            return None;
-        }
-        Some(ClassUnicode::new(self.ranges().iter().map(|r| {
-            // Since we are guaranteed that our byte range is ASCII, the
-            // 'char::from' calls below are correct and will not erroneously
-            // convert a raw byte value into its corresponding codepoint.
-            ClassUnicodeRange {
-                start: char::from(r.start),
-                end: char::from(r.end),
-            }
-        })))
-    }
-}
-
-/// An iterator over all ranges in a byte character class.
-///
-/// The lifetime `'a` refers to the lifetime of the underlying class.
-#[derive(Debug)]
-pub struct ClassBytesIter<'a>(IntervalSetIter<'a, ClassBytesRange>);
-
-impl<'a> Iterator for ClassBytesIter<'a> {
-    type Item = &'a ClassBytesRange;
-
-    fn next(&mut self) -> Option<&'a ClassBytesRange> {
-        self.0.next()
-    }
-}
-
-/// A single range of characters represented by arbitrary bytes.
-///
-/// The range is closed. That is, the start and end of the range are included
-/// in the range.
-#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)]
-pub struct ClassBytesRange {
-    start: u8,
-    end: u8,
-}
-
-impl Interval for ClassBytesRange {
-    type Bound = u8;
-
-    #[inline]
-    fn lower(&self) -> u8 {
-        self.start
-    }
-    #[inline]
-    fn upper(&self) -> u8 {
-        self.end
-    }
-    #[inline]
-    fn set_lower(&mut self, bound: u8) {
-        self.start = bound;
-    }
-    #[inline]
-    fn set_upper(&mut self, bound: u8) {
-        self.end = bound;
-    }
-
-    /// Apply simple case folding to this byte range. Only ASCII case mappings
-    /// (for a-z) are applied.
-    ///
-    /// Additional ranges are appended to the given vector. Canonical ordering
-    /// is *not* maintained in the given vector.
-    fn case_fold_simple(
-        &self,
-        ranges: &mut Vec<ClassBytesRange>,
-    ) -> Result<(), unicode::CaseFoldError> {
-        if !ClassBytesRange::new(b'a', b'z').is_intersection_empty(self) {
-            let lower = cmp::max(self.start, b'a');
-            let upper = cmp::min(self.end, b'z');
-            ranges.push(ClassBytesRange::new(lower - 32, upper - 32));
-        }
-        if !ClassBytesRange::new(b'A', b'Z').is_intersection_empty(self) {
-            let lower = cmp::max(self.start, b'A');
-            let upper = cmp::min(self.end, b'Z');
-            ranges.push(ClassBytesRange::new(lower + 32, upper + 32));
-        }
-        Ok(())
-    }
-}
-
-impl ClassBytesRange {
-    /// Create a new byte range for a character class.
-    ///
-    /// The returned range is always in a canonical form. That is, the range
-    /// returned always satisfies the invariant that `start <= end`.
-    pub fn new(start: u8, end: u8) -> ClassBytesRange {
-        ClassBytesRange::create(start, end)
-    }
-
-    /// Return the start of this range.
-    ///
-    /// The start of a range is always less than or equal to the end of the
-    /// range.
-    pub fn start(&self) -> u8 {
-        self.start
-    }
-
-    /// Return the end of this range.
-    ///
-    /// The end of a range is always greater than or equal to the start of the
-    /// range.
-    pub fn end(&self) -> u8 {
-        self.end
-    }
-
-    /// Returns the number of bytes in this range.
-    pub fn len(&self) -> usize {
-        usize::from(self.end.checked_sub(self.start).unwrap())
-            .checked_add(1)
-            .unwrap()
-    }
-}
-
-impl core::fmt::Debug for ClassBytesRange {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        f.debug_struct("ClassBytesRange")
-            .field("start", &crate::debug::Byte(self.start))
-            .field("end", &crate::debug::Byte(self.end))
-            .finish()
-    }
-}
-
-/// The high-level intermediate representation for a look-around assertion.
-///
-/// An assertion match is always zero-length. Also called an "empty match."
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum Look {
-    /// Match the beginning of text. Specifically, this matches at the starting
-    /// position of the input.
-    Start = 1 << 0,
-    /// Match the end of text. Specifically, this matches at the ending
-    /// position of the input.
-    End = 1 << 1,
-    /// Match the beginning of a line or the beginning of text. Specifically,
-    /// this matches at the starting position of the input, or at the position
-    /// immediately following a `\n` character.
-    StartLF = 1 << 2,
-    /// Match the end of a line or the end of text. Specifically, this matches
-    /// at the end position of the input, or at the position immediately
-    /// preceding a `\n` character.
-    EndLF = 1 << 3,
-    /// Match the beginning of a line or the beginning of text. Specifically,
-    /// this matches at the starting position of the input, or at the position
-    /// immediately following either a `\r` or `\n` character, but never after
-    /// a `\r` when a `\n` follows.
-    StartCRLF = 1 << 4,
-    /// Match the end of a line or the end of text. Specifically, this matches
-    /// at the end position of the input, or at the position immediately
-    /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r`
-    /// precedes it.
-    EndCRLF = 1 << 5,
-    /// Match an ASCII-only word boundary. That is, this matches a position
-    /// where the left adjacent character and right adjacent character
-    /// correspond to a word and non-word or a non-word and word character.
-    WordAscii = 1 << 6,
-    /// Match an ASCII-only negation of a word boundary.
-    WordAsciiNegate = 1 << 7,
-    /// Match a Unicode-aware word boundary. That is, this matches a position
-    /// where the left adjacent character and right adjacent character
-    /// correspond to a word and non-word or a non-word and word character.
-    WordUnicode = 1 << 8,
-    /// Match a Unicode-aware negation of a word boundary.
-    WordUnicodeNegate = 1 << 9,
-    /// Match the start of an ASCII-only word boundary. That is, this matches a
-    /// position at either the beginning of the haystack or where the previous
-    /// character is not a word character and the following character is a word
-    /// character.
-    WordStartAscii = 1 << 10,
-    /// Match the end of an ASCII-only word boundary. That is, this matches
-    /// a position at either the end of the haystack or where the previous
-    /// character is a word character and the following character is not a word
-    /// character.
-    WordEndAscii = 1 << 11,
-    /// Match the start of a Unicode word boundary. That is, this matches a
-    /// position at either the beginning of the haystack or where the previous
-    /// character is not a word character and the following character is a word
-    /// character.
-    WordStartUnicode = 1 << 12,
-    /// Match the end of a Unicode word boundary. That is, this matches a
-    /// position at either the end of the haystack or where the previous
-    /// character is a word character and the following character is not a word
-    /// character.
-    WordEndUnicode = 1 << 13,
-    /// Match the start half of an ASCII-only word boundary. That is, this
-    /// matches a position at either the beginning of the haystack or where the
-    /// previous character is not a word character.
-    WordStartHalfAscii = 1 << 14,
-    /// Match the end half of an ASCII-only word boundary. That is, this
-    /// matches a position at either the end of the haystack or where the
-    /// following character is not a word character.
-    WordEndHalfAscii = 1 << 15,
-    /// Match the start half of a Unicode word boundary. That is, this matches
-    /// a position at either the beginning of the haystack or where the
-    /// previous character is not a word character.
-    WordStartHalfUnicode = 1 << 16,
-    /// Match the end half of a Unicode word boundary. That is, this matches
-    /// a position at either the end of the haystack or where the following
-    /// character is not a word character.
-    WordEndHalfUnicode = 1 << 17,
-}
-
-impl Look {
-    /// Flip the look-around assertion to its equivalent for reverse searches.
-    /// For example, `StartLF` gets translated to `EndLF`.
-    ///
-    /// Some assertions, such as `WordUnicode`, remain the same since they
-    /// match the same positions regardless of the direction of the search.
-    #[inline]
-    pub const fn reversed(self) -> Look {
-        match self {
-            Look::Start => Look::End,
-            Look::End => Look::Start,
-            Look::StartLF => Look::EndLF,
-            Look::EndLF => Look::StartLF,
-            Look::StartCRLF => Look::EndCRLF,
-            Look::EndCRLF => Look::StartCRLF,
-            Look::WordAscii => Look::WordAscii,
-            Look::WordAsciiNegate => Look::WordAsciiNegate,
-            Look::WordUnicode => Look::WordUnicode,
-            Look::WordUnicodeNegate => Look::WordUnicodeNegate,
-            Look::WordStartAscii => Look::WordEndAscii,
-            Look::WordEndAscii => Look::WordStartAscii,
-            Look::WordStartUnicode => Look::WordEndUnicode,
-            Look::WordEndUnicode => Look::WordStartUnicode,
-            Look::WordStartHalfAscii => Look::WordEndHalfAscii,
-            Look::WordEndHalfAscii => Look::WordStartHalfAscii,
-            Look::WordStartHalfUnicode => Look::WordEndHalfUnicode,
-            Look::WordEndHalfUnicode => Look::WordStartHalfUnicode,
-        }
-    }
-
-    /// Return the underlying representation of this look-around enumeration
-    /// as an integer. Giving the return value to the [`Look::from_repr`]
-    /// constructor is guaranteed to return the same look-around variant that
-    /// one started with within a semver compatible release of this crate.
-    #[inline]
-    pub const fn as_repr(self) -> u32 {
-        // AFAIK, 'as' is the only way to zero-cost convert an int enum to an
-        // actual int.
-        self as u32
-    }
-
-    /// Given the underlying representation of a `Look` value, return the
-    /// corresponding `Look` value if the representation is valid. Otherwise
-    /// `None` is returned.
-    #[inline]
-    pub const fn from_repr(repr: u32) -> Option<Look> {
-        match repr {
-            0b00_0000_0000_0000_0001 => Some(Look::Start),
-            0b00_0000_0000_0000_0010 => Some(Look::End),
-            0b00_0000_0000_0000_0100 => Some(Look::StartLF),
-            0b00_0000_0000_0000_1000 => Some(Look::EndLF),
-            0b00_0000_0000_0001_0000 => Some(Look::StartCRLF),
-            0b00_0000_0000_0010_0000 => Some(Look::EndCRLF),
-            0b00_0000_0000_0100_0000 => Some(Look::WordAscii),
-            0b00_0000_0000_1000_0000 => Some(Look::WordAsciiNegate),
-            0b00_0000_0001_0000_0000 => Some(Look::WordUnicode),
-            0b00_0000_0010_0000_0000 => Some(Look::WordUnicodeNegate),
-            0b00_0000_0100_0000_0000 => Some(Look::WordStartAscii),
-            0b00_0000_1000_0000_0000 => Some(Look::WordEndAscii),
-            0b00_0001_0000_0000_0000 => Some(Look::WordStartUnicode),
-            0b00_0010_0000_0000_0000 => Some(Look::WordEndUnicode),
-            0b00_0100_0000_0000_0000 => Some(Look::WordStartHalfAscii),
-            0b00_1000_0000_0000_0000 => Some(Look::WordEndHalfAscii),
-            0b01_0000_0000_0000_0000 => Some(Look::WordStartHalfUnicode),
-            0b10_0000_0000_0000_0000 => Some(Look::WordEndHalfUnicode),
-            _ => None,
-        }
-    }
-
-    /// Returns a convenient single codepoint representation of this
-    /// look-around assertion. Each assertion is guaranteed to be represented
-    /// by a distinct character.
-    ///
-    /// This is useful for succinctly representing a look-around assertion in
-    /// human friendly but succinct output intended for a programmer working on
-    /// regex internals.
-    #[inline]
-    pub const fn as_char(self) -> char {
-        match self {
-            Look::Start => 'A',
-            Look::End => 'z',
-            Look::StartLF => '^',
-            Look::EndLF => '$',
-            Look::StartCRLF => 'r',
-            Look::EndCRLF => 'R',
-            Look::WordAscii => 'b',
-            Look::WordAsciiNegate => 'B',
-            Look::WordUnicode => '𝛃',
-            Look::WordUnicodeNegate => 'đš©',
-            Look::WordStartAscii => '<',
-            Look::WordEndAscii => '>',
-            Look::WordStartUnicode => '〈',
-            Look::WordEndUnicode => '〉',
-            Look::WordStartHalfAscii => '◁',
-            Look::WordEndHalfAscii => '▷',
-            Look::WordStartHalfUnicode => '◀',
-            Look::WordEndHalfUnicode => '▶',
-        }
-    }
-}
-
-/// The high-level intermediate representation for a capturing group.
-///
-/// A capturing group always has an index and a child expression. It may
-/// also have a name associated with it (e.g., `(?P<foo>\w)`), but it's not
-/// necessary.
-///
-/// Note that there is no explicit representation of a non-capturing group
-/// in a `Hir`. Instead, non-capturing grouping is handled automatically by
-/// the recursive structure of the `Hir` itself.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct Capture {
-    /// The capture index of the capture.
-    pub index: u32,
-    /// The name of the capture, if it exists.
-    pub name: Option<Box<str>>,
-    /// The expression inside the capturing group, which may be empty.
-    pub sub: Box<Hir>,
-}
-
-/// The high-level intermediate representation of a repetition operator.
-///
-/// A repetition operator permits the repetition of an arbitrary
-/// sub-expression.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct Repetition {
-    /// The minimum range of the repetition.
-    ///
-    /// Note that special cases like `?`, `+` and `*` all get translated into
-    /// the ranges `{0,1}`, `{1,}` and `{0,}`, respectively.
-    ///
-    /// When `min` is zero, this expression can match the empty string
-    /// regardless of what its sub-expression is.
-    pub min: u32,
-    /// The maximum range of the repetition.
-    ///
-    /// Note that when `max` is `None`, `min` acts as a lower bound but where
-    /// there is no upper bound. For something like `x{5}` where the min and
-    /// max are equivalent, `min` will be set to `5` and `max` will be set to
-    /// `Some(5)`.
-    pub max: Option<u32>,
-    /// Whether this repetition operator is greedy or not. A greedy operator
-    /// will match as much as it can. A non-greedy operator will match as
-    /// little as it can.
-    ///
-    /// Typically, operators are greedy by default and are only non-greedy when
-    /// a `?` suffix is used, e.g., `(expr)*` is greedy while `(expr)*?` is
-    /// not. However, this can be inverted via the `U` "ungreedy" flag.
-    pub greedy: bool,
-    /// The expression being repeated.
-    pub sub: Box<Hir>,
-}
-
-impl Repetition {
-    /// Returns a new repetition with the same `min`, `max` and `greedy`
-    /// values, but with its sub-expression replaced with the one given.
-    pub fn with(&self, sub: Hir) -> Repetition {
-        Repetition {
-            min: self.min,
-            max: self.max,
-            greedy: self.greedy,
-            sub: Box::new(sub),
-        }
-    }
-}
-
-/// A type describing the different flavors of `.`.
-///
-/// This type is meant to be used with [`Hir::dot`], which is a convenience
-/// routine for building HIR values derived from the `.` regex.
-#[non_exhaustive]
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum Dot {
-    /// Matches the UTF-8 encoding of any Unicode scalar value.
-    ///
-    /// This is equivalent to `(?su:.)` and also `\p{any}`.
-    AnyChar,
-    /// Matches any byte value.
-    ///
-    /// This is equivalent to `(?s-u:.)` and also `(?-u:[\x00-\xFF])`.
-    AnyByte,
-    /// Matches the UTF-8 encoding of any Unicode scalar value except for the
-    /// `char` given.
-    ///
-    /// This is equivalent to using `(?u-s:.)` with the line terminator set
-    /// to a particular ASCII byte. (Because of peculiarities in the regex
-    /// engines, a line terminator must be a single byte. It follows that when
-    /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar
-    /// value. That is, ti must be ASCII.)
-    ///
-    /// (This and `AnyCharExceptLF` both exist because of legacy reasons.
-    /// `AnyCharExceptLF` will be dropped in the next breaking change release.)
-    AnyCharExcept(char),
-    /// Matches the UTF-8 encoding of any Unicode scalar value except for `\n`.
-    ///
-    /// This is equivalent to `(?u-s:.)` and also `[\p{any}--\n]`.
-    AnyCharExceptLF,
-    /// Matches the UTF-8 encoding of any Unicode scalar value except for `\r`
-    /// and `\n`.
-    ///
-    /// This is equivalent to `(?uR-s:.)` and also `[\p{any}--\r\n]`.
-    AnyCharExceptCRLF,
-    /// Matches any byte value except for the `u8` given.
-    ///
-    /// This is equivalent to using `(?-us:.)` with the line terminator set
-    /// to a particular ASCII byte. (Because of peculiarities in the regex
-    /// engines, a line terminator must be a single byte. It follows that when
-    /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar
-    /// value. That is, ti must be ASCII.)
-    ///
-    /// (This and `AnyByteExceptLF` both exist because of legacy reasons.
-    /// `AnyByteExceptLF` will be dropped in the next breaking change release.)
-    AnyByteExcept(u8),
-    /// Matches any byte value except for `\n`.
-    ///
-    /// This is equivalent to `(?-su:.)` and also `(?-u:[[\x00-\xFF]--\n])`.
-    AnyByteExceptLF,
-    /// Matches any byte value except for `\r` and `\n`.
-    ///
-    /// This is equivalent to `(?R-su:.)` and also `(?-u:[[\x00-\xFF]--\r\n])`.
-    AnyByteExceptCRLF,
-}
-
-/// A custom `Drop` impl is used for `HirKind` such that it uses constant stack
-/// space but heap space proportional to the depth of the total `Hir`.
-impl Drop for Hir {
-    fn drop(&mut self) {
-        use core::mem;
-
-        match *self.kind() {
-            HirKind::Empty
-            | HirKind::Literal(_)
-            | HirKind::Class(_)
-            | HirKind::Look(_) => return,
-            HirKind::Capture(ref x) if x.sub.kind.subs().is_empty() => return,
-            HirKind::Repetition(ref x) if x.sub.kind.subs().is_empty() => {
-                return
-            }
-            HirKind::Concat(ref x) if x.is_empty() => return,
-            HirKind::Alternation(ref x) if x.is_empty() => return,
-            _ => {}
-        }
-
-        let mut stack = vec![mem::replace(self, Hir::empty())];
-        while let Some(mut expr) = stack.pop() {
-            match expr.kind {
-                HirKind::Empty
-                | HirKind::Literal(_)
-                | HirKind::Class(_)
-                | HirKind::Look(_) => {}
-                HirKind::Capture(ref mut x) => {
-                    stack.push(mem::replace(&mut x.sub, Hir::empty()));
-                }
-                HirKind::Repetition(ref mut x) => {
-                    stack.push(mem::replace(&mut x.sub, Hir::empty()));
-                }
-                HirKind::Concat(ref mut x) => {
-                    stack.extend(x.drain(..));
-                }
-                HirKind::Alternation(ref mut x) => {
-                    stack.extend(x.drain(..));
-                }
-            }
-        }
-    }
-}
-
-/// A type that collects various properties of an HIR value.
-///
-/// Properties are always scalar values and represent meta data that is
-/// computed inductively on an HIR value. Properties are defined for all
-/// HIR values.
-///
-/// All methods on a `Properties` value take constant time and are meant to
-/// be cheap to call.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct Properties(Box<PropertiesI>);
-
-/// The property definition. It is split out so that we can box it, and
-/// there by make `Properties` use less stack size. This is kind-of important
-/// because every HIR value has a `Properties` attached to it.
-///
-/// This does have the unfortunate consequence that creating any HIR value
-/// always leads to at least one alloc for properties, but this is generally
-/// true anyway (for pretty much all HirKinds except for look-arounds).
-#[derive(Clone, Debug, Eq, PartialEq)]
-struct PropertiesI {
-    minimum_len: Option<usize>,
-    maximum_len: Option<usize>,
-    look_set: LookSet,
-    look_set_prefix: LookSet,
-    look_set_suffix: LookSet,
-    look_set_prefix_any: LookSet,
-    look_set_suffix_any: LookSet,
-    utf8: bool,
-    explicit_captures_len: usize,
-    static_explicit_captures_len: Option<usize>,
-    literal: bool,
-    alternation_literal: bool,
-}
-
-impl Properties {
-    /// Returns the length (in bytes) of the smallest string matched by this
-    /// HIR.
-    ///
-    /// A return value of `0` is possible and occurs when the HIR can match an
-    /// empty string.
-    ///
-    /// `None` is returned when there is no minimum length. This occurs in
-    /// precisely the cases where the HIR matches nothing. i.e., The language
-    /// the regex matches is empty. An example of such a regex is `\P{any}`.
-    #[inline]
-    pub fn minimum_len(&self) -> Option<usize> {
-        self.0.minimum_len
-    }
-
-    /// Returns the length (in bytes) of the longest string matched by this
-    /// HIR.
-    ///
-    /// A return value of `0` is possible and occurs when nothing longer than
-    /// the empty string is in the language described by this HIR.
-    ///
-    /// `None` is returned when there is no longest matching string. This
-    /// occurs when the HIR matches nothing or when there is no upper bound on
-    /// the length of matching strings. Example of such regexes are `\P{any}`
-    /// (matches nothing) and `a+` (has no upper bound).
-    #[inline]
-    pub fn maximum_len(&self) -> Option<usize> {
-        self.0.maximum_len
-    }
-
-    /// Returns a set of all look-around assertions that appear at least once
-    /// in this HIR value.
-    #[inline]
-    pub fn look_set(&self) -> LookSet {
-        self.0.look_set
-    }
-
-    /// Returns a set of all look-around assertions that appear as a prefix for
-    /// this HIR value. That is, the set returned corresponds to the set of
-    /// assertions that must be passed before matching any bytes in a haystack.
-    ///
-    /// For example, `hir.look_set_prefix().contains(Look::Start)` returns true
-    /// if and only if the HIR is fully anchored at the start.
-    #[inline]
-    pub fn look_set_prefix(&self) -> LookSet {
-        self.0.look_set_prefix
-    }
-
-    /// Returns a set of all look-around assertions that appear as a _possible_
-    /// prefix for this HIR value. That is, the set returned corresponds to the
-    /// set of assertions that _may_ be passed before matching any bytes in a
-    /// haystack.
-    ///
-    /// For example, `hir.look_set_prefix_any().contains(Look::Start)` returns
-    /// true if and only if it's possible for the regex to match through a
-    /// anchored assertion before consuming any input.
-    #[inline]
-    pub fn look_set_prefix_any(&self) -> LookSet {
-        self.0.look_set_prefix_any
-    }
-
-    /// Returns a set of all look-around assertions that appear as a suffix for
-    /// this HIR value. That is, the set returned corresponds to the set of
-    /// assertions that must be passed in order to be considered a match after
-    /// all other consuming HIR expressions.
-    ///
-    /// For example, `hir.look_set_suffix().contains(Look::End)` returns true
-    /// if and only if the HIR is fully anchored at the end.
-    #[inline]
-    pub fn look_set_suffix(&self) -> LookSet {
-        self.0.look_set_suffix
-    }
-
-    /// Returns a set of all look-around assertions that appear as a _possible_
-    /// suffix for this HIR value. That is, the set returned corresponds to the
-    /// set of assertions that _may_ be passed before matching any bytes in a
-    /// haystack.
-    ///
-    /// For example, `hir.look_set_suffix_any().contains(Look::End)` returns
-    /// true if and only if it's possible for the regex to match through a
-    /// anchored assertion at the end of a match without consuming any input.
-    #[inline]
-    pub fn look_set_suffix_any(&self) -> LookSet {
-        self.0.look_set_suffix_any
-    }
-
-    /// Return true if and only if the corresponding HIR will always match
-    /// valid UTF-8.
-    ///
-    /// When this returns false, then it is possible for this HIR expression to
-    /// match invalid UTF-8, including by matching between the code units of
-    /// a single UTF-8 encoded codepoint.
-    ///
-    /// Note that this returns true even when the corresponding HIR can match
-    /// the empty string. Since an empty string can technically appear between
-    /// UTF-8 code units, it is possible for a match to be reported that splits
-    /// a codepoint which could in turn be considered matching invalid UTF-8.
-    /// However, it is generally assumed that such empty matches are handled
-    /// specially by the search routine if it is absolutely required that
-    /// matches not split a codepoint.
-    ///
-    /// # Example
-    ///
-    /// This code example shows the UTF-8 property of a variety of patterns.
-    ///
-    /// ```
-    /// use regex_syntax::{ParserBuilder, parse};
-    ///
-    /// // Examples of 'is_utf8() == true'.
-    /// assert!(parse(r"a")?.properties().is_utf8());
-    /// assert!(parse(r"[^a]")?.properties().is_utf8());
-    /// assert!(parse(r".")?.properties().is_utf8());
-    /// assert!(parse(r"\W")?.properties().is_utf8());
-    /// assert!(parse(r"\b")?.properties().is_utf8());
-    /// assert!(parse(r"\B")?.properties().is_utf8());
-    /// assert!(parse(r"(?-u)\b")?.properties().is_utf8());
-    /// assert!(parse(r"(?-u)\B")?.properties().is_utf8());
-    /// // Unicode mode is enabled by default, and in
-    /// // that mode, all \x hex escapes are treated as
-    /// // codepoints. So this actually matches the UTF-8
-    /// // encoding of U+00FF.
-    /// assert!(parse(r"\xFF")?.properties().is_utf8());
-    ///
-    /// // Now we show examples of 'is_utf8() == false'.
-    /// // The only way to do this is to force the parser
-    /// // to permit invalid UTF-8, otherwise all of these
-    /// // would fail to parse!
-    /// let parse = |pattern| {
-    ///     ParserBuilder::new().utf8(false).build().parse(pattern)
-    /// };
-    /// assert!(!parse(r"(?-u)[^a]")?.properties().is_utf8());
-    /// assert!(!parse(r"(?-u).")?.properties().is_utf8());
-    /// assert!(!parse(r"(?-u)\W")?.properties().is_utf8());
-    /// // Conversely to the equivalent example above,
-    /// // when Unicode mode is disabled, \x hex escapes
-    /// // are treated as their raw byte values.
-    /// assert!(!parse(r"(?-u)\xFF")?.properties().is_utf8());
-    /// // Note that just because we disabled UTF-8 in the
-    /// // parser doesn't mean we still can't use Unicode.
-    /// // It is enabled by default, so \xFF is still
-    /// // equivalent to matching the UTF-8 encoding of
-    /// // U+00FF by default.
-    /// assert!(parse(r"\xFF")?.properties().is_utf8());
-    /// // Even though we use raw bytes that individually
-    /// // are not valid UTF-8, when combined together, the
-    /// // overall expression *does* match valid UTF-8!
-    /// assert!(parse(r"(?-u)\xE2\x98\x83")?.properties().is_utf8());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn is_utf8(&self) -> bool {
-        self.0.utf8
-    }
-
-    /// Returns the total number of explicit capturing groups in the
-    /// corresponding HIR.
-    ///
-    /// Note that this does not include the implicit capturing group
-    /// corresponding to the entire match that is typically included by regex
-    /// engines.
-    ///
-    /// # Example
-    ///
-    /// This method will return `0` for `a` and `1` for `(a)`:
-    ///
-    /// ```
-    /// use regex_syntax::parse;
-    ///
-    /// assert_eq!(0, parse("a")?.properties().explicit_captures_len());
-    /// assert_eq!(1, parse("(a)")?.properties().explicit_captures_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn explicit_captures_len(&self) -> usize {
-        self.0.explicit_captures_len
-    }
-
-    /// Returns the total number of explicit capturing groups that appear in
-    /// every possible match.
-    ///
-    /// If the number of capture groups can vary depending on the match, then
-    /// this returns `None`. That is, a value is only returned when the number
-    /// of matching groups is invariant or "static."
-    ///
-    /// Note that this does not include the implicit capturing group
-    /// corresponding to the entire match.
-    ///
-    /// # Example
-    ///
-    /// This shows a few cases where a static number of capture groups is
-    /// available and a few cases where it is not.
-    ///
-    /// ```
-    /// use regex_syntax::parse;
-    ///
-    /// let len = |pattern| {
-    ///     parse(pattern).map(|h| {
-    ///         h.properties().static_explicit_captures_len()
-    ///     })
-    /// };
-    ///
-    /// assert_eq!(Some(0), len("a")?);
-    /// assert_eq!(Some(1), len("(a)")?);
-    /// assert_eq!(Some(1), len("(a)|(b)")?);
-    /// assert_eq!(Some(2), len("(a)(b)|(c)(d)")?);
-    /// assert_eq!(None, len("(a)|b")?);
-    /// assert_eq!(None, len("a|(b)")?);
-    /// assert_eq!(None, len("(b)*")?);
-    /// assert_eq!(Some(1), len("(b)+")?);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn static_explicit_captures_len(&self) -> Option<usize> {
-        self.0.static_explicit_captures_len
-    }
-
-    /// Return true if and only if this HIR is a simple literal. This is
-    /// only true when this HIR expression is either itself a `Literal` or a
-    /// concatenation of only `Literal`s.
-    ///
-    /// For example, `f` and `foo` are literals, but `f+`, `(foo)`, `foo()` and
-    /// the empty string are not (even though they contain sub-expressions that
-    /// are literals).
-    #[inline]
-    pub fn is_literal(&self) -> bool {
-        self.0.literal
-    }
-
-    /// Return true if and only if this HIR is either a simple literal or an
-    /// alternation of simple literals. This is only
-    /// true when this HIR expression is either itself a `Literal` or a
-    /// concatenation of only `Literal`s or an alternation of only `Literal`s.
-    ///
-    /// For example, `f`, `foo`, `a|b|c`, and `foo|bar|baz` are alternation
-    /// literals, but `f+`, `(foo)`, `foo()`, and the empty pattern are not
-    /// (even though that contain sub-expressions that are literals).
-    #[inline]
-    pub fn is_alternation_literal(&self) -> bool {
-        self.0.alternation_literal
-    }
-
-    /// Returns the total amount of heap memory usage, in bytes, used by this
-    /// `Properties` value.
-    #[inline]
-    pub fn memory_usage(&self) -> usize {
-        core::mem::size_of::<PropertiesI>()
-    }
-
-    /// Returns a new set of properties that corresponds to the union of the
-    /// iterator of properties given.
-    ///
-    /// This is useful when one has multiple `Hir` expressions and wants
-    /// to combine them into a single alternation without constructing the
-    /// corresponding `Hir`. This routine provides a way of combining the
-    /// properties of each `Hir` expression into one set of properties
-    /// representing the union of those expressions.
-    ///
-    /// # Example: union with HIRs that never match
-    ///
-    /// This example shows that unioning properties together with one that
-    /// represents a regex that never matches will "poison" certain attributes,
-    /// like the minimum and maximum lengths.
-    ///
-    /// ```
-    /// use regex_syntax::{hir::Properties, parse};
-    ///
-    /// let hir1 = parse("ab?c?")?;
-    /// assert_eq!(Some(1), hir1.properties().minimum_len());
-    /// assert_eq!(Some(3), hir1.properties().maximum_len());
-    ///
-    /// let hir2 = parse(r"[a&&b]")?;
-    /// assert_eq!(None, hir2.properties().minimum_len());
-    /// assert_eq!(None, hir2.properties().maximum_len());
-    ///
-    /// let hir3 = parse(r"wxy?z?")?;
-    /// assert_eq!(Some(2), hir3.properties().minimum_len());
-    /// assert_eq!(Some(4), hir3.properties().maximum_len());
-    ///
-    /// let unioned = Properties::union([
-    ///		hir1.properties(),
-    ///		hir2.properties(),
-    ///		hir3.properties(),
-    ///	]);
-    /// assert_eq!(None, unioned.minimum_len());
-    /// assert_eq!(None, unioned.maximum_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// The maximum length can also be "poisoned" by a pattern that has no
-    /// upper bound on the length of a match. The minimum length remains
-    /// unaffected:
-    ///
-    /// ```
-    /// use regex_syntax::{hir::Properties, parse};
-    ///
-    /// let hir1 = parse("ab?c?")?;
-    /// assert_eq!(Some(1), hir1.properties().minimum_len());
-    /// assert_eq!(Some(3), hir1.properties().maximum_len());
-    ///
-    /// let hir2 = parse(r"a+")?;
-    /// assert_eq!(Some(1), hir2.properties().minimum_len());
-    /// assert_eq!(None, hir2.properties().maximum_len());
-    ///
-    /// let hir3 = parse(r"wxy?z?")?;
-    /// assert_eq!(Some(2), hir3.properties().minimum_len());
-    /// assert_eq!(Some(4), hir3.properties().maximum_len());
-    ///
-    /// let unioned = Properties::union([
-    ///		hir1.properties(),
-    ///		hir2.properties(),
-    ///		hir3.properties(),
-    ///	]);
-    /// assert_eq!(Some(1), unioned.minimum_len());
-    /// assert_eq!(None, unioned.maximum_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn union<I, P>(props: I) -> Properties
-    where
-        I: IntoIterator<Item = P>,
-        P: core::borrow::Borrow<Properties>,
-    {
-        let mut it = props.into_iter().peekable();
-        // While empty alternations aren't possible, we still behave as if they
-        // are. When we have an empty alternate, then clearly the look-around
-        // prefix and suffix is empty. Otherwise, it is the intersection of all
-        // prefixes and suffixes (respectively) of the branches.
-        let fix = if it.peek().is_none() {
-            LookSet::empty()
-        } else {
-            LookSet::full()
-        };
-        // And also, an empty alternate means we have 0 static capture groups,
-        // but we otherwise start with the number corresponding to the first
-        // alternate. If any subsequent alternate has a different number of
-        // static capture groups, then we overall have a variation and not a
-        // static number of groups.
-        let static_explicit_captures_len =
-            it.peek().and_then(|p| p.borrow().static_explicit_captures_len());
-        // The base case is an empty alternation, which matches nothing.
-        // Note though that empty alternations aren't possible, because the
-        // Hir::alternation smart constructor rewrites those as empty character
-        // classes.
-        let mut props = PropertiesI {
-            minimum_len: None,
-            maximum_len: None,
-            look_set: LookSet::empty(),
-            look_set_prefix: fix,
-            look_set_suffix: fix,
-            look_set_prefix_any: LookSet::empty(),
-            look_set_suffix_any: LookSet::empty(),
-            utf8: true,
-            explicit_captures_len: 0,
-            static_explicit_captures_len,
-            literal: false,
-            alternation_literal: true,
-        };
-        let (mut min_poisoned, mut max_poisoned) = (false, false);
-        // Handle properties that need to visit every child hir.
-        for prop in it {
-            let p = prop.borrow();
-            props.look_set.set_union(p.look_set());
-            props.look_set_prefix.set_intersect(p.look_set_prefix());
-            props.look_set_suffix.set_intersect(p.look_set_suffix());
-            props.look_set_prefix_any.set_union(p.look_set_prefix_any());
-            props.look_set_suffix_any.set_union(p.look_set_suffix_any());
-            props.utf8 = props.utf8 && p.is_utf8();
-            props.explicit_captures_len = props
-                .explicit_captures_len
-                .saturating_add(p.explicit_captures_len());
-            if props.static_explicit_captures_len
-                != p.static_explicit_captures_len()
-            {
-                props.static_explicit_captures_len = None;
-            }
-            props.alternation_literal =
-                props.alternation_literal && p.is_literal();
-            if !min_poisoned {
-                if let Some(xmin) = p.minimum_len() {
-                    if props.minimum_len.map_or(true, |pmin| xmin < pmin) {
-                        props.minimum_len = Some(xmin);
-                    }
-                } else {
-                    props.minimum_len = None;
-                    min_poisoned = true;
-                }
-            }
-            if !max_poisoned {
-                if let Some(xmax) = p.maximum_len() {
-                    if props.maximum_len.map_or(true, |pmax| xmax > pmax) {
-                        props.maximum_len = Some(xmax);
-                    }
-                } else {
-                    props.maximum_len = None;
-                    max_poisoned = true;
-                }
-            }
-        }
-        Properties(Box::new(props))
-    }
-}
-
-impl Properties {
-    /// Create a new set of HIR properties for an empty regex.
-    fn empty() -> Properties {
-        let inner = PropertiesI {
-            minimum_len: Some(0),
-            maximum_len: Some(0),
-            look_set: LookSet::empty(),
-            look_set_prefix: LookSet::empty(),
-            look_set_suffix: LookSet::empty(),
-            look_set_prefix_any: LookSet::empty(),
-            look_set_suffix_any: LookSet::empty(),
-            // It is debatable whether an empty regex always matches at valid
-            // UTF-8 boundaries. Strictly speaking, at a byte oriented view,
-            // it is clearly false. There are, for example, many empty strings
-            // between the bytes encoding a '☃'.
-            //
-            // However, when Unicode mode is enabled, the fundamental atom
-            // of matching is really a codepoint. And in that scenario, an
-            // empty regex is defined to only match at valid UTF-8 boundaries
-            // and to never split a codepoint. It just so happens that this
-            // enforcement is somewhat tricky to do for regexes that match
-            // the empty string inside regex engines themselves. It usually
-            // requires some layer above the regex engine to filter out such
-            // matches.
-            //
-            // In any case, 'true' is really the only coherent option. If it
-            // were false, for example, then 'a*' would also need to be false
-            // since it too can match the empty string.
-            utf8: true,
-            explicit_captures_len: 0,
-            static_explicit_captures_len: Some(0),
-            literal: false,
-            alternation_literal: false,
-        };
-        Properties(Box::new(inner))
-    }
-
-    /// Create a new set of HIR properties for a literal regex.
-    fn literal(lit: &Literal) -> Properties {
-        let inner = PropertiesI {
-            minimum_len: Some(lit.0.len()),
-            maximum_len: Some(lit.0.len()),
-            look_set: LookSet::empty(),
-            look_set_prefix: LookSet::empty(),
-            look_set_suffix: LookSet::empty(),
-            look_set_prefix_any: LookSet::empty(),
-            look_set_suffix_any: LookSet::empty(),
-            utf8: core::str::from_utf8(&lit.0).is_ok(),
-            explicit_captures_len: 0,
-            static_explicit_captures_len: Some(0),
-            literal: true,
-            alternation_literal: true,
-        };
-        Properties(Box::new(inner))
-    }
-
-    /// Create a new set of HIR properties for a character class.
-    fn class(class: &Class) -> Properties {
-        let inner = PropertiesI {
-            minimum_len: class.minimum_len(),
-            maximum_len: class.maximum_len(),
-            look_set: LookSet::empty(),
-            look_set_prefix: LookSet::empty(),
-            look_set_suffix: LookSet::empty(),
-            look_set_prefix_any: LookSet::empty(),
-            look_set_suffix_any: LookSet::empty(),
-            utf8: class.is_utf8(),
-            explicit_captures_len: 0,
-            static_explicit_captures_len: Some(0),
-            literal: false,
-            alternation_literal: false,
-        };
-        Properties(Box::new(inner))
-    }
-
-    /// Create a new set of HIR properties for a look-around assertion.
-    fn look(look: Look) -> Properties {
-        let inner = PropertiesI {
-            minimum_len: Some(0),
-            maximum_len: Some(0),
-            look_set: LookSet::singleton(look),
-            look_set_prefix: LookSet::singleton(look),
-            look_set_suffix: LookSet::singleton(look),
-            look_set_prefix_any: LookSet::singleton(look),
-            look_set_suffix_any: LookSet::singleton(look),
-            // This requires a little explanation. Basically, we don't consider
-            // matching an empty string to be equivalent to matching invalid
-            // UTF-8, even though technically matching every empty string will
-            // split the UTF-8 encoding of a single codepoint when treating a
-            // UTF-8 encoded string as a sequence of bytes. Our defense here is
-            // that in such a case, a codepoint should logically be treated as
-            // the fundamental atom for matching, and thus the only valid match
-            // points are between codepoints and not bytes.
-            //
-            // More practically, this is true here because it's also true
-            // for 'Hir::empty()', otherwise something like 'a*' would be
-            // considered to match invalid UTF-8. That in turn makes this
-            // property borderline useless.
-            utf8: true,
-            explicit_captures_len: 0,
-            static_explicit_captures_len: Some(0),
-            literal: false,
-            alternation_literal: false,
-        };
-        Properties(Box::new(inner))
-    }
-
-    /// Create a new set of HIR properties for a repetition.
-    fn repetition(rep: &Repetition) -> Properties {
-        let p = rep.sub.properties();
-        let minimum_len = p.minimum_len().map(|child_min| {
-            let rep_min = usize::try_from(rep.min).unwrap_or(usize::MAX);
-            child_min.saturating_mul(rep_min)
-        });
-        let maximum_len = rep.max.and_then(|rep_max| {
-            let rep_max = usize::try_from(rep_max).ok()?;
-            let child_max = p.maximum_len()?;
-            child_max.checked_mul(rep_max)
-        });
-
-        let mut inner = PropertiesI {
-            minimum_len,
-            maximum_len,
-            look_set: p.look_set(),
-            look_set_prefix: LookSet::empty(),
-            look_set_suffix: LookSet::empty(),
-            look_set_prefix_any: p.look_set_prefix_any(),
-            look_set_suffix_any: p.look_set_suffix_any(),
-            utf8: p.is_utf8(),
-            explicit_captures_len: p.explicit_captures_len(),
-            static_explicit_captures_len: p.static_explicit_captures_len(),
-            literal: false,
-            alternation_literal: false,
-        };
-        // If the repetition operator can match the empty string, then its
-        // lookset prefix and suffixes themselves remain empty since they are
-        // no longer required to match.
-        if rep.min > 0 {
-            inner.look_set_prefix = p.look_set_prefix();
-            inner.look_set_suffix = p.look_set_suffix();
-        }
-        // If the static captures len of the sub-expression is not known or
-        // is greater than zero, then it automatically propagates to the
-        // repetition, regardless of the repetition. Otherwise, it might
-        // change, but only when the repetition can match 0 times.
-        if rep.min == 0
-            && inner.static_explicit_captures_len.map_or(false, |len| len > 0)
-        {
-            // If we require a match 0 times, then our captures len is
-            // guaranteed to be zero. Otherwise, if we *can* match the empty
-            // string, then it's impossible to know how many captures will be
-            // in the resulting match.
-            if rep.max == Some(0) {
-                inner.static_explicit_captures_len = Some(0);
-            } else {
-                inner.static_explicit_captures_len = None;
-            }
-        }
-        Properties(Box::new(inner))
-    }
-
-    /// Create a new set of HIR properties for a capture.
-    fn capture(capture: &Capture) -> Properties {
-        let p = capture.sub.properties();
-        Properties(Box::new(PropertiesI {
-            explicit_captures_len: p.explicit_captures_len().saturating_add(1),
-            static_explicit_captures_len: p
-                .static_explicit_captures_len()
-                .map(|len| len.saturating_add(1)),
-            literal: false,
-            alternation_literal: false,
-            ..*p.0.clone()
-        }))
-    }
-
-    /// Create a new set of HIR properties for a concatenation.
-    fn concat(concat: &[Hir]) -> Properties {
-        // The base case is an empty concatenation, which matches the empty
-        // string. Note though that empty concatenations aren't possible,
-        // because the Hir::concat smart constructor rewrites those as
-        // Hir::empty.
-        let mut props = PropertiesI {
-            minimum_len: Some(0),
-            maximum_len: Some(0),
-            look_set: LookSet::empty(),
-            look_set_prefix: LookSet::empty(),
-            look_set_suffix: LookSet::empty(),
-            look_set_prefix_any: LookSet::empty(),
-            look_set_suffix_any: LookSet::empty(),
-            utf8: true,
-            explicit_captures_len: 0,
-            static_explicit_captures_len: Some(0),
-            literal: true,
-            alternation_literal: true,
-        };
-        // Handle properties that need to visit every child hir.
-        for x in concat.iter() {
-            let p = x.properties();
-            props.look_set.set_union(p.look_set());
-            props.utf8 = props.utf8 && p.is_utf8();
-            props.explicit_captures_len = props
-                .explicit_captures_len
-                .saturating_add(p.explicit_captures_len());
-            props.static_explicit_captures_len = p
-                .static_explicit_captures_len()
-                .and_then(|len1| {
-                    Some((len1, props.static_explicit_captures_len?))
-                })
-                .and_then(|(len1, len2)| Some(len1.saturating_add(len2)));
-            props.literal = props.literal && p.is_literal();
-            props.alternation_literal =
-                props.alternation_literal && p.is_alternation_literal();
-            if let Some(minimum_len) = props.minimum_len {
-                match p.minimum_len() {
-                    None => props.minimum_len = None,
-                    Some(len) => {
-                        // We use saturating arithmetic here because the
-                        // minimum is just a lower bound. We can't go any
-                        // higher than what our number types permit.
-                        props.minimum_len =
-                            Some(minimum_len.saturating_add(len));
-                    }
-                }
-            }
-            if let Some(maximum_len) = props.maximum_len {
-                match p.maximum_len() {
-                    None => props.maximum_len = None,
-                    Some(len) => {
-                        props.maximum_len = maximum_len.checked_add(len)
-                    }
-                }
-            }
-        }
-        // Handle the prefix properties, which only requires visiting
-        // child exprs until one matches more than the empty string.
-        let mut it = concat.iter();
-        while let Some(x) = it.next() {
-            props.look_set_prefix.set_union(x.properties().look_set_prefix());
-            props
-                .look_set_prefix_any
-                .set_union(x.properties().look_set_prefix_any());
-            if x.properties().maximum_len().map_or(true, |x| x > 0) {
-                break;
-            }
-        }
-        // Same thing for the suffix properties, but in reverse.
-        let mut it = concat.iter().rev();
-        while let Some(x) = it.next() {
-            props.look_set_suffix.set_union(x.properties().look_set_suffix());
-            props
-                .look_set_suffix_any
-                .set_union(x.properties().look_set_suffix_any());
-            if x.properties().maximum_len().map_or(true, |x| x > 0) {
-                break;
-            }
-        }
-        Properties(Box::new(props))
-    }
-
-    /// Create a new set of HIR properties for a concatenation.
-    fn alternation(alts: &[Hir]) -> Properties {
-        Properties::union(alts.iter().map(|hir| hir.properties()))
-    }
-}
-
-/// A set of look-around assertions.
-///
-/// This is useful for efficiently tracking look-around assertions. For
-/// example, an [`Hir`] provides properties that return `LookSet`s.
-#[derive(Clone, Copy, Default, Eq, PartialEq)]
-pub struct LookSet {
-    /// The underlying representation this set is exposed to make it possible
-    /// to store it somewhere efficiently. The representation is that
-    /// of a bitset, where each assertion occupies bit `i` where `i =
-    /// Look::as_repr()`.
-    ///
-    /// Note that users of this internal representation must permit the full
-    /// range of `u16` values to be represented. For example, even if the
-    /// current implementation only makes use of the 10 least significant bits,
-    /// it may use more bits in a future semver compatible release.
-    pub bits: u32,
-}
-
-impl LookSet {
-    /// Create an empty set of look-around assertions.
-    #[inline]
-    pub fn empty() -> LookSet {
-        LookSet { bits: 0 }
-    }
-
-    /// Create a full set of look-around assertions.
-    ///
-    /// This set contains all possible look-around assertions.
-    #[inline]
-    pub fn full() -> LookSet {
-        LookSet { bits: !0 }
-    }
-
-    /// Create a look-around set containing the look-around assertion given.
-    ///
-    /// This is a convenience routine for creating an empty set and inserting
-    /// one look-around assertions.
-    #[inline]
-    pub fn singleton(look: Look) -> LookSet {
-        LookSet::empty().insert(look)
-    }
-
-    /// Returns the total number of look-around assertions in this set.
-    #[inline]
-    pub fn len(self) -> usize {
-        // OK because max value always fits in a u8, which in turn always
-        // fits in a usize, regardless of target.
-        usize::try_from(self.bits.count_ones()).unwrap()
-    }
-
-    /// Returns true if and only if this set is empty.
-    #[inline]
-    pub fn is_empty(self) -> bool {
-        self.len() == 0
-    }
-
-    /// Returns true if and only if the given look-around assertion is in this
-    /// set.
-    #[inline]
-    pub fn contains(self, look: Look) -> bool {
-        self.bits & look.as_repr() != 0
-    }
-
-    /// Returns true if and only if this set contains any anchor assertions.
-    /// This includes both "start/end of haystack" and "start/end of line."
-    #[inline]
-    pub fn contains_anchor(&self) -> bool {
-        self.contains_anchor_haystack() || self.contains_anchor_line()
-    }
-
-    /// Returns true if and only if this set contains any "start/end of
-    /// haystack" anchors. This doesn't include "start/end of line" anchors.
-    #[inline]
-    pub fn contains_anchor_haystack(&self) -> bool {
-        self.contains(Look::Start) || self.contains(Look::End)
-    }
-
-    /// Returns true if and only if this set contains any "start/end of line"
-    /// anchors. This doesn't include "start/end of haystack" anchors. This
-    /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors.
-    #[inline]
-    pub fn contains_anchor_line(&self) -> bool {
-        self.contains(Look::StartLF)
-            || self.contains(Look::EndLF)
-            || self.contains(Look::StartCRLF)
-            || self.contains(Look::EndCRLF)
-    }
-
-    /// Returns true if and only if this set contains any "start/end of line"
-    /// anchors that only treat `\n` as line terminators. This does not include
-    /// haystack anchors or CRLF aware line anchors.
-    #[inline]
-    pub fn contains_anchor_lf(&self) -> bool {
-        self.contains(Look::StartLF) || self.contains(Look::EndLF)
-    }
-
-    /// Returns true if and only if this set contains any "start/end of line"
-    /// anchors that are CRLF-aware. This doesn't include "start/end of
-    /// haystack" or "start/end of line-feed" anchors.
-    #[inline]
-    pub fn contains_anchor_crlf(&self) -> bool {
-        self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF)
-    }
-
-    /// Returns true if and only if this set contains any word boundary or
-    /// negated word boundary assertions. This include both Unicode and ASCII
-    /// word boundaries.
-    #[inline]
-    pub fn contains_word(self) -> bool {
-        self.contains_word_unicode() || self.contains_word_ascii()
-    }
-
-    /// Returns true if and only if this set contains any Unicode word boundary
-    /// or negated Unicode word boundary assertions.
-    #[inline]
-    pub fn contains_word_unicode(self) -> bool {
-        self.contains(Look::WordUnicode)
-            || self.contains(Look::WordUnicodeNegate)
-            || self.contains(Look::WordStartUnicode)
-            || self.contains(Look::WordEndUnicode)
-            || self.contains(Look::WordStartHalfUnicode)
-            || self.contains(Look::WordEndHalfUnicode)
-    }
-
-    /// Returns true if and only if this set contains any ASCII word boundary
-    /// or negated ASCII word boundary assertions.
-    #[inline]
-    pub fn contains_word_ascii(self) -> bool {
-        self.contains(Look::WordAscii)
-            || self.contains(Look::WordAsciiNegate)
-            || self.contains(Look::WordStartAscii)
-            || self.contains(Look::WordEndAscii)
-            || self.contains(Look::WordStartHalfAscii)
-            || self.contains(Look::WordEndHalfAscii)
-    }
-
-    /// Returns an iterator over all of the look-around assertions in this set.
-    #[inline]
-    pub fn iter(self) -> LookSetIter {
-        LookSetIter { set: self }
-    }
-
-    /// Return a new set that is equivalent to the original, but with the given
-    /// assertion added to it. If the assertion is already in the set, then the
-    /// returned set is equivalent to the original.
-    #[inline]
-    pub fn insert(self, look: Look) -> LookSet {
-        LookSet { bits: self.bits | look.as_repr() }
-    }
-
-    /// Updates this set in place with the result of inserting the given
-    /// assertion into this set.
-    #[inline]
-    pub fn set_insert(&mut self, look: Look) {
-        *self = self.insert(look);
-    }
-
-    /// Return a new set that is equivalent to the original, but with the given
-    /// assertion removed from it. If the assertion is not in the set, then the
-    /// returned set is equivalent to the original.
-    #[inline]
-    pub fn remove(self, look: Look) -> LookSet {
-        LookSet { bits: self.bits & !look.as_repr() }
-    }
-
-    /// Updates this set in place with the result of removing the given
-    /// assertion from this set.
-    #[inline]
-    pub fn set_remove(&mut self, look: Look) {
-        *self = self.remove(look);
-    }
-
-    /// Returns a new set that is the result of subtracting the given set from
-    /// this set.
-    #[inline]
-    pub fn subtract(self, other: LookSet) -> LookSet {
-        LookSet { bits: self.bits & !other.bits }
-    }
-
-    /// Updates this set in place with the result of subtracting the given set
-    /// from this set.
-    #[inline]
-    pub fn set_subtract(&mut self, other: LookSet) {
-        *self = self.subtract(other);
-    }
-
-    /// Returns a new set that is the union of this and the one given.
-    #[inline]
-    pub fn union(self, other: LookSet) -> LookSet {
-        LookSet { bits: self.bits | other.bits }
-    }
-
-    /// Updates this set in place with the result of unioning it with the one
-    /// given.
-    #[inline]
-    pub fn set_union(&mut self, other: LookSet) {
-        *self = self.union(other);
-    }
-
-    /// Returns a new set that is the intersection of this and the one given.
-    #[inline]
-    pub fn intersect(self, other: LookSet) -> LookSet {
-        LookSet { bits: self.bits & other.bits }
-    }
-
-    /// Updates this set in place with the result of intersecting it with the
-    /// one given.
-    #[inline]
-    pub fn set_intersect(&mut self, other: LookSet) {
-        *self = self.intersect(other);
-    }
-
-    /// Return a `LookSet` from the slice given as a native endian 32-bit
-    /// integer.
-    ///
-    /// # Panics
-    ///
-    /// This panics if `slice.len() < 4`.
-    #[inline]
-    pub fn read_repr(slice: &[u8]) -> LookSet {
-        let bits = u32::from_ne_bytes(slice[..4].try_into().unwrap());
-        LookSet { bits }
-    }
-
-    /// Write a `LookSet` as a native endian 32-bit integer to the beginning
-    /// of the slice given.
-    ///
-    /// # Panics
-    ///
-    /// This panics if `slice.len() < 4`.
-    #[inline]
-    pub fn write_repr(self, slice: &mut [u8]) {
-        let raw = self.bits.to_ne_bytes();
-        slice[0] = raw[0];
-        slice[1] = raw[1];
-        slice[2] = raw[2];
-        slice[3] = raw[3];
-    }
-}
-
-impl core::fmt::Debug for LookSet {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        if self.is_empty() {
-            return write!(f, "∅");
-        }
-        for look in self.iter() {
-            write!(f, "{}", look.as_char())?;
-        }
-        Ok(())
-    }
-}
-
-/// An iterator over all look-around assertions in a [`LookSet`].
-///
-/// This iterator is created by [`LookSet::iter`].
-#[derive(Clone, Debug)]
-pub struct LookSetIter {
-    set: LookSet,
-}
-
-impl Iterator for LookSetIter {
-    type Item = Look;
-
-    #[inline]
-    fn next(&mut self) -> Option<Look> {
-        if self.set.is_empty() {
-            return None;
-        }
-        // We'll never have more than u8::MAX distinct look-around assertions,
-        // so 'bit' will always fit into a u16.
-        let bit = u16::try_from(self.set.bits.trailing_zeros()).unwrap();
-        let look = Look::from_repr(1 << bit)?;
-        self.set = self.set.remove(look);
-        Some(look)
-    }
-}
-
-/// Given a sequence of HIR values where each value corresponds to a Unicode
-/// class (or an all-ASCII byte class), return a single Unicode class
-/// corresponding to the union of the classes found.
-fn class_chars(hirs: &[Hir]) -> Option<Class> {
-    let mut cls = ClassUnicode::new(vec![]);
-    for hir in hirs.iter() {
-        match *hir.kind() {
-            HirKind::Class(Class::Unicode(ref cls2)) => {
-                cls.union(cls2);
-            }
-            HirKind::Class(Class::Bytes(ref cls2)) => {
-                cls.union(&cls2.to_unicode_class()?);
-            }
-            _ => return None,
-        };
-    }
-    Some(Class::Unicode(cls))
-}
-
-/// Given a sequence of HIR values where each value corresponds to a byte class
-/// (or an all-ASCII Unicode class), return a single byte class corresponding
-/// to the union of the classes found.
-fn class_bytes(hirs: &[Hir]) -> Option<Class> {
-    let mut cls = ClassBytes::new(vec![]);
-    for hir in hirs.iter() {
-        match *hir.kind() {
-            HirKind::Class(Class::Unicode(ref cls2)) => {
-                cls.union(&cls2.to_byte_class()?);
-            }
-            HirKind::Class(Class::Bytes(ref cls2)) => {
-                cls.union(cls2);
-            }
-            _ => return None,
-        };
-    }
-    Some(Class::Bytes(cls))
-}
-
-/// Given a sequence of HIR values where each value corresponds to a literal
-/// that is a single `char`, return that sequence of `char`s. Otherwise return
-/// None. No deduplication is done.
-fn singleton_chars(hirs: &[Hir]) -> Option<Vec<char>> {
-    let mut singletons = vec![];
-    for hir in hirs.iter() {
-        let literal = match *hir.kind() {
-            HirKind::Literal(Literal(ref bytes)) => bytes,
-            _ => return None,
-        };
-        let ch = match crate::debug::utf8_decode(literal) {
-            None => return None,
-            Some(Err(_)) => return None,
-            Some(Ok(ch)) => ch,
-        };
-        if literal.len() != ch.len_utf8() {
-            return None;
-        }
-        singletons.push(ch);
-    }
-    Some(singletons)
-}
-
-/// Given a sequence of HIR values where each value corresponds to a literal
-/// that is a single byte, return that sequence of bytes. Otherwise return
-/// None. No deduplication is done.
-fn singleton_bytes(hirs: &[Hir]) -> Option<Vec<u8>> {
-    let mut singletons = vec![];
-    for hir in hirs.iter() {
-        let literal = match *hir.kind() {
-            HirKind::Literal(Literal(ref bytes)) => bytes,
-            _ => return None,
-        };
-        if literal.len() != 1 {
-            return None;
-        }
-        singletons.push(literal[0]);
-    }
-    Some(singletons)
-}
-
-/// Looks for a common prefix in the list of alternation branches given. If one
-/// is found, then an equivalent but (hopefully) simplified Hir is returned.
-/// Otherwise, the original given list of branches is returned unmodified.
-///
-/// This is not quite as good as it could be. Right now, it requires that
-/// all branches are 'Concat' expressions. It also doesn't do well with
-/// literals. For example, given 'foofoo|foobar', it will not refactor it to
-/// 'foo(?:foo|bar)' because literals are flattened into their own special
-/// concatenation. (One wonders if perhaps 'Literal' should be a single atom
-/// instead of a string of bytes because of this. Otherwise, handling the
-/// current representation in this routine will be pretty gnarly. Sigh.)
-fn lift_common_prefix(hirs: Vec<Hir>) -> Result<Hir, Vec<Hir>> {
-    if hirs.len() <= 1 {
-        return Err(hirs);
-    }
-    let mut prefix = match hirs[0].kind() {
-        HirKind::Concat(ref xs) => &**xs,
-        _ => return Err(hirs),
-    };
-    if prefix.is_empty() {
-        return Err(hirs);
-    }
-    for h in hirs.iter().skip(1) {
-        let concat = match h.kind() {
-            HirKind::Concat(ref xs) => xs,
-            _ => return Err(hirs),
-        };
-        let common_len = prefix
-            .iter()
-            .zip(concat.iter())
-            .take_while(|(x, y)| x == y)
-            .count();
-        prefix = &prefix[..common_len];
-        if prefix.is_empty() {
-            return Err(hirs);
-        }
-    }
-    let len = prefix.len();
-    assert_ne!(0, len);
-    let mut prefix_concat = vec![];
-    let mut suffix_alts = vec![];
-    for h in hirs {
-        let mut concat = match h.into_kind() {
-            HirKind::Concat(xs) => xs,
-            // We required all sub-expressions to be
-            // concats above, so we're only here if we
-            // have a concat.
-            _ => unreachable!(),
-        };
-        suffix_alts.push(Hir::concat(concat.split_off(len)));
-        if prefix_concat.is_empty() {
-            prefix_concat = concat;
-        }
-    }
-    let mut concat = prefix_concat;
-    concat.push(Hir::alternation(suffix_alts));
-    Ok(Hir::concat(concat))
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    fn uclass(ranges: &[(char, char)]) -> ClassUnicode {
-        let ranges: Vec<ClassUnicodeRange> = ranges
-            .iter()
-            .map(|&(s, e)| ClassUnicodeRange::new(s, e))
-            .collect();
-        ClassUnicode::new(ranges)
-    }
-
-    fn bclass(ranges: &[(u8, u8)]) -> ClassBytes {
-        let ranges: Vec<ClassBytesRange> =
-            ranges.iter().map(|&(s, e)| ClassBytesRange::new(s, e)).collect();
-        ClassBytes::new(ranges)
-    }
-
-    fn uranges(cls: &ClassUnicode) -> Vec<(char, char)> {
-        cls.iter().map(|x| (x.start(), x.end())).collect()
-    }
-
-    #[cfg(feature = "unicode-case")]
-    fn ucasefold(cls: &ClassUnicode) -> ClassUnicode {
-        let mut cls_ = cls.clone();
-        cls_.case_fold_simple();
-        cls_
-    }
-
-    fn uunion(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode {
-        let mut cls_ = cls1.clone();
-        cls_.union(cls2);
-        cls_
-    }
-
-    fn uintersect(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode {
-        let mut cls_ = cls1.clone();
-        cls_.intersect(cls2);
-        cls_
-    }
-
-    fn udifference(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode {
-        let mut cls_ = cls1.clone();
-        cls_.difference(cls2);
-        cls_
-    }
-
-    fn usymdifference(
-        cls1: &ClassUnicode,
-        cls2: &ClassUnicode,
-    ) -> ClassUnicode {
-        let mut cls_ = cls1.clone();
-        cls_.symmetric_difference(cls2);
-        cls_
-    }
-
-    fn unegate(cls: &ClassUnicode) -> ClassUnicode {
-        let mut cls_ = cls.clone();
-        cls_.negate();
-        cls_
-    }
-
-    fn branges(cls: &ClassBytes) -> Vec<(u8, u8)> {
-        cls.iter().map(|x| (x.start(), x.end())).collect()
-    }
-
-    fn bcasefold(cls: &ClassBytes) -> ClassBytes {
-        let mut cls_ = cls.clone();
-        cls_.case_fold_simple();
-        cls_
-    }
-
-    fn bunion(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes {
-        let mut cls_ = cls1.clone();
-        cls_.union(cls2);
-        cls_
-    }
-
-    fn bintersect(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes {
-        let mut cls_ = cls1.clone();
-        cls_.intersect(cls2);
-        cls_
-    }
-
-    fn bdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes {
-        let mut cls_ = cls1.clone();
-        cls_.difference(cls2);
-        cls_
-    }
-
-    fn bsymdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes {
-        let mut cls_ = cls1.clone();
-        cls_.symmetric_difference(cls2);
-        cls_
-    }
-
-    fn bnegate(cls: &ClassBytes) -> ClassBytes {
-        let mut cls_ = cls.clone();
-        cls_.negate();
-        cls_
-    }
-
-    #[test]
-    fn class_range_canonical_unicode() {
-        let range = ClassUnicodeRange::new('\u{00FF}', '\0');
-        assert_eq!('\0', range.start());
-        assert_eq!('\u{00FF}', range.end());
-    }
-
-    #[test]
-    fn class_range_canonical_bytes() {
-        let range = ClassBytesRange::new(b'\xFF', b'\0');
-        assert_eq!(b'\0', range.start());
-        assert_eq!(b'\xFF', range.end());
-    }
-
-    #[test]
-    fn class_canonicalize_unicode() {
-        let cls = uclass(&[('a', 'c'), ('x', 'z')]);
-        let expected = vec![('a', 'c'), ('x', 'z')];
-        assert_eq!(expected, uranges(&cls));
-
-        let cls = uclass(&[('x', 'z'), ('a', 'c')]);
-        let expected = vec![('a', 'c'), ('x', 'z')];
-        assert_eq!(expected, uranges(&cls));
-
-        let cls = uclass(&[('x', 'z'), ('w', 'y')]);
-        let expected = vec![('w', 'z')];
-        assert_eq!(expected, uranges(&cls));
-
-        let cls = uclass(&[
-            ('c', 'f'),
-            ('a', 'g'),
-            ('d', 'j'),
-            ('a', 'c'),
-            ('m', 'p'),
-            ('l', 's'),
-        ]);
-        let expected = vec![('a', 'j'), ('l', 's')];
-        assert_eq!(expected, uranges(&cls));
-
-        let cls = uclass(&[('x', 'z'), ('u', 'w')]);
-        let expected = vec![('u', 'z')];
-        assert_eq!(expected, uranges(&cls));
-
-        let cls = uclass(&[('\x00', '\u{10FFFF}'), ('\x00', '\u{10FFFF}')]);
-        let expected = vec![('\x00', '\u{10FFFF}')];
-        assert_eq!(expected, uranges(&cls));
-
-        let cls = uclass(&[('a', 'a'), ('b', 'b')]);
-        let expected = vec![('a', 'b')];
-        assert_eq!(expected, uranges(&cls));
-    }
-
-    #[test]
-    fn class_canonicalize_bytes() {
-        let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]);
-        let expected = vec![(b'a', b'c'), (b'x', b'z')];
-        assert_eq!(expected, branges(&cls));
-
-        let cls = bclass(&[(b'x', b'z'), (b'a', b'c')]);
-        let expected = vec![(b'a', b'c'), (b'x', b'z')];
-        assert_eq!(expected, branges(&cls));
-
-        let cls = bclass(&[(b'x', b'z'), (b'w', b'y')]);
-        let expected = vec![(b'w', b'z')];
-        assert_eq!(expected, branges(&cls));
-
-        let cls = bclass(&[
-            (b'c', b'f'),
-            (b'a', b'g'),
-            (b'd', b'j'),
-            (b'a', b'c'),
-            (b'm', b'p'),
-            (b'l', b's'),
-        ]);
-        let expected = vec![(b'a', b'j'), (b'l', b's')];
-        assert_eq!(expected, branges(&cls));
-
-        let cls = bclass(&[(b'x', b'z'), (b'u', b'w')]);
-        let expected = vec![(b'u', b'z')];
-        assert_eq!(expected, branges(&cls));
-
-        let cls = bclass(&[(b'\x00', b'\xFF'), (b'\x00', b'\xFF')]);
-        let expected = vec![(b'\x00', b'\xFF')];
-        assert_eq!(expected, branges(&cls));
-
-        let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]);
-        let expected = vec![(b'a', b'b')];
-        assert_eq!(expected, branges(&cls));
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-case")]
-    fn class_case_fold_unicode() {
-        let cls = uclass(&[
-            ('C', 'F'),
-            ('A', 'G'),
-            ('D', 'J'),
-            ('A', 'C'),
-            ('M', 'P'),
-            ('L', 'S'),
-            ('c', 'f'),
-        ]);
-        let expected = uclass(&[
-            ('A', 'J'),
-            ('L', 'S'),
-            ('a', 'j'),
-            ('l', 's'),
-            ('\u{17F}', '\u{17F}'),
-        ]);
-        assert_eq!(expected, ucasefold(&cls));
-
-        let cls = uclass(&[('A', 'Z')]);
-        let expected = uclass(&[
-            ('A', 'Z'),
-            ('a', 'z'),
-            ('\u{17F}', '\u{17F}'),
-            ('\u{212A}', '\u{212A}'),
-        ]);
-        assert_eq!(expected, ucasefold(&cls));
-
-        let cls = uclass(&[('a', 'z')]);
-        let expected = uclass(&[
-            ('A', 'Z'),
-            ('a', 'z'),
-            ('\u{17F}', '\u{17F}'),
-            ('\u{212A}', '\u{212A}'),
-        ]);
-        assert_eq!(expected, ucasefold(&cls));
-
-        let cls = uclass(&[('A', 'A'), ('_', '_')]);
-        let expected = uclass(&[('A', 'A'), ('_', '_'), ('a', 'a')]);
-        assert_eq!(expected, ucasefold(&cls));
-
-        let cls = uclass(&[('A', 'A'), ('=', '=')]);
-        let expected = uclass(&[('=', '='), ('A', 'A'), ('a', 'a')]);
-        assert_eq!(expected, ucasefold(&cls));
-
-        let cls = uclass(&[('\x00', '\x10')]);
-        assert_eq!(cls, ucasefold(&cls));
-
-        let cls = uclass(&[('k', 'k')]);
-        let expected =
-            uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}')]);
-        assert_eq!(expected, ucasefold(&cls));
-
-        let cls = uclass(&[('@', '@')]);
-        assert_eq!(cls, ucasefold(&cls));
-    }
-
-    #[test]
-    #[cfg(not(feature = "unicode-case"))]
-    fn class_case_fold_unicode_disabled() {
-        let mut cls = uclass(&[
-            ('C', 'F'),
-            ('A', 'G'),
-            ('D', 'J'),
-            ('A', 'C'),
-            ('M', 'P'),
-            ('L', 'S'),
-            ('c', 'f'),
-        ]);
-        assert!(cls.try_case_fold_simple().is_err());
-    }
-
-    #[test]
-    #[should_panic]
-    #[cfg(not(feature = "unicode-case"))]
-    fn class_case_fold_unicode_disabled_panics() {
-        let mut cls = uclass(&[
-            ('C', 'F'),
-            ('A', 'G'),
-            ('D', 'J'),
-            ('A', 'C'),
-            ('M', 'P'),
-            ('L', 'S'),
-            ('c', 'f'),
-        ]);
-        cls.case_fold_simple();
-    }
-
-    #[test]
-    fn class_case_fold_bytes() {
-        let cls = bclass(&[
-            (b'C', b'F'),
-            (b'A', b'G'),
-            (b'D', b'J'),
-            (b'A', b'C'),
-            (b'M', b'P'),
-            (b'L', b'S'),
-            (b'c', b'f'),
-        ]);
-        let expected =
-            bclass(&[(b'A', b'J'), (b'L', b'S'), (b'a', b'j'), (b'l', b's')]);
-        assert_eq!(expected, bcasefold(&cls));
-
-        let cls = bclass(&[(b'A', b'Z')]);
-        let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]);
-        assert_eq!(expected, bcasefold(&cls));
-
-        let cls = bclass(&[(b'a', b'z')]);
-        let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]);
-        assert_eq!(expected, bcasefold(&cls));
-
-        let cls = bclass(&[(b'A', b'A'), (b'_', b'_')]);
-        let expected = bclass(&[(b'A', b'A'), (b'_', b'_'), (b'a', b'a')]);
-        assert_eq!(expected, bcasefold(&cls));
-
-        let cls = bclass(&[(b'A', b'A'), (b'=', b'=')]);
-        let expected = bclass(&[(b'=', b'='), (b'A', b'A'), (b'a', b'a')]);
-        assert_eq!(expected, bcasefold(&cls));
-
-        let cls = bclass(&[(b'\x00', b'\x10')]);
-        assert_eq!(cls, bcasefold(&cls));
-
-        let cls = bclass(&[(b'k', b'k')]);
-        let expected = bclass(&[(b'K', b'K'), (b'k', b'k')]);
-        assert_eq!(expected, bcasefold(&cls));
-
-        let cls = bclass(&[(b'@', b'@')]);
-        assert_eq!(cls, bcasefold(&cls));
-    }
-
-    #[test]
-    fn class_negate_unicode() {
-        let cls = uclass(&[('a', 'a')]);
-        let expected = uclass(&[('\x00', '\x60'), ('\x62', '\u{10FFFF}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('a', 'a'), ('b', 'b')]);
-        let expected = uclass(&[('\x00', '\x60'), ('\x63', '\u{10FFFF}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('a', 'c'), ('x', 'z')]);
-        let expected = uclass(&[
-            ('\x00', '\x60'),
-            ('\x64', '\x77'),
-            ('\x7B', '\u{10FFFF}'),
-        ]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('\x00', 'a')]);
-        let expected = uclass(&[('\x62', '\u{10FFFF}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('a', '\u{10FFFF}')]);
-        let expected = uclass(&[('\x00', '\x60')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('\x00', '\u{10FFFF}')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[]);
-        let expected = uclass(&[('\x00', '\u{10FFFF}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls =
-            uclass(&[('\x00', '\u{10FFFD}'), ('\u{10FFFF}', '\u{10FFFF}')]);
-        let expected = uclass(&[('\u{10FFFE}', '\u{10FFFE}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('\x00', '\u{D7FF}')]);
-        let expected = uclass(&[('\u{E000}', '\u{10FFFF}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('\x00', '\u{D7FE}')]);
-        let expected = uclass(&[('\u{D7FF}', '\u{10FFFF}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('\u{E000}', '\u{10FFFF}')]);
-        let expected = uclass(&[('\x00', '\u{D7FF}')]);
-        assert_eq!(expected, unegate(&cls));
-
-        let cls = uclass(&[('\u{E001}', '\u{10FFFF}')]);
-        let expected = uclass(&[('\x00', '\u{E000}')]);
-        assert_eq!(expected, unegate(&cls));
-    }
-
-    #[test]
-    fn class_negate_bytes() {
-        let cls = bclass(&[(b'a', b'a')]);
-        let expected = bclass(&[(b'\x00', b'\x60'), (b'\x62', b'\xFF')]);
-        assert_eq!(expected, bnegate(&cls));
-
-        let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]);
-        let expected = bclass(&[(b'\x00', b'\x60'), (b'\x63', b'\xFF')]);
-        assert_eq!(expected, bnegate(&cls));
-
-        let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]);
-        let expected = bclass(&[
-            (b'\x00', b'\x60'),
-            (b'\x64', b'\x77'),
-            (b'\x7B', b'\xFF'),
-        ]);
-        assert_eq!(expected, bnegate(&cls));
-
-        let cls = bclass(&[(b'\x00', b'a')]);
-        let expected = bclass(&[(b'\x62', b'\xFF')]);
-        assert_eq!(expected, bnegate(&cls));
-
-        let cls = bclass(&[(b'a', b'\xFF')]);
-        let expected = bclass(&[(b'\x00', b'\x60')]);
-        assert_eq!(expected, bnegate(&cls));
-
-        let cls = bclass(&[(b'\x00', b'\xFF')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bnegate(&cls));
-
-        let cls = bclass(&[]);
-        let expected = bclass(&[(b'\x00', b'\xFF')]);
-        assert_eq!(expected, bnegate(&cls));
-
-        let cls = bclass(&[(b'\x00', b'\xFD'), (b'\xFF', b'\xFF')]);
-        let expected = bclass(&[(b'\xFE', b'\xFE')]);
-        assert_eq!(expected, bnegate(&cls));
-    }
-
-    #[test]
-    fn class_union_unicode() {
-        let cls1 = uclass(&[('a', 'g'), ('m', 't'), ('A', 'C')]);
-        let cls2 = uclass(&[('a', 'z')]);
-        let expected = uclass(&[('a', 'z'), ('A', 'C')]);
-        assert_eq!(expected, uunion(&cls1, &cls2));
-    }
-
-    #[test]
-    fn class_union_bytes() {
-        let cls1 = bclass(&[(b'a', b'g'), (b'm', b't'), (b'A', b'C')]);
-        let cls2 = bclass(&[(b'a', b'z')]);
-        let expected = bclass(&[(b'a', b'z'), (b'A', b'C')]);
-        assert_eq!(expected, bunion(&cls1, &cls2));
-    }
-
-    #[test]
-    fn class_intersect_unicode() {
-        let cls1 = uclass(&[]);
-        let cls2 = uclass(&[('a', 'a')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'a')]);
-        let cls2 = uclass(&[('a', 'a')]);
-        let expected = uclass(&[('a', 'a')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'a')]);
-        let cls2 = uclass(&[('b', 'b')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'a')]);
-        let cls2 = uclass(&[('a', 'c')]);
-        let expected = uclass(&[('a', 'a')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b')]);
-        let cls2 = uclass(&[('a', 'c')]);
-        let expected = uclass(&[('a', 'b')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b')]);
-        let cls2 = uclass(&[('b', 'c')]);
-        let expected = uclass(&[('b', 'b')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b')]);
-        let cls2 = uclass(&[('c', 'd')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('b', 'c')]);
-        let cls2 = uclass(&[('a', 'd')]);
-        let expected = uclass(&[('b', 'c')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]);
-        let cls2 = uclass(&[('a', 'h')]);
-        let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]);
-        let cls2 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]);
-        let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b'), ('g', 'h')]);
-        let cls2 = uclass(&[('d', 'e'), ('k', 'l')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]);
-        let cls2 = uclass(&[('h', 'h')]);
-        let expected = uclass(&[('h', 'h')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b'), ('e', 'f'), ('i', 'j')]);
-        let cls2 = uclass(&[('c', 'd'), ('g', 'h'), ('k', 'l')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'b'), ('c', 'd'), ('e', 'f')]);
-        let cls2 = uclass(&[('b', 'c'), ('d', 'e'), ('f', 'g')]);
-        let expected = uclass(&[('b', 'f')]);
-        assert_eq!(expected, uintersect(&cls1, &cls2));
-    }
-
-    #[test]
-    fn class_intersect_bytes() {
-        let cls1 = bclass(&[]);
-        let cls2 = bclass(&[(b'a', b'a')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'a')]);
-        let cls2 = bclass(&[(b'a', b'a')]);
-        let expected = bclass(&[(b'a', b'a')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'a')]);
-        let cls2 = bclass(&[(b'b', b'b')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'a')]);
-        let cls2 = bclass(&[(b'a', b'c')]);
-        let expected = bclass(&[(b'a', b'a')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b')]);
-        let cls2 = bclass(&[(b'a', b'c')]);
-        let expected = bclass(&[(b'a', b'b')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b')]);
-        let cls2 = bclass(&[(b'b', b'c')]);
-        let expected = bclass(&[(b'b', b'b')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b')]);
-        let cls2 = bclass(&[(b'c', b'd')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'b', b'c')]);
-        let cls2 = bclass(&[(b'a', b'd')]);
-        let expected = bclass(&[(b'b', b'c')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]);
-        let cls2 = bclass(&[(b'a', b'h')]);
-        let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]);
-        let cls2 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]);
-        let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b'), (b'g', b'h')]);
-        let cls2 = bclass(&[(b'd', b'e'), (b'k', b'l')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]);
-        let cls2 = bclass(&[(b'h', b'h')]);
-        let expected = bclass(&[(b'h', b'h')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b'), (b'e', b'f'), (b'i', b'j')]);
-        let cls2 = bclass(&[(b'c', b'd'), (b'g', b'h'), (b'k', b'l')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'b'), (b'c', b'd'), (b'e', b'f')]);
-        let cls2 = bclass(&[(b'b', b'c'), (b'd', b'e'), (b'f', b'g')]);
-        let expected = bclass(&[(b'b', b'f')]);
-        assert_eq!(expected, bintersect(&cls1, &cls2));
-    }
-
-    #[test]
-    fn class_difference_unicode() {
-        let cls1 = uclass(&[('a', 'a')]);
-        let cls2 = uclass(&[('a', 'a')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'a')]);
-        let cls2 = uclass(&[]);
-        let expected = uclass(&[('a', 'a')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[]);
-        let cls2 = uclass(&[('a', 'a')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'z')]);
-        let cls2 = uclass(&[('a', 'a')]);
-        let expected = uclass(&[('b', 'z')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'z')]);
-        let cls2 = uclass(&[('z', 'z')]);
-        let expected = uclass(&[('a', 'y')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'z')]);
-        let cls2 = uclass(&[('m', 'm')]);
-        let expected = uclass(&[('a', 'l'), ('n', 'z')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]);
-        let cls2 = uclass(&[('a', 'z')]);
-        let expected = uclass(&[]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]);
-        let cls2 = uclass(&[('d', 'v')]);
-        let expected = uclass(&[('a', 'c')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]);
-        let cls2 = uclass(&[('b', 'g'), ('s', 'u')]);
-        let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]);
-        let cls2 = uclass(&[('b', 'd'), ('e', 'g'), ('s', 'u')]);
-        let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('x', 'z')]);
-        let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]);
-        let expected = uclass(&[('x', 'z')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-
-        let cls1 = uclass(&[('a', 'z')]);
-        let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]);
-        let expected = uclass(&[('d', 'd'), ('h', 'r'), ('v', 'z')]);
-        assert_eq!(expected, udifference(&cls1, &cls2));
-    }
-
-    #[test]
-    fn class_difference_bytes() {
-        let cls1 = bclass(&[(b'a', b'a')]);
-        let cls2 = bclass(&[(b'a', b'a')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'a')]);
-        let cls2 = bclass(&[]);
-        let expected = bclass(&[(b'a', b'a')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[]);
-        let cls2 = bclass(&[(b'a', b'a')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'z')]);
-        let cls2 = bclass(&[(b'a', b'a')]);
-        let expected = bclass(&[(b'b', b'z')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'z')]);
-        let cls2 = bclass(&[(b'z', b'z')]);
-        let expected = bclass(&[(b'a', b'y')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'z')]);
-        let cls2 = bclass(&[(b'm', b'm')]);
-        let expected = bclass(&[(b'a', b'l'), (b'n', b'z')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]);
-        let cls2 = bclass(&[(b'a', b'z')]);
-        let expected = bclass(&[]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]);
-        let cls2 = bclass(&[(b'd', b'v')]);
-        let expected = bclass(&[(b'a', b'c')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]);
-        let cls2 = bclass(&[(b'b', b'g'), (b's', b'u')]);
-        let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]);
-        let cls2 = bclass(&[(b'b', b'd'), (b'e', b'g'), (b's', b'u')]);
-        let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'x', b'z')]);
-        let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]);
-        let expected = bclass(&[(b'x', b'z')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-
-        let cls1 = bclass(&[(b'a', b'z')]);
-        let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]);
-        let expected = bclass(&[(b'd', b'd'), (b'h', b'r'), (b'v', b'z')]);
-        assert_eq!(expected, bdifference(&cls1, &cls2));
-    }
-
-    #[test]
-    fn class_symmetric_difference_unicode() {
-        let cls1 = uclass(&[('a', 'm')]);
-        let cls2 = uclass(&[('g', 't')]);
-        let expected = uclass(&[('a', 'f'), ('n', 't')]);
-        assert_eq!(expected, usymdifference(&cls1, &cls2));
-    }
-
-    #[test]
-    fn class_symmetric_difference_bytes() {
-        let cls1 = bclass(&[(b'a', b'm')]);
-        let cls2 = bclass(&[(b'g', b't')]);
-        let expected = bclass(&[(b'a', b'f'), (b'n', b't')]);
-        assert_eq!(expected, bsymdifference(&cls1, &cls2));
-    }
-
-    // We use a thread with an explicit stack size to test that our destructor
-    // for Hir can handle arbitrarily sized expressions in constant stack
-    // space. In case we run on a platform without threads (WASM?), we limit
-    // this test to Windows/Unix.
-    #[test]
-    #[cfg(any(unix, windows))]
-    fn no_stack_overflow_on_drop() {
-        use std::thread;
-
-        let run = || {
-            let mut expr = Hir::empty();
-            for _ in 0..100 {
-                expr = Hir::capture(Capture {
-                    index: 1,
-                    name: None,
-                    sub: Box::new(expr),
-                });
-                expr = Hir::repetition(Repetition {
-                    min: 0,
-                    max: Some(1),
-                    greedy: true,
-                    sub: Box::new(expr),
-                });
-
-                expr = Hir {
-                    kind: HirKind::Concat(vec![expr]),
-                    props: Properties::empty(),
-                };
-                expr = Hir {
-                    kind: HirKind::Alternation(vec![expr]),
-                    props: Properties::empty(),
-                };
-            }
-            assert!(!matches!(*expr.kind(), HirKind::Empty));
-        };
-
-        // We run our test on a thread with a small stack size so we can
-        // force the issue more easily.
-        //
-        // NOTE(2023-03-21): See the corresponding test in 'crate::ast::tests'
-        // for context on the specific stack size chosen here.
-        thread::Builder::new()
-            .stack_size(16 << 10)
-            .spawn(run)
-            .unwrap()
-            .join()
-            .unwrap();
-    }
-
-    #[test]
-    fn look_set_iter() {
-        let set = LookSet::empty();
-        assert_eq!(0, set.iter().count());
-
-        let set = LookSet::full();
-        assert_eq!(18, set.iter().count());
-
-        let set =
-            LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode);
-        assert_eq!(2, set.iter().count());
-
-        let set = LookSet::empty().insert(Look::StartLF);
-        assert_eq!(1, set.iter().count());
-
-        let set = LookSet::empty().insert(Look::WordAsciiNegate);
-        assert_eq!(1, set.iter().count());
-    }
-
-    #[test]
-    fn look_set_debug() {
-        let res = format!("{:?}", LookSet::empty());
-        assert_eq!("∅", res);
-        let res = format!("{:?}", LookSet::full());
-        assert_eq!("Az^$rRbBđ›ƒđš©<>ă€ˆă€‰â—â–·â—€â–¶", res);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/print.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/print.rs
deleted file mode 100644
index dfa6d403..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/print.rs
+++ /dev/null
@@ -1,608 +0,0 @@
-/*!
-This module provides a regular expression printer for `Hir`.
-*/
-
-use core::fmt;
-
-use crate::{
-    hir::{
-        self,
-        visitor::{self, Visitor},
-        Hir, HirKind,
-    },
-    is_meta_character,
-};
-
-/// A builder for constructing a printer.
-///
-/// Note that since a printer doesn't have any configuration knobs, this type
-/// remains unexported.
-#[derive(Clone, Debug)]
-struct PrinterBuilder {
-    _priv: (),
-}
-
-impl Default for PrinterBuilder {
-    fn default() -> PrinterBuilder {
-        PrinterBuilder::new()
-    }
-}
-
-impl PrinterBuilder {
-    fn new() -> PrinterBuilder {
-        PrinterBuilder { _priv: () }
-    }
-
-    fn build(&self) -> Printer {
-        Printer { _priv: () }
-    }
-}
-
-/// A printer for a regular expression's high-level intermediate
-/// representation.
-///
-/// A printer converts a high-level intermediate representation (HIR) to a
-/// regular expression pattern string. This particular printer uses constant
-/// stack space and heap space proportional to the size of the HIR.
-///
-/// Since this printer is only using the HIR, the pattern it prints will likely
-/// not resemble the original pattern at all. For example, a pattern like
-/// `\pL` will have its entire class written out.
-///
-/// The purpose of this printer is to provide a means to mutate an HIR and then
-/// build a regular expression from the result of that mutation. (A regex
-/// library could provide a constructor from this HIR explicitly, but that
-/// creates an unnecessary public coupling between the regex library and this
-/// specific HIR representation.)
-#[derive(Debug)]
-pub struct Printer {
-    _priv: (),
-}
-
-impl Printer {
-    /// Create a new printer.
-    pub fn new() -> Printer {
-        PrinterBuilder::new().build()
-    }
-
-    /// Print the given `Ast` to the given writer. The writer must implement
-    /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used
-    /// here are a `fmt::Formatter` (which is available in `fmt::Display`
-    /// implementations) or a `&mut String`.
-    pub fn print<W: fmt::Write>(&mut self, hir: &Hir, wtr: W) -> fmt::Result {
-        visitor::visit(hir, Writer { wtr })
-    }
-}
-
-#[derive(Debug)]
-struct Writer<W> {
-    wtr: W,
-}
-
-impl<W: fmt::Write> Visitor for Writer<W> {
-    type Output = ();
-    type Err = fmt::Error;
-
-    fn finish(self) -> fmt::Result {
-        Ok(())
-    }
-
-    fn visit_pre(&mut self, hir: &Hir) -> fmt::Result {
-        match *hir.kind() {
-            HirKind::Empty => {
-                // Technically an empty sub-expression could be "printed" by
-                // just ignoring it, but in practice, you could have a
-                // repetition operator attached to an empty expression, and you
-                // really need something in the concrete syntax to make that
-                // work as you'd expect.
-                self.wtr.write_str(r"(?:)")?;
-            }
-            // Repetition operators are strictly suffix oriented.
-            HirKind::Repetition(_) => {}
-            HirKind::Literal(hir::Literal(ref bytes)) => {
-                // See the comment on the 'Concat' and 'Alternation' case below
-                // for why we put parens here. Literals are, conceptually,
-                // a special case of concatenation where each element is a
-                // character. The HIR flattens this into a Box<[u8]>, but we
-                // still need to treat it like a concatenation for correct
-                // printing. As a special case, we don't write parens if there
-                // is only one character. One character means there is no
-                // concat so we don't need parens. Adding parens would still be
-                // correct, but we drop them here because it tends to create
-                // rather noisy regexes even in simple cases.
-                let result = core::str::from_utf8(bytes);
-                let len = result.map_or(bytes.len(), |s| s.chars().count());
-                if len > 1 {
-                    self.wtr.write_str(r"(?:")?;
-                }
-                match result {
-                    Ok(string) => {
-                        for c in string.chars() {
-                            self.write_literal_char(c)?;
-                        }
-                    }
-                    Err(_) => {
-                        for &b in bytes.iter() {
-                            self.write_literal_byte(b)?;
-                        }
-                    }
-                }
-                if len > 1 {
-                    self.wtr.write_str(r")")?;
-                }
-            }
-            HirKind::Class(hir::Class::Unicode(ref cls)) => {
-                if cls.ranges().is_empty() {
-                    return self.wtr.write_str("[a&&b]");
-                }
-                self.wtr.write_str("[")?;
-                for range in cls.iter() {
-                    if range.start() == range.end() {
-                        self.write_literal_char(range.start())?;
-                    } else if u32::from(range.start()) + 1
-                        == u32::from(range.end())
-                    {
-                        self.write_literal_char(range.start())?;
-                        self.write_literal_char(range.end())?;
-                    } else {
-                        self.write_literal_char(range.start())?;
-                        self.wtr.write_str("-")?;
-                        self.write_literal_char(range.end())?;
-                    }
-                }
-                self.wtr.write_str("]")?;
-            }
-            HirKind::Class(hir::Class::Bytes(ref cls)) => {
-                if cls.ranges().is_empty() {
-                    return self.wtr.write_str("[a&&b]");
-                }
-                self.wtr.write_str("(?-u:[")?;
-                for range in cls.iter() {
-                    if range.start() == range.end() {
-                        self.write_literal_class_byte(range.start())?;
-                    } else if range.start() + 1 == range.end() {
-                        self.write_literal_class_byte(range.start())?;
-                        self.write_literal_class_byte(range.end())?;
-                    } else {
-                        self.write_literal_class_byte(range.start())?;
-                        self.wtr.write_str("-")?;
-                        self.write_literal_class_byte(range.end())?;
-                    }
-                }
-                self.wtr.write_str("])")?;
-            }
-            HirKind::Look(ref look) => match *look {
-                hir::Look::Start => {
-                    self.wtr.write_str(r"\A")?;
-                }
-                hir::Look::End => {
-                    self.wtr.write_str(r"\z")?;
-                }
-                hir::Look::StartLF => {
-                    self.wtr.write_str("(?m:^)")?;
-                }
-                hir::Look::EndLF => {
-                    self.wtr.write_str("(?m:$)")?;
-                }
-                hir::Look::StartCRLF => {
-                    self.wtr.write_str("(?mR:^)")?;
-                }
-                hir::Look::EndCRLF => {
-                    self.wtr.write_str("(?mR:$)")?;
-                }
-                hir::Look::WordAscii => {
-                    self.wtr.write_str(r"(?-u:\b)")?;
-                }
-                hir::Look::WordAsciiNegate => {
-                    self.wtr.write_str(r"(?-u:\B)")?;
-                }
-                hir::Look::WordUnicode => {
-                    self.wtr.write_str(r"\b")?;
-                }
-                hir::Look::WordUnicodeNegate => {
-                    self.wtr.write_str(r"\B")?;
-                }
-                hir::Look::WordStartAscii => {
-                    self.wtr.write_str(r"(?-u:\b{start})")?;
-                }
-                hir::Look::WordEndAscii => {
-                    self.wtr.write_str(r"(?-u:\b{end})")?;
-                }
-                hir::Look::WordStartUnicode => {
-                    self.wtr.write_str(r"\b{start}")?;
-                }
-                hir::Look::WordEndUnicode => {
-                    self.wtr.write_str(r"\b{end}")?;
-                }
-                hir::Look::WordStartHalfAscii => {
-                    self.wtr.write_str(r"(?-u:\b{start-half})")?;
-                }
-                hir::Look::WordEndHalfAscii => {
-                    self.wtr.write_str(r"(?-u:\b{end-half})")?;
-                }
-                hir::Look::WordStartHalfUnicode => {
-                    self.wtr.write_str(r"\b{start-half}")?;
-                }
-                hir::Look::WordEndHalfUnicode => {
-                    self.wtr.write_str(r"\b{end-half}")?;
-                }
-            },
-            HirKind::Capture(hir::Capture { ref name, .. }) => {
-                self.wtr.write_str("(")?;
-                if let Some(ref name) = *name {
-                    write!(self.wtr, "?P<{}>", name)?;
-                }
-            }
-            // Why do this? Wrapping concats and alts in non-capturing groups
-            // is not *always* necessary, but is sometimes necessary. For
-            // example, 'concat(a, alt(b, c))' should be written as 'a(?:b|c)'
-            // and not 'ab|c'. The former is clearly the intended meaning, but
-            // the latter is actually 'alt(concat(a, b), c)'.
-            //
-            // It would be possible to only group these things in cases where
-            // it's strictly necessary, but it requires knowing the parent
-            // expression. And since this technique is simpler and always
-            // correct, we take this route. More to the point, it is a non-goal
-            // of an HIR printer to show a nice easy-to-read regex. Indeed,
-            // its construction forbids it from doing so. Therefore, inserting
-            // extra groups where they aren't necessary is perfectly okay.
-            HirKind::Concat(_) | HirKind::Alternation(_) => {
-                self.wtr.write_str(r"(?:")?;
-            }
-        }
-        Ok(())
-    }
-
-    fn visit_post(&mut self, hir: &Hir) -> fmt::Result {
-        match *hir.kind() {
-            // Handled during visit_pre
-            HirKind::Empty
-            | HirKind::Literal(_)
-            | HirKind::Class(_)
-            | HirKind::Look(_) => {}
-            HirKind::Repetition(ref x) => {
-                match (x.min, x.max) {
-                    (0, Some(1)) => {
-                        self.wtr.write_str("?")?;
-                    }
-                    (0, None) => {
-                        self.wtr.write_str("*")?;
-                    }
-                    (1, None) => {
-                        self.wtr.write_str("+")?;
-                    }
-                    (1, Some(1)) => {
-                        // 'a{1}' and 'a{1}?' are exactly equivalent to 'a'.
-                        return Ok(());
-                    }
-                    (m, None) => {
-                        write!(self.wtr, "{{{},}}", m)?;
-                    }
-                    (m, Some(n)) if m == n => {
-                        write!(self.wtr, "{{{}}}", m)?;
-                        // a{m} and a{m}? are always exactly equivalent.
-                        return Ok(());
-                    }
-                    (m, Some(n)) => {
-                        write!(self.wtr, "{{{},{}}}", m, n)?;
-                    }
-                }
-                if !x.greedy {
-                    self.wtr.write_str("?")?;
-                }
-            }
-            HirKind::Capture(_)
-            | HirKind::Concat(_)
-            | HirKind::Alternation(_) => {
-                self.wtr.write_str(r")")?;
-            }
-        }
-        Ok(())
-    }
-
-    fn visit_alternation_in(&mut self) -> fmt::Result {
-        self.wtr.write_str("|")
-    }
-}
-
-impl<W: fmt::Write> Writer<W> {
-    fn write_literal_char(&mut self, c: char) -> fmt::Result {
-        if is_meta_character(c) {
-            self.wtr.write_str("\\")?;
-        }
-        self.wtr.write_char(c)
-    }
-
-    fn write_literal_byte(&mut self, b: u8) -> fmt::Result {
-        if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() {
-            self.write_literal_char(char::try_from(b).unwrap())
-        } else {
-            write!(self.wtr, "(?-u:\\x{:02X})", b)
-        }
-    }
-
-    fn write_literal_class_byte(&mut self, b: u8) -> fmt::Result {
-        if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() {
-            self.write_literal_char(char::try_from(b).unwrap())
-        } else {
-            write!(self.wtr, "\\x{:02X}", b)
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use alloc::{
-        boxed::Box,
-        string::{String, ToString},
-    };
-
-    use crate::ParserBuilder;
-
-    use super::*;
-
-    fn roundtrip(given: &str, expected: &str) {
-        roundtrip_with(|b| b, given, expected);
-    }
-
-    fn roundtrip_bytes(given: &str, expected: &str) {
-        roundtrip_with(|b| b.utf8(false), given, expected);
-    }
-
-    fn roundtrip_with<F>(mut f: F, given: &str, expected: &str)
-    where
-        F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder,
-    {
-        let mut builder = ParserBuilder::new();
-        f(&mut builder);
-        let hir = builder.build().parse(given).unwrap();
-
-        let mut printer = Printer::new();
-        let mut dst = String::new();
-        printer.print(&hir, &mut dst).unwrap();
-
-        // Check that the result is actually valid.
-        builder.build().parse(&dst).unwrap();
-
-        assert_eq!(expected, dst);
-    }
-
-    #[test]
-    fn print_literal() {
-        roundtrip("a", "a");
-        roundtrip(r"\xff", "\u{FF}");
-        roundtrip_bytes(r"\xff", "\u{FF}");
-        roundtrip_bytes(r"(?-u)\xff", r"(?-u:\xFF)");
-        roundtrip("☃", "☃");
-    }
-
-    #[test]
-    fn print_class() {
-        roundtrip(r"[a]", r"a");
-        roundtrip(r"[ab]", r"[ab]");
-        roundtrip(r"[a-z]", r"[a-z]");
-        roundtrip(r"[a-z--b-c--x-y]", r"[ad-wz]");
-        roundtrip(r"[^\x01-\u{10FFFF}]", "\u{0}");
-        roundtrip(r"[-]", r"\-");
-        roundtrip(r"[☃-⛄]", r"[☃-⛄]");
-
-        roundtrip(r"(?-u)[a]", r"a");
-        roundtrip(r"(?-u)[ab]", r"(?-u:[ab])");
-        roundtrip(r"(?-u)[a-z]", r"(?-u:[a-z])");
-        roundtrip_bytes(r"(?-u)[a-\xFF]", r"(?-u:[a-\xFF])");
-
-        // The following test that the printer escapes meta characters
-        // in character classes.
-        roundtrip(r"[\[]", r"\[");
-        roundtrip(r"[Z-_]", r"[Z-_]");
-        roundtrip(r"[Z-_--Z]", r"[\[-_]");
-
-        // The following test that the printer escapes meta characters
-        // in byte oriented character classes.
-        roundtrip_bytes(r"(?-u)[\[]", r"\[");
-        roundtrip_bytes(r"(?-u)[Z-_]", r"(?-u:[Z-_])");
-        roundtrip_bytes(r"(?-u)[Z-_--Z]", r"(?-u:[\[-_])");
-
-        // This tests that an empty character class is correctly roundtripped.
-        #[cfg(feature = "unicode-gencat")]
-        roundtrip(r"\P{any}", r"[a&&b]");
-        roundtrip_bytes(r"(?-u)[^\x00-\xFF]", r"[a&&b]");
-    }
-
-    #[test]
-    fn print_anchor() {
-        roundtrip(r"^", r"\A");
-        roundtrip(r"$", r"\z");
-        roundtrip(r"(?m)^", r"(?m:^)");
-        roundtrip(r"(?m)$", r"(?m:$)");
-    }
-
-    #[test]
-    fn print_word_boundary() {
-        roundtrip(r"\b", r"\b");
-        roundtrip(r"\B", r"\B");
-        roundtrip(r"(?-u)\b", r"(?-u:\b)");
-        roundtrip_bytes(r"(?-u)\B", r"(?-u:\B)");
-    }
-
-    #[test]
-    fn print_repetition() {
-        roundtrip("a?", "a?");
-        roundtrip("a??", "a??");
-        roundtrip("(?U)a?", "a??");
-
-        roundtrip("a*", "a*");
-        roundtrip("a*?", "a*?");
-        roundtrip("(?U)a*", "a*?");
-
-        roundtrip("a+", "a+");
-        roundtrip("a+?", "a+?");
-        roundtrip("(?U)a+", "a+?");
-
-        roundtrip("a{1}", "a");
-        roundtrip("a{2}", "a{2}");
-        roundtrip("a{1,}", "a+");
-        roundtrip("a{1,5}", "a{1,5}");
-        roundtrip("a{1}?", "a");
-        roundtrip("a{2}?", "a{2}");
-        roundtrip("a{1,}?", "a+?");
-        roundtrip("a{1,5}?", "a{1,5}?");
-        roundtrip("(?U)a{1}", "a");
-        roundtrip("(?U)a{2}", "a{2}");
-        roundtrip("(?U)a{1,}", "a+?");
-        roundtrip("(?U)a{1,5}", "a{1,5}?");
-
-        // Test that various zero-length repetitions always translate to an
-        // empty regex. This is more a property of HIR's smart constructors
-        // than the printer though.
-        roundtrip("a{0}", "(?:)");
-        roundtrip("(?:ab){0}", "(?:)");
-        #[cfg(feature = "unicode-gencat")]
-        {
-            roundtrip(r"\p{any}{0}", "(?:)");
-            roundtrip(r"\P{any}{0}", "(?:)");
-        }
-    }
-
-    #[test]
-    fn print_group() {
-        roundtrip("()", "((?:))");
-        roundtrip("(?P<foo>)", "(?P<foo>(?:))");
-        roundtrip("(?:)", "(?:)");
-
-        roundtrip("(a)", "(a)");
-        roundtrip("(?P<foo>a)", "(?P<foo>a)");
-        roundtrip("(?:a)", "a");
-
-        roundtrip("((((a))))", "((((a))))");
-    }
-
-    #[test]
-    fn print_alternation() {
-        roundtrip("|", "(?:(?:)|(?:))");
-        roundtrip("||", "(?:(?:)|(?:)|(?:))");
-
-        roundtrip("a|b", "[ab]");
-        roundtrip("ab|cd", "(?:(?:ab)|(?:cd))");
-        roundtrip("a|b|c", "[a-c]");
-        roundtrip("ab|cd|ef", "(?:(?:ab)|(?:cd)|(?:ef))");
-        roundtrip("foo|bar|quux", "(?:(?:foo)|(?:bar)|(?:quux))");
-    }
-
-    // This is a regression test that stresses a peculiarity of how the HIR
-    // is both constructed and printed. Namely, it is legal for a repetition
-    // to directly contain a concatenation. This particular construct isn't
-    // really possible to build from the concrete syntax directly, since you'd
-    // be forced to put the concatenation into (at least) a non-capturing
-    // group. Concurrently, the printer doesn't consider this case and just
-    // kind of naively prints the child expression and tacks on the repetition
-    // operator.
-    //
-    // As a result, if you attached '+' to a 'concat(a, b)', the printer gives
-    // you 'ab+', but clearly it really should be '(?:ab)+'.
-    //
-    // This bug isn't easy to surface because most ways of building an HIR
-    // come directly from the concrete syntax, and as mentioned above, it just
-    // isn't possible to build this kind of HIR from the concrete syntax.
-    // Nevertheless, this is definitely a bug.
-    //
-    // See: https://github.com/rust-lang/regex/issues/731
-    #[test]
-    fn regression_repetition_concat() {
-        let expr = Hir::concat(alloc::vec![
-            Hir::literal("x".as_bytes()),
-            Hir::repetition(hir::Repetition {
-                min: 1,
-                max: None,
-                greedy: true,
-                sub: Box::new(Hir::literal("ab".as_bytes())),
-            }),
-            Hir::literal("y".as_bytes()),
-        ]);
-        assert_eq!(r"(?:x(?:ab)+y)", expr.to_string());
-
-        let expr = Hir::concat(alloc::vec![
-            Hir::look(hir::Look::Start),
-            Hir::repetition(hir::Repetition {
-                min: 1,
-                max: None,
-                greedy: true,
-                sub: Box::new(Hir::concat(alloc::vec![
-                    Hir::look(hir::Look::Start),
-                    Hir::look(hir::Look::End),
-                ])),
-            }),
-            Hir::look(hir::Look::End),
-        ]);
-        assert_eq!(r"(?:\A\A\z\z)", expr.to_string());
-    }
-
-    // Just like regression_repetition_concat, but with the repetition using
-    // an alternation as a child expression instead.
-    //
-    // See: https://github.com/rust-lang/regex/issues/731
-    #[test]
-    fn regression_repetition_alternation() {
-        let expr = Hir::concat(alloc::vec![
-            Hir::literal("ab".as_bytes()),
-            Hir::repetition(hir::Repetition {
-                min: 1,
-                max: None,
-                greedy: true,
-                sub: Box::new(Hir::alternation(alloc::vec![
-                    Hir::literal("cd".as_bytes()),
-                    Hir::literal("ef".as_bytes()),
-                ])),
-            }),
-            Hir::literal("gh".as_bytes()),
-        ]);
-        assert_eq!(r"(?:(?:ab)(?:(?:cd)|(?:ef))+(?:gh))", expr.to_string());
-
-        let expr = Hir::concat(alloc::vec![
-            Hir::look(hir::Look::Start),
-            Hir::repetition(hir::Repetition {
-                min: 1,
-                max: None,
-                greedy: true,
-                sub: Box::new(Hir::alternation(alloc::vec![
-                    Hir::look(hir::Look::Start),
-                    Hir::look(hir::Look::End),
-                ])),
-            }),
-            Hir::look(hir::Look::End),
-        ]);
-        assert_eq!(r"(?:\A(?:\A|\z)\z)", expr.to_string());
-    }
-
-    // This regression test is very similar in flavor to
-    // regression_repetition_concat in that the root of the issue lies in a
-    // peculiarity of how the HIR is represented and how the printer writes it
-    // out. Like the other regression, this one is also rooted in the fact that
-    // you can't produce the peculiar HIR from the concrete syntax. Namely, you
-    // just can't have a 'concat(a, alt(b, c))' because the 'alt' will normally
-    // be in (at least) a non-capturing group. Why? Because the '|' has very
-    // low precedence (lower that concatenation), and so something like 'ab|c'
-    // is actually 'alt(ab, c)'.
-    //
-    // See: https://github.com/rust-lang/regex/issues/516
-    #[test]
-    fn regression_alternation_concat() {
-        let expr = Hir::concat(alloc::vec![
-            Hir::literal("ab".as_bytes()),
-            Hir::alternation(alloc::vec![
-                Hir::literal("mn".as_bytes()),
-                Hir::literal("xy".as_bytes()),
-            ]),
-        ]);
-        assert_eq!(r"(?:(?:ab)(?:(?:mn)|(?:xy)))", expr.to_string());
-
-        let expr = Hir::concat(alloc::vec![
-            Hir::look(hir::Look::Start),
-            Hir::alternation(alloc::vec![
-                Hir::look(hir::Look::Start),
-                Hir::look(hir::Look::End),
-            ]),
-        ]);
-        assert_eq!(r"(?:\A(?:\A|\z))", expr.to_string());
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/translate.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/translate.rs
deleted file mode 100644
index e8e5a881..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/translate.rs
+++ /dev/null
@@ -1,3744 +0,0 @@
-/*!
-Defines a translator that converts an `Ast` to an `Hir`.
-*/
-
-use core::cell::{Cell, RefCell};
-
-use alloc::{boxed::Box, string::ToString, vec, vec::Vec};
-
-use crate::{
-    ast::{self, Ast, Span, Visitor},
-    either::Either,
-    hir::{self, Error, ErrorKind, Hir, HirKind},
-    unicode::{self, ClassQuery},
-};
-
-type Result<T> = core::result::Result<T, Error>;
-
-/// A builder for constructing an AST->HIR translator.
-#[derive(Clone, Debug)]
-pub struct TranslatorBuilder {
-    utf8: bool,
-    line_terminator: u8,
-    flags: Flags,
-}
-
-impl Default for TranslatorBuilder {
-    fn default() -> TranslatorBuilder {
-        TranslatorBuilder::new()
-    }
-}
-
-impl TranslatorBuilder {
-    /// Create a new translator builder with a default c onfiguration.
-    pub fn new() -> TranslatorBuilder {
-        TranslatorBuilder {
-            utf8: true,
-            line_terminator: b'\n',
-            flags: Flags::default(),
-        }
-    }
-
-    /// Build a translator using the current configuration.
-    pub fn build(&self) -> Translator {
-        Translator {
-            stack: RefCell::new(vec![]),
-            flags: Cell::new(self.flags),
-            utf8: self.utf8,
-            line_terminator: self.line_terminator,
-        }
-    }
-
-    /// When disabled, translation will permit the construction of a regular
-    /// expression that may match invalid UTF-8.
-    ///
-    /// When enabled (the default), the translator is guaranteed to produce an
-    /// expression that, for non-empty matches, will only ever produce spans
-    /// that are entirely valid UTF-8 (otherwise, the translator will return an
-    /// error).
-    ///
-    /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even
-    /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete
-    /// syntax) will be allowed even though they can produce matches that split
-    /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty"
-    /// matches, and it is expected that the regex engine itself must handle
-    /// these cases if necessary (perhaps by suppressing any zero-width matches
-    /// that split a codepoint).
-    pub fn utf8(&mut self, yes: bool) -> &mut TranslatorBuilder {
-        self.utf8 = yes;
-        self
-    }
-
-    /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`.
-    ///
-    /// Namely, instead of `.` (by default) matching everything except for `\n`,
-    /// this will cause `.` to match everything except for the byte given.
-    ///
-    /// If `.` is used in a context where Unicode mode is enabled and this byte
-    /// isn't ASCII, then an error will be returned. When Unicode mode is
-    /// disabled, then any byte is permitted, but will return an error if UTF-8
-    /// mode is enabled and it is a non-ASCII byte.
-    ///
-    /// In short, any ASCII value for a line terminator is always okay. But a
-    /// non-ASCII byte might result in an error depending on whether Unicode
-    /// mode or UTF-8 mode are enabled.
-    ///
-    /// Note that if `R` mode is enabled then it always takes precedence and
-    /// the line terminator will be treated as `\r` and `\n` simultaneously.
-    ///
-    /// Note also that this *doesn't* impact the look-around assertions
-    /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional
-    /// configuration in the regex engine itself.
-    pub fn line_terminator(&mut self, byte: u8) -> &mut TranslatorBuilder {
-        self.line_terminator = byte;
-        self
-    }
-
-    /// Enable or disable the case insensitive flag (`i`) by default.
-    pub fn case_insensitive(&mut self, yes: bool) -> &mut TranslatorBuilder {
-        self.flags.case_insensitive = if yes { Some(true) } else { None };
-        self
-    }
-
-    /// Enable or disable the multi-line matching flag (`m`) by default.
-    pub fn multi_line(&mut self, yes: bool) -> &mut TranslatorBuilder {
-        self.flags.multi_line = if yes { Some(true) } else { None };
-        self
-    }
-
-    /// Enable or disable the "dot matches any character" flag (`s`) by
-    /// default.
-    pub fn dot_matches_new_line(
-        &mut self,
-        yes: bool,
-    ) -> &mut TranslatorBuilder {
-        self.flags.dot_matches_new_line = if yes { Some(true) } else { None };
-        self
-    }
-
-    /// Enable or disable the CRLF mode flag (`R`) by default.
-    pub fn crlf(&mut self, yes: bool) -> &mut TranslatorBuilder {
-        self.flags.crlf = if yes { Some(true) } else { None };
-        self
-    }
-
-    /// Enable or disable the "swap greed" flag (`U`) by default.
-    pub fn swap_greed(&mut self, yes: bool) -> &mut TranslatorBuilder {
-        self.flags.swap_greed = if yes { Some(true) } else { None };
-        self
-    }
-
-    /// Enable or disable the Unicode flag (`u`) by default.
-    pub fn unicode(&mut self, yes: bool) -> &mut TranslatorBuilder {
-        self.flags.unicode = if yes { None } else { Some(false) };
-        self
-    }
-}
-
-/// A translator maps abstract syntax to a high level intermediate
-/// representation.
-///
-/// A translator may be benefit from reuse. That is, a translator can translate
-/// many abstract syntax trees.
-///
-/// A `Translator` can be configured in more detail via a
-/// [`TranslatorBuilder`].
-#[derive(Clone, Debug)]
-pub struct Translator {
-    /// Our call stack, but on the heap.
-    stack: RefCell<Vec<HirFrame>>,
-    /// The current flag settings.
-    flags: Cell<Flags>,
-    /// Whether we're allowed to produce HIR that can match arbitrary bytes.
-    utf8: bool,
-    /// The line terminator to use for `.`.
-    line_terminator: u8,
-}
-
-impl Translator {
-    /// Create a new translator using the default configuration.
-    pub fn new() -> Translator {
-        TranslatorBuilder::new().build()
-    }
-
-    /// Translate the given abstract syntax tree (AST) into a high level
-    /// intermediate representation (HIR).
-    ///
-    /// If there was a problem doing the translation, then an HIR-specific
-    /// error is returned.
-    ///
-    /// The original pattern string used to produce the `Ast` *must* also be
-    /// provided. The translator does not use the pattern string during any
-    /// correct translation, but is used for error reporting.
-    pub fn translate(&mut self, pattern: &str, ast: &Ast) -> Result<Hir> {
-        ast::visit(ast, TranslatorI::new(self, pattern))
-    }
-}
-
-/// An HirFrame is a single stack frame, represented explicitly, which is
-/// created for each item in the Ast that we traverse.
-///
-/// Note that technically, this type doesn't represent our entire stack
-/// frame. In particular, the Ast visitor represents any state associated with
-/// traversing the Ast itself.
-#[derive(Clone, Debug)]
-enum HirFrame {
-    /// An arbitrary HIR expression. These get pushed whenever we hit a base
-    /// case in the Ast. They get popped after an inductive (i.e., recursive)
-    /// step is complete.
-    Expr(Hir),
-    /// A literal that is being constructed, character by character, from the
-    /// AST. We need this because the AST gives each individual character its
-    /// own node. So as we see characters, we peek at the top-most HirFrame.
-    /// If it's a literal, then we add to it. Otherwise, we push a new literal.
-    /// When it comes time to pop it, we convert it to an Hir via Hir::literal.
-    Literal(Vec<u8>),
-    /// A Unicode character class. This frame is mutated as we descend into
-    /// the Ast of a character class (which is itself its own mini recursive
-    /// structure).
-    ClassUnicode(hir::ClassUnicode),
-    /// A byte-oriented character class. This frame is mutated as we descend
-    /// into the Ast of a character class (which is itself its own mini
-    /// recursive structure).
-    ///
-    /// Byte character classes are created when Unicode mode (`u`) is disabled.
-    /// If `utf8` is enabled (the default), then a byte character is only
-    /// permitted to match ASCII text.
-    ClassBytes(hir::ClassBytes),
-    /// This is pushed whenever a repetition is observed. After visiting every
-    /// sub-expression in the repetition, the translator's stack is expected to
-    /// have this sentinel at the top.
-    ///
-    /// This sentinel only exists to stop other things (like flattening
-    /// literals) from reaching across repetition operators.
-    Repetition,
-    /// This is pushed on to the stack upon first seeing any kind of capture,
-    /// indicated by parentheses (including non-capturing groups). It is popped
-    /// upon leaving a group.
-    Group {
-        /// The old active flags when this group was opened.
-        ///
-        /// If this group sets flags, then the new active flags are set to the
-        /// result of merging the old flags with the flags introduced by this
-        /// group. If the group doesn't set any flags, then this is simply
-        /// equivalent to whatever flags were set when the group was opened.
-        ///
-        /// When this group is popped, the active flags should be restored to
-        /// the flags set here.
-        ///
-        /// The "active" flags correspond to whatever flags are set in the
-        /// Translator.
-        old_flags: Flags,
-    },
-    /// This is pushed whenever a concatenation is observed. After visiting
-    /// every sub-expression in the concatenation, the translator's stack is
-    /// popped until it sees a Concat frame.
-    Concat,
-    /// This is pushed whenever an alternation is observed. After visiting
-    /// every sub-expression in the alternation, the translator's stack is
-    /// popped until it sees an Alternation frame.
-    Alternation,
-    /// This is pushed immediately before each sub-expression in an
-    /// alternation. This separates the branches of an alternation on the
-    /// stack and prevents literal flattening from reaching across alternation
-    /// branches.
-    ///
-    /// It is popped after each expression in a branch until an 'Alternation'
-    /// frame is observed when doing a post visit on an alternation.
-    AlternationBranch,
-}
-
-impl HirFrame {
-    /// Assert that the current stack frame is an Hir expression and return it.
-    fn unwrap_expr(self) -> Hir {
-        match self {
-            HirFrame::Expr(expr) => expr,
-            HirFrame::Literal(lit) => Hir::literal(lit),
-            _ => panic!("tried to unwrap expr from HirFrame, got: {:?}", self),
-        }
-    }
-
-    /// Assert that the current stack frame is a Unicode class expression and
-    /// return it.
-    fn unwrap_class_unicode(self) -> hir::ClassUnicode {
-        match self {
-            HirFrame::ClassUnicode(cls) => cls,
-            _ => panic!(
-                "tried to unwrap Unicode class \
-                 from HirFrame, got: {:?}",
-                self
-            ),
-        }
-    }
-
-    /// Assert that the current stack frame is a byte class expression and
-    /// return it.
-    fn unwrap_class_bytes(self) -> hir::ClassBytes {
-        match self {
-            HirFrame::ClassBytes(cls) => cls,
-            _ => panic!(
-                "tried to unwrap byte class \
-                 from HirFrame, got: {:?}",
-                self
-            ),
-        }
-    }
-
-    /// Assert that the current stack frame is a repetition sentinel. If it
-    /// isn't, then panic.
-    fn unwrap_repetition(self) {
-        match self {
-            HirFrame::Repetition => {}
-            _ => {
-                panic!(
-                    "tried to unwrap repetition from HirFrame, got: {:?}",
-                    self
-                )
-            }
-        }
-    }
-
-    /// Assert that the current stack frame is a group indicator and return
-    /// its corresponding flags (the flags that were active at the time the
-    /// group was entered).
-    fn unwrap_group(self) -> Flags {
-        match self {
-            HirFrame::Group { old_flags } => old_flags,
-            _ => {
-                panic!("tried to unwrap group from HirFrame, got: {:?}", self)
-            }
-        }
-    }
-
-    /// Assert that the current stack frame is an alternation pipe sentinel. If
-    /// it isn't, then panic.
-    fn unwrap_alternation_pipe(self) {
-        match self {
-            HirFrame::AlternationBranch => {}
-            _ => {
-                panic!(
-                    "tried to unwrap alt pipe from HirFrame, got: {:?}",
-                    self
-                )
-            }
-        }
-    }
-}
-
-impl<'t, 'p> Visitor for TranslatorI<'t, 'p> {
-    type Output = Hir;
-    type Err = Error;
-
-    fn finish(self) -> Result<Hir> {
-        // ... otherwise, we should have exactly one HIR on the stack.
-        assert_eq!(self.trans().stack.borrow().len(), 1);
-        Ok(self.pop().unwrap().unwrap_expr())
-    }
-
-    fn visit_pre(&mut self, ast: &Ast) -> Result<()> {
-        match *ast {
-            Ast::ClassBracketed(_) => {
-                if self.flags().unicode() {
-                    let cls = hir::ClassUnicode::empty();
-                    self.push(HirFrame::ClassUnicode(cls));
-                } else {
-                    let cls = hir::ClassBytes::empty();
-                    self.push(HirFrame::ClassBytes(cls));
-                }
-            }
-            Ast::Repetition(_) => self.push(HirFrame::Repetition),
-            Ast::Group(ref x) => {
-                let old_flags = x
-                    .flags()
-                    .map(|ast| self.set_flags(ast))
-                    .unwrap_or_else(|| self.flags());
-                self.push(HirFrame::Group { old_flags });
-            }
-            Ast::Concat(_) => {
-                self.push(HirFrame::Concat);
-            }
-            Ast::Alternation(ref x) => {
-                self.push(HirFrame::Alternation);
-                if !x.asts.is_empty() {
-                    self.push(HirFrame::AlternationBranch);
-                }
-            }
-            _ => {}
-        }
-        Ok(())
-    }
-
-    fn visit_post(&mut self, ast: &Ast) -> Result<()> {
-        match *ast {
-            Ast::Empty(_) => {
-                self.push(HirFrame::Expr(Hir::empty()));
-            }
-            Ast::Flags(ref x) => {
-                self.set_flags(&x.flags);
-                // Flags in the AST are generally considered directives and
-                // not actual sub-expressions. However, they can be used in
-                // the concrete syntax like `((?i))`, and we need some kind of
-                // indication of an expression there, and Empty is the correct
-                // choice.
-                //
-                // There can also be things like `(?i)+`, but we rule those out
-                // in the parser. In the future, we might allow them for
-                // consistency sake.
-                self.push(HirFrame::Expr(Hir::empty()));
-            }
-            Ast::Literal(ref x) => match self.ast_literal_to_scalar(x)? {
-                Either::Right(byte) => self.push_byte(byte),
-                Either::Left(ch) => match self.case_fold_char(x.span, ch)? {
-                    None => self.push_char(ch),
-                    Some(expr) => self.push(HirFrame::Expr(expr)),
-                },
-            },
-            Ast::Dot(ref span) => {
-                self.push(HirFrame::Expr(self.hir_dot(**span)?));
-            }
-            Ast::Assertion(ref x) => {
-                self.push(HirFrame::Expr(self.hir_assertion(x)?));
-            }
-            Ast::ClassPerl(ref x) => {
-                if self.flags().unicode() {
-                    let cls = self.hir_perl_unicode_class(x)?;
-                    let hcls = hir::Class::Unicode(cls);
-                    self.push(HirFrame::Expr(Hir::class(hcls)));
-                } else {
-                    let cls = self.hir_perl_byte_class(x)?;
-                    let hcls = hir::Class::Bytes(cls);
-                    self.push(HirFrame::Expr(Hir::class(hcls)));
-                }
-            }
-            Ast::ClassUnicode(ref x) => {
-                let cls = hir::Class::Unicode(self.hir_unicode_class(x)?);
-                self.push(HirFrame::Expr(Hir::class(cls)));
-            }
-            Ast::ClassBracketed(ref ast) => {
-                if self.flags().unicode() {
-                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
-                    self.unicode_fold_and_negate(
-                        &ast.span,
-                        ast.negated,
-                        &mut cls,
-                    )?;
-                    let expr = Hir::class(hir::Class::Unicode(cls));
-                    self.push(HirFrame::Expr(expr));
-                } else {
-                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
-                    self.bytes_fold_and_negate(
-                        &ast.span,
-                        ast.negated,
-                        &mut cls,
-                    )?;
-                    let expr = Hir::class(hir::Class::Bytes(cls));
-                    self.push(HirFrame::Expr(expr));
-                }
-            }
-            Ast::Repetition(ref x) => {
-                let expr = self.pop().unwrap().unwrap_expr();
-                self.pop().unwrap().unwrap_repetition();
-                self.push(HirFrame::Expr(self.hir_repetition(x, expr)));
-            }
-            Ast::Group(ref x) => {
-                let expr = self.pop().unwrap().unwrap_expr();
-                let old_flags = self.pop().unwrap().unwrap_group();
-                self.trans().flags.set(old_flags);
-                self.push(HirFrame::Expr(self.hir_capture(x, expr)));
-            }
-            Ast::Concat(_) => {
-                let mut exprs = vec![];
-                while let Some(expr) = self.pop_concat_expr() {
-                    if !matches!(*expr.kind(), HirKind::Empty) {
-                        exprs.push(expr);
-                    }
-                }
-                exprs.reverse();
-                self.push(HirFrame::Expr(Hir::concat(exprs)));
-            }
-            Ast::Alternation(_) => {
-                let mut exprs = vec![];
-                while let Some(expr) = self.pop_alt_expr() {
-                    self.pop().unwrap().unwrap_alternation_pipe();
-                    exprs.push(expr);
-                }
-                exprs.reverse();
-                self.push(HirFrame::Expr(Hir::alternation(exprs)));
-            }
-        }
-        Ok(())
-    }
-
-    fn visit_alternation_in(&mut self) -> Result<()> {
-        self.push(HirFrame::AlternationBranch);
-        Ok(())
-    }
-
-    fn visit_class_set_item_pre(
-        &mut self,
-        ast: &ast::ClassSetItem,
-    ) -> Result<()> {
-        match *ast {
-            ast::ClassSetItem::Bracketed(_) => {
-                if self.flags().unicode() {
-                    let cls = hir::ClassUnicode::empty();
-                    self.push(HirFrame::ClassUnicode(cls));
-                } else {
-                    let cls = hir::ClassBytes::empty();
-                    self.push(HirFrame::ClassBytes(cls));
-                }
-            }
-            // We needn't handle the Union case here since the visitor will
-            // do it for us.
-            _ => {}
-        }
-        Ok(())
-    }
-
-    fn visit_class_set_item_post(
-        &mut self,
-        ast: &ast::ClassSetItem,
-    ) -> Result<()> {
-        match *ast {
-            ast::ClassSetItem::Empty(_) => {}
-            ast::ClassSetItem::Literal(ref x) => {
-                if self.flags().unicode() {
-                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
-                    cls.push(hir::ClassUnicodeRange::new(x.c, x.c));
-                    self.push(HirFrame::ClassUnicode(cls));
-                } else {
-                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
-                    let byte = self.class_literal_byte(x)?;
-                    cls.push(hir::ClassBytesRange::new(byte, byte));
-                    self.push(HirFrame::ClassBytes(cls));
-                }
-            }
-            ast::ClassSetItem::Range(ref x) => {
-                if self.flags().unicode() {
-                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
-                    cls.push(hir::ClassUnicodeRange::new(x.start.c, x.end.c));
-                    self.push(HirFrame::ClassUnicode(cls));
-                } else {
-                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
-                    let start = self.class_literal_byte(&x.start)?;
-                    let end = self.class_literal_byte(&x.end)?;
-                    cls.push(hir::ClassBytesRange::new(start, end));
-                    self.push(HirFrame::ClassBytes(cls));
-                }
-            }
-            ast::ClassSetItem::Ascii(ref x) => {
-                if self.flags().unicode() {
-                    let xcls = self.hir_ascii_unicode_class(x)?;
-                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
-                    cls.union(&xcls);
-                    self.push(HirFrame::ClassUnicode(cls));
-                } else {
-                    let xcls = self.hir_ascii_byte_class(x)?;
-                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
-                    cls.union(&xcls);
-                    self.push(HirFrame::ClassBytes(cls));
-                }
-            }
-            ast::ClassSetItem::Unicode(ref x) => {
-                let xcls = self.hir_unicode_class(x)?;
-                let mut cls = self.pop().unwrap().unwrap_class_unicode();
-                cls.union(&xcls);
-                self.push(HirFrame::ClassUnicode(cls));
-            }
-            ast::ClassSetItem::Perl(ref x) => {
-                if self.flags().unicode() {
-                    let xcls = self.hir_perl_unicode_class(x)?;
-                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
-                    cls.union(&xcls);
-                    self.push(HirFrame::ClassUnicode(cls));
-                } else {
-                    let xcls = self.hir_perl_byte_class(x)?;
-                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
-                    cls.union(&xcls);
-                    self.push(HirFrame::ClassBytes(cls));
-                }
-            }
-            ast::ClassSetItem::Bracketed(ref ast) => {
-                if self.flags().unicode() {
-                    let mut cls1 = self.pop().unwrap().unwrap_class_unicode();
-                    self.unicode_fold_and_negate(
-                        &ast.span,
-                        ast.negated,
-                        &mut cls1,
-                    )?;
-
-                    let mut cls2 = self.pop().unwrap().unwrap_class_unicode();
-                    cls2.union(&cls1);
-                    self.push(HirFrame::ClassUnicode(cls2));
-                } else {
-                    let mut cls1 = self.pop().unwrap().unwrap_class_bytes();
-                    self.bytes_fold_and_negate(
-                        &ast.span,
-                        ast.negated,
-                        &mut cls1,
-                    )?;
-
-                    let mut cls2 = self.pop().unwrap().unwrap_class_bytes();
-                    cls2.union(&cls1);
-                    self.push(HirFrame::ClassBytes(cls2));
-                }
-            }
-            // This is handled automatically by the visitor.
-            ast::ClassSetItem::Union(_) => {}
-        }
-        Ok(())
-    }
-
-    fn visit_class_set_binary_op_pre(
-        &mut self,
-        _op: &ast::ClassSetBinaryOp,
-    ) -> Result<()> {
-        if self.flags().unicode() {
-            let cls = hir::ClassUnicode::empty();
-            self.push(HirFrame::ClassUnicode(cls));
-        } else {
-            let cls = hir::ClassBytes::empty();
-            self.push(HirFrame::ClassBytes(cls));
-        }
-        Ok(())
-    }
-
-    fn visit_class_set_binary_op_in(
-        &mut self,
-        _op: &ast::ClassSetBinaryOp,
-    ) -> Result<()> {
-        if self.flags().unicode() {
-            let cls = hir::ClassUnicode::empty();
-            self.push(HirFrame::ClassUnicode(cls));
-        } else {
-            let cls = hir::ClassBytes::empty();
-            self.push(HirFrame::ClassBytes(cls));
-        }
-        Ok(())
-    }
-
-    fn visit_class_set_binary_op_post(
-        &mut self,
-        op: &ast::ClassSetBinaryOp,
-    ) -> Result<()> {
-        use crate::ast::ClassSetBinaryOpKind::*;
-
-        if self.flags().unicode() {
-            let mut rhs = self.pop().unwrap().unwrap_class_unicode();
-            let mut lhs = self.pop().unwrap().unwrap_class_unicode();
-            let mut cls = self.pop().unwrap().unwrap_class_unicode();
-            if self.flags().case_insensitive() {
-                rhs.try_case_fold_simple().map_err(|_| {
-                    self.error(
-                        op.rhs.span().clone(),
-                        ErrorKind::UnicodeCaseUnavailable,
-                    )
-                })?;
-                lhs.try_case_fold_simple().map_err(|_| {
-                    self.error(
-                        op.lhs.span().clone(),
-                        ErrorKind::UnicodeCaseUnavailable,
-                    )
-                })?;
-            }
-            match op.kind {
-                Intersection => lhs.intersect(&rhs),
-                Difference => lhs.difference(&rhs),
-                SymmetricDifference => lhs.symmetric_difference(&rhs),
-            }
-            cls.union(&lhs);
-            self.push(HirFrame::ClassUnicode(cls));
-        } else {
-            let mut rhs = self.pop().unwrap().unwrap_class_bytes();
-            let mut lhs = self.pop().unwrap().unwrap_class_bytes();
-            let mut cls = self.pop().unwrap().unwrap_class_bytes();
-            if self.flags().case_insensitive() {
-                rhs.case_fold_simple();
-                lhs.case_fold_simple();
-            }
-            match op.kind {
-                Intersection => lhs.intersect(&rhs),
-                Difference => lhs.difference(&rhs),
-                SymmetricDifference => lhs.symmetric_difference(&rhs),
-            }
-            cls.union(&lhs);
-            self.push(HirFrame::ClassBytes(cls));
-        }
-        Ok(())
-    }
-}
-
-/// The internal implementation of a translator.
-///
-/// This type is responsible for carrying around the original pattern string,
-/// which is not tied to the internal state of a translator.
-///
-/// A TranslatorI exists for the time it takes to translate a single Ast.
-#[derive(Clone, Debug)]
-struct TranslatorI<'t, 'p> {
-    trans: &'t Translator,
-    pattern: &'p str,
-}
-
-impl<'t, 'p> TranslatorI<'t, 'p> {
-    /// Build a new internal translator.
-    fn new(trans: &'t Translator, pattern: &'p str) -> TranslatorI<'t, 'p> {
-        TranslatorI { trans, pattern }
-    }
-
-    /// Return a reference to the underlying translator.
-    fn trans(&self) -> &Translator {
-        &self.trans
-    }
-
-    /// Push the given frame on to the call stack.
-    fn push(&self, frame: HirFrame) {
-        self.trans().stack.borrow_mut().push(frame);
-    }
-
-    /// Push the given literal char on to the call stack.
-    ///
-    /// If the top-most element of the stack is a literal, then the char
-    /// is appended to the end of that literal. Otherwise, a new literal
-    /// containing just the given char is pushed to the top of the stack.
-    fn push_char(&self, ch: char) {
-        let mut buf = [0; 4];
-        let bytes = ch.encode_utf8(&mut buf).as_bytes();
-        let mut stack = self.trans().stack.borrow_mut();
-        if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() {
-            literal.extend_from_slice(bytes);
-        } else {
-            stack.push(HirFrame::Literal(bytes.to_vec()));
-        }
-    }
-
-    /// Push the given literal byte on to the call stack.
-    ///
-    /// If the top-most element of the stack is a literal, then the byte
-    /// is appended to the end of that literal. Otherwise, a new literal
-    /// containing just the given byte is pushed to the top of the stack.
-    fn push_byte(&self, byte: u8) {
-        let mut stack = self.trans().stack.borrow_mut();
-        if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() {
-            literal.push(byte);
-        } else {
-            stack.push(HirFrame::Literal(vec![byte]));
-        }
-    }
-
-    /// Pop the top of the call stack. If the call stack is empty, return None.
-    fn pop(&self) -> Option<HirFrame> {
-        self.trans().stack.borrow_mut().pop()
-    }
-
-    /// Pop an HIR expression from the top of the stack for a concatenation.
-    ///
-    /// This returns None if the stack is empty or when a concat frame is seen.
-    /// Otherwise, it panics if it could not find an HIR expression.
-    fn pop_concat_expr(&self) -> Option<Hir> {
-        let frame = self.pop()?;
-        match frame {
-            HirFrame::Concat => None,
-            HirFrame::Expr(expr) => Some(expr),
-            HirFrame::Literal(lit) => Some(Hir::literal(lit)),
-            HirFrame::ClassUnicode(_) => {
-                unreachable!("expected expr or concat, got Unicode class")
-            }
-            HirFrame::ClassBytes(_) => {
-                unreachable!("expected expr or concat, got byte class")
-            }
-            HirFrame::Repetition => {
-                unreachable!("expected expr or concat, got repetition")
-            }
-            HirFrame::Group { .. } => {
-                unreachable!("expected expr or concat, got group")
-            }
-            HirFrame::Alternation => {
-                unreachable!("expected expr or concat, got alt marker")
-            }
-            HirFrame::AlternationBranch => {
-                unreachable!("expected expr or concat, got alt branch marker")
-            }
-        }
-    }
-
-    /// Pop an HIR expression from the top of the stack for an alternation.
-    ///
-    /// This returns None if the stack is empty or when an alternation frame is
-    /// seen. Otherwise, it panics if it could not find an HIR expression.
-    fn pop_alt_expr(&self) -> Option<Hir> {
-        let frame = self.pop()?;
-        match frame {
-            HirFrame::Alternation => None,
-            HirFrame::Expr(expr) => Some(expr),
-            HirFrame::Literal(lit) => Some(Hir::literal(lit)),
-            HirFrame::ClassUnicode(_) => {
-                unreachable!("expected expr or alt, got Unicode class")
-            }
-            HirFrame::ClassBytes(_) => {
-                unreachable!("expected expr or alt, got byte class")
-            }
-            HirFrame::Repetition => {
-                unreachable!("expected expr or alt, got repetition")
-            }
-            HirFrame::Group { .. } => {
-                unreachable!("expected expr or alt, got group")
-            }
-            HirFrame::Concat => {
-                unreachable!("expected expr or alt, got concat marker")
-            }
-            HirFrame::AlternationBranch => {
-                unreachable!("expected expr or alt, got alt branch marker")
-            }
-        }
-    }
-
-    /// Create a new error with the given span and error type.
-    fn error(&self, span: Span, kind: ErrorKind) -> Error {
-        Error { kind, pattern: self.pattern.to_string(), span }
-    }
-
-    /// Return a copy of the active flags.
-    fn flags(&self) -> Flags {
-        self.trans().flags.get()
-    }
-
-    /// Set the flags of this translator from the flags set in the given AST.
-    /// Then, return the old flags.
-    fn set_flags(&self, ast_flags: &ast::Flags) -> Flags {
-        let old_flags = self.flags();
-        let mut new_flags = Flags::from_ast(ast_flags);
-        new_flags.merge(&old_flags);
-        self.trans().flags.set(new_flags);
-        old_flags
-    }
-
-    /// Convert an Ast literal to its scalar representation.
-    ///
-    /// When Unicode mode is enabled, then this always succeeds and returns a
-    /// `char` (Unicode scalar value).
-    ///
-    /// When Unicode mode is disabled, then a `char` will still be returned
-    /// whenever possible. A byte is returned only when invalid UTF-8 is
-    /// allowed and when the byte is not ASCII. Otherwise, a non-ASCII byte
-    /// will result in an error when invalid UTF-8 is not allowed.
-    fn ast_literal_to_scalar(
-        &self,
-        lit: &ast::Literal,
-    ) -> Result<Either<char, u8>> {
-        if self.flags().unicode() {
-            return Ok(Either::Left(lit.c));
-        }
-        let byte = match lit.byte() {
-            None => return Ok(Either::Left(lit.c)),
-            Some(byte) => byte,
-        };
-        if byte <= 0x7F {
-            return Ok(Either::Left(char::try_from(byte).unwrap()));
-        }
-        if self.trans().utf8 {
-            return Err(self.error(lit.span, ErrorKind::InvalidUtf8));
-        }
-        Ok(Either::Right(byte))
-    }
-
-    fn case_fold_char(&self, span: Span, c: char) -> Result<Option<Hir>> {
-        if !self.flags().case_insensitive() {
-            return Ok(None);
-        }
-        if self.flags().unicode() {
-            // If case folding won't do anything, then don't bother trying.
-            let map = unicode::SimpleCaseFolder::new()
-                .map(|f| f.overlaps(c, c))
-                .map_err(|_| {
-                    self.error(span, ErrorKind::UnicodeCaseUnavailable)
-                })?;
-            if !map {
-                return Ok(None);
-            }
-            let mut cls =
-                hir::ClassUnicode::new(vec![hir::ClassUnicodeRange::new(
-                    c, c,
-                )]);
-            cls.try_case_fold_simple().map_err(|_| {
-                self.error(span, ErrorKind::UnicodeCaseUnavailable)
-            })?;
-            Ok(Some(Hir::class(hir::Class::Unicode(cls))))
-        } else {
-            if !c.is_ascii() {
-                return Ok(None);
-            }
-            // If case folding won't do anything, then don't bother trying.
-            match c {
-                'A'..='Z' | 'a'..='z' => {}
-                _ => return Ok(None),
-            }
-            let mut cls =
-                hir::ClassBytes::new(vec![hir::ClassBytesRange::new(
-                    // OK because 'c.len_utf8() == 1' which in turn implies
-                    // that 'c' is ASCII.
-                    u8::try_from(c).unwrap(),
-                    u8::try_from(c).unwrap(),
-                )]);
-            cls.case_fold_simple();
-            Ok(Some(Hir::class(hir::Class::Bytes(cls))))
-        }
-    }
-
-    fn hir_dot(&self, span: Span) -> Result<Hir> {
-        let (utf8, lineterm, flags) =
-            (self.trans().utf8, self.trans().line_terminator, self.flags());
-        if utf8 && (!flags.unicode() || !lineterm.is_ascii()) {
-            return Err(self.error(span, ErrorKind::InvalidUtf8));
-        }
-        let dot = if flags.dot_matches_new_line() {
-            if flags.unicode() {
-                hir::Dot::AnyChar
-            } else {
-                hir::Dot::AnyByte
-            }
-        } else {
-            if flags.unicode() {
-                if flags.crlf() {
-                    hir::Dot::AnyCharExceptCRLF
-                } else {
-                    if !lineterm.is_ascii() {
-                        return Err(
-                            self.error(span, ErrorKind::InvalidLineTerminator)
-                        );
-                    }
-                    hir::Dot::AnyCharExcept(char::from(lineterm))
-                }
-            } else {
-                if flags.crlf() {
-                    hir::Dot::AnyByteExceptCRLF
-                } else {
-                    hir::Dot::AnyByteExcept(lineterm)
-                }
-            }
-        };
-        Ok(Hir::dot(dot))
-    }
-
-    fn hir_assertion(&self, asst: &ast::Assertion) -> Result<Hir> {
-        let unicode = self.flags().unicode();
-        let multi_line = self.flags().multi_line();
-        let crlf = self.flags().crlf();
-        Ok(match asst.kind {
-            ast::AssertionKind::StartLine => Hir::look(if multi_line {
-                if crlf {
-                    hir::Look::StartCRLF
-                } else {
-                    hir::Look::StartLF
-                }
-            } else {
-                hir::Look::Start
-            }),
-            ast::AssertionKind::EndLine => Hir::look(if multi_line {
-                if crlf {
-                    hir::Look::EndCRLF
-                } else {
-                    hir::Look::EndLF
-                }
-            } else {
-                hir::Look::End
-            }),
-            ast::AssertionKind::StartText => Hir::look(hir::Look::Start),
-            ast::AssertionKind::EndText => Hir::look(hir::Look::End),
-            ast::AssertionKind::WordBoundary => Hir::look(if unicode {
-                hir::Look::WordUnicode
-            } else {
-                hir::Look::WordAscii
-            }),
-            ast::AssertionKind::NotWordBoundary => Hir::look(if unicode {
-                hir::Look::WordUnicodeNegate
-            } else {
-                hir::Look::WordAsciiNegate
-            }),
-            ast::AssertionKind::WordBoundaryStart
-            | ast::AssertionKind::WordBoundaryStartAngle => {
-                Hir::look(if unicode {
-                    hir::Look::WordStartUnicode
-                } else {
-                    hir::Look::WordStartAscii
-                })
-            }
-            ast::AssertionKind::WordBoundaryEnd
-            | ast::AssertionKind::WordBoundaryEndAngle => {
-                Hir::look(if unicode {
-                    hir::Look::WordEndUnicode
-                } else {
-                    hir::Look::WordEndAscii
-                })
-            }
-            ast::AssertionKind::WordBoundaryStartHalf => {
-                Hir::look(if unicode {
-                    hir::Look::WordStartHalfUnicode
-                } else {
-                    hir::Look::WordStartHalfAscii
-                })
-            }
-            ast::AssertionKind::WordBoundaryEndHalf => Hir::look(if unicode {
-                hir::Look::WordEndHalfUnicode
-            } else {
-                hir::Look::WordEndHalfAscii
-            }),
-        })
-    }
-
-    fn hir_capture(&self, group: &ast::Group, expr: Hir) -> Hir {
-        let (index, name) = match group.kind {
-            ast::GroupKind::CaptureIndex(index) => (index, None),
-            ast::GroupKind::CaptureName { ref name, .. } => {
-                (name.index, Some(name.name.clone().into_boxed_str()))
-            }
-            // The HIR doesn't need to use non-capturing groups, since the way
-            // in which the data type is defined handles this automatically.
-            ast::GroupKind::NonCapturing(_) => return expr,
-        };
-        Hir::capture(hir::Capture { index, name, sub: Box::new(expr) })
-    }
-
-    fn hir_repetition(&self, rep: &ast::Repetition, expr: Hir) -> Hir {
-        let (min, max) = match rep.op.kind {
-            ast::RepetitionKind::ZeroOrOne => (0, Some(1)),
-            ast::RepetitionKind::ZeroOrMore => (0, None),
-            ast::RepetitionKind::OneOrMore => (1, None),
-            ast::RepetitionKind::Range(ast::RepetitionRange::Exactly(m)) => {
-                (m, Some(m))
-            }
-            ast::RepetitionKind::Range(ast::RepetitionRange::AtLeast(m)) => {
-                (m, None)
-            }
-            ast::RepetitionKind::Range(ast::RepetitionRange::Bounded(
-                m,
-                n,
-            )) => (m, Some(n)),
-        };
-        let greedy =
-            if self.flags().swap_greed() { !rep.greedy } else { rep.greedy };
-        Hir::repetition(hir::Repetition {
-            min,
-            max,
-            greedy,
-            sub: Box::new(expr),
-        })
-    }
-
-    fn hir_unicode_class(
-        &self,
-        ast_class: &ast::ClassUnicode,
-    ) -> Result<hir::ClassUnicode> {
-        use crate::ast::ClassUnicodeKind::*;
-
-        if !self.flags().unicode() {
-            return Err(
-                self.error(ast_class.span, ErrorKind::UnicodeNotAllowed)
-            );
-        }
-        let query = match ast_class.kind {
-            OneLetter(name) => ClassQuery::OneLetter(name),
-            Named(ref name) => ClassQuery::Binary(name),
-            NamedValue { ref name, ref value, .. } => ClassQuery::ByValue {
-                property_name: name,
-                property_value: value,
-            },
-        };
-        let mut result = self.convert_unicode_class_error(
-            &ast_class.span,
-            unicode::class(query),
-        );
-        if let Ok(ref mut class) = result {
-            self.unicode_fold_and_negate(
-                &ast_class.span,
-                ast_class.negated,
-                class,
-            )?;
-        }
-        result
-    }
-
-    fn hir_ascii_unicode_class(
-        &self,
-        ast: &ast::ClassAscii,
-    ) -> Result<hir::ClassUnicode> {
-        let mut cls = hir::ClassUnicode::new(
-            ascii_class_as_chars(&ast.kind)
-                .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)),
-        );
-        self.unicode_fold_and_negate(&ast.span, ast.negated, &mut cls)?;
-        Ok(cls)
-    }
-
-    fn hir_ascii_byte_class(
-        &self,
-        ast: &ast::ClassAscii,
-    ) -> Result<hir::ClassBytes> {
-        let mut cls = hir::ClassBytes::new(
-            ascii_class(&ast.kind)
-                .map(|(s, e)| hir::ClassBytesRange::new(s, e)),
-        );
-        self.bytes_fold_and_negate(&ast.span, ast.negated, &mut cls)?;
-        Ok(cls)
-    }
-
-    fn hir_perl_unicode_class(
-        &self,
-        ast_class: &ast::ClassPerl,
-    ) -> Result<hir::ClassUnicode> {
-        use crate::ast::ClassPerlKind::*;
-
-        assert!(self.flags().unicode());
-        let result = match ast_class.kind {
-            Digit => unicode::perl_digit(),
-            Space => unicode::perl_space(),
-            Word => unicode::perl_word(),
-        };
-        let mut class =
-            self.convert_unicode_class_error(&ast_class.span, result)?;
-        // We needn't apply case folding here because the Perl Unicode classes
-        // are already closed under Unicode simple case folding.
-        if ast_class.negated {
-            class.negate();
-        }
-        Ok(class)
-    }
-
-    fn hir_perl_byte_class(
-        &self,
-        ast_class: &ast::ClassPerl,
-    ) -> Result<hir::ClassBytes> {
-        use crate::ast::ClassPerlKind::*;
-
-        assert!(!self.flags().unicode());
-        let mut class = match ast_class.kind {
-            Digit => hir_ascii_class_bytes(&ast::ClassAsciiKind::Digit),
-            Space => hir_ascii_class_bytes(&ast::ClassAsciiKind::Space),
-            Word => hir_ascii_class_bytes(&ast::ClassAsciiKind::Word),
-        };
-        // We needn't apply case folding here because the Perl ASCII classes
-        // are already closed (under ASCII case folding).
-        if ast_class.negated {
-            class.negate();
-        }
-        // Negating a Perl byte class is likely to cause it to match invalid
-        // UTF-8. That's only OK if the translator is configured to allow such
-        // things.
-        if self.trans().utf8 && !class.is_ascii() {
-            return Err(self.error(ast_class.span, ErrorKind::InvalidUtf8));
-        }
-        Ok(class)
-    }
-
-    /// Converts the given Unicode specific error to an HIR translation error.
-    ///
-    /// The span given should approximate the position at which an error would
-    /// occur.
-    fn convert_unicode_class_error(
-        &self,
-        span: &Span,
-        result: core::result::Result<hir::ClassUnicode, unicode::Error>,
-    ) -> Result<hir::ClassUnicode> {
-        result.map_err(|err| {
-            let sp = span.clone();
-            match err {
-                unicode::Error::PropertyNotFound => {
-                    self.error(sp, ErrorKind::UnicodePropertyNotFound)
-                }
-                unicode::Error::PropertyValueNotFound => {
-                    self.error(sp, ErrorKind::UnicodePropertyValueNotFound)
-                }
-                unicode::Error::PerlClassNotFound => {
-                    self.error(sp, ErrorKind::UnicodePerlClassNotFound)
-                }
-            }
-        })
-    }
-
-    fn unicode_fold_and_negate(
-        &self,
-        span: &Span,
-        negated: bool,
-        class: &mut hir::ClassUnicode,
-    ) -> Result<()> {
-        // Note that we must apply case folding before negation!
-        // Consider `(?i)[^x]`. If we applied negation first, then
-        // the result would be the character class that matched any
-        // Unicode scalar value.
-        if self.flags().case_insensitive() {
-            class.try_case_fold_simple().map_err(|_| {
-                self.error(span.clone(), ErrorKind::UnicodeCaseUnavailable)
-            })?;
-        }
-        if negated {
-            class.negate();
-        }
-        Ok(())
-    }
-
-    fn bytes_fold_and_negate(
-        &self,
-        span: &Span,
-        negated: bool,
-        class: &mut hir::ClassBytes,
-    ) -> Result<()> {
-        // Note that we must apply case folding before negation!
-        // Consider `(?i)[^x]`. If we applied negation first, then
-        // the result would be the character class that matched any
-        // Unicode scalar value.
-        if self.flags().case_insensitive() {
-            class.case_fold_simple();
-        }
-        if negated {
-            class.negate();
-        }
-        if self.trans().utf8 && !class.is_ascii() {
-            return Err(self.error(span.clone(), ErrorKind::InvalidUtf8));
-        }
-        Ok(())
-    }
-
-    /// Return a scalar byte value suitable for use as a literal in a byte
-    /// character class.
-    fn class_literal_byte(&self, ast: &ast::Literal) -> Result<u8> {
-        match self.ast_literal_to_scalar(ast)? {
-            Either::Right(byte) => Ok(byte),
-            Either::Left(ch) => {
-                if ch.is_ascii() {
-                    Ok(u8::try_from(ch).unwrap())
-                } else {
-                    // We can't feasibly support Unicode in
-                    // byte oriented classes. Byte classes don't
-                    // do Unicode case folding.
-                    Err(self.error(ast.span, ErrorKind::UnicodeNotAllowed))
-                }
-            }
-        }
-    }
-}
-
-/// A translator's representation of a regular expression's flags at any given
-/// moment in time.
-///
-/// Each flag can be in one of three states: absent, present but disabled or
-/// present but enabled.
-#[derive(Clone, Copy, Debug, Default)]
-struct Flags {
-    case_insensitive: Option<bool>,
-    multi_line: Option<bool>,
-    dot_matches_new_line: Option<bool>,
-    swap_greed: Option<bool>,
-    unicode: Option<bool>,
-    crlf: Option<bool>,
-    // Note that `ignore_whitespace` is omitted here because it is handled
-    // entirely in the parser.
-}
-
-impl Flags {
-    fn from_ast(ast: &ast::Flags) -> Flags {
-        let mut flags = Flags::default();
-        let mut enable = true;
-        for item in &ast.items {
-            match item.kind {
-                ast::FlagsItemKind::Negation => {
-                    enable = false;
-                }
-                ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive) => {
-                    flags.case_insensitive = Some(enable);
-                }
-                ast::FlagsItemKind::Flag(ast::Flag::MultiLine) => {
-                    flags.multi_line = Some(enable);
-                }
-                ast::FlagsItemKind::Flag(ast::Flag::DotMatchesNewLine) => {
-                    flags.dot_matches_new_line = Some(enable);
-                }
-                ast::FlagsItemKind::Flag(ast::Flag::SwapGreed) => {
-                    flags.swap_greed = Some(enable);
-                }
-                ast::FlagsItemKind::Flag(ast::Flag::Unicode) => {
-                    flags.unicode = Some(enable);
-                }
-                ast::FlagsItemKind::Flag(ast::Flag::CRLF) => {
-                    flags.crlf = Some(enable);
-                }
-                ast::FlagsItemKind::Flag(ast::Flag::IgnoreWhitespace) => {}
-            }
-        }
-        flags
-    }
-
-    fn merge(&mut self, previous: &Flags) {
-        if self.case_insensitive.is_none() {
-            self.case_insensitive = previous.case_insensitive;
-        }
-        if self.multi_line.is_none() {
-            self.multi_line = previous.multi_line;
-        }
-        if self.dot_matches_new_line.is_none() {
-            self.dot_matches_new_line = previous.dot_matches_new_line;
-        }
-        if self.swap_greed.is_none() {
-            self.swap_greed = previous.swap_greed;
-        }
-        if self.unicode.is_none() {
-            self.unicode = previous.unicode;
-        }
-        if self.crlf.is_none() {
-            self.crlf = previous.crlf;
-        }
-    }
-
-    fn case_insensitive(&self) -> bool {
-        self.case_insensitive.unwrap_or(false)
-    }
-
-    fn multi_line(&self) -> bool {
-        self.multi_line.unwrap_or(false)
-    }
-
-    fn dot_matches_new_line(&self) -> bool {
-        self.dot_matches_new_line.unwrap_or(false)
-    }
-
-    fn swap_greed(&self) -> bool {
-        self.swap_greed.unwrap_or(false)
-    }
-
-    fn unicode(&self) -> bool {
-        self.unicode.unwrap_or(true)
-    }
-
-    fn crlf(&self) -> bool {
-        self.crlf.unwrap_or(false)
-    }
-}
-
-fn hir_ascii_class_bytes(kind: &ast::ClassAsciiKind) -> hir::ClassBytes {
-    let ranges: Vec<_> = ascii_class(kind)
-        .map(|(s, e)| hir::ClassBytesRange::new(s, e))
-        .collect();
-    hir::ClassBytes::new(ranges)
-}
-
-fn ascii_class(kind: &ast::ClassAsciiKind) -> impl Iterator<Item = (u8, u8)> {
-    use crate::ast::ClassAsciiKind::*;
-
-    let slice: &'static [(u8, u8)] = match *kind {
-        Alnum => &[(b'0', b'9'), (b'A', b'Z'), (b'a', b'z')],
-        Alpha => &[(b'A', b'Z'), (b'a', b'z')],
-        Ascii => &[(b'\x00', b'\x7F')],
-        Blank => &[(b'\t', b'\t'), (b' ', b' ')],
-        Cntrl => &[(b'\x00', b'\x1F'), (b'\x7F', b'\x7F')],
-        Digit => &[(b'0', b'9')],
-        Graph => &[(b'!', b'~')],
-        Lower => &[(b'a', b'z')],
-        Print => &[(b' ', b'~')],
-        Punct => &[(b'!', b'/'), (b':', b'@'), (b'[', b'`'), (b'{', b'~')],
-        Space => &[
-            (b'\t', b'\t'),
-            (b'\n', b'\n'),
-            (b'\x0B', b'\x0B'),
-            (b'\x0C', b'\x0C'),
-            (b'\r', b'\r'),
-            (b' ', b' '),
-        ],
-        Upper => &[(b'A', b'Z')],
-        Word => &[(b'0', b'9'), (b'A', b'Z'), (b'_', b'_'), (b'a', b'z')],
-        Xdigit => &[(b'0', b'9'), (b'A', b'F'), (b'a', b'f')],
-    };
-    slice.iter().copied()
-}
-
-fn ascii_class_as_chars(
-    kind: &ast::ClassAsciiKind,
-) -> impl Iterator<Item = (char, char)> {
-    ascii_class(kind).map(|(s, e)| (char::from(s), char::from(e)))
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::{
-        ast::{parse::ParserBuilder, Position},
-        hir::{Look, Properties},
-    };
-
-    use super::*;
-
-    // We create these errors to compare with real hir::Errors in the tests.
-    // We define equality between TestError and hir::Error to disregard the
-    // pattern string in hir::Error, which is annoying to provide in tests.
-    #[derive(Clone, Debug)]
-    struct TestError {
-        span: Span,
-        kind: hir::ErrorKind,
-    }
-
-    impl PartialEq<hir::Error> for TestError {
-        fn eq(&self, other: &hir::Error) -> bool {
-            self.span == other.span && self.kind == other.kind
-        }
-    }
-
-    impl PartialEq<TestError> for hir::Error {
-        fn eq(&self, other: &TestError) -> bool {
-            self.span == other.span && self.kind == other.kind
-        }
-    }
-
-    fn parse(pattern: &str) -> Ast {
-        ParserBuilder::new().octal(true).build().parse(pattern).unwrap()
-    }
-
-    fn t(pattern: &str) -> Hir {
-        TranslatorBuilder::new()
-            .utf8(true)
-            .build()
-            .translate(pattern, &parse(pattern))
-            .unwrap()
-    }
-
-    fn t_err(pattern: &str) -> hir::Error {
-        TranslatorBuilder::new()
-            .utf8(true)
-            .build()
-            .translate(pattern, &parse(pattern))
-            .unwrap_err()
-    }
-
-    fn t_bytes(pattern: &str) -> Hir {
-        TranslatorBuilder::new()
-            .utf8(false)
-            .build()
-            .translate(pattern, &parse(pattern))
-            .unwrap()
-    }
-
-    fn props(pattern: &str) -> Properties {
-        t(pattern).properties().clone()
-    }
-
-    fn props_bytes(pattern: &str) -> Properties {
-        t_bytes(pattern).properties().clone()
-    }
-
-    fn hir_lit(s: &str) -> Hir {
-        hir_blit(s.as_bytes())
-    }
-
-    fn hir_blit(s: &[u8]) -> Hir {
-        Hir::literal(s)
-    }
-
-    fn hir_capture(index: u32, expr: Hir) -> Hir {
-        Hir::capture(hir::Capture { index, name: None, sub: Box::new(expr) })
-    }
-
-    fn hir_capture_name(index: u32, name: &str, expr: Hir) -> Hir {
-        Hir::capture(hir::Capture {
-            index,
-            name: Some(name.into()),
-            sub: Box::new(expr),
-        })
-    }
-
-    fn hir_quest(greedy: bool, expr: Hir) -> Hir {
-        Hir::repetition(hir::Repetition {
-            min: 0,
-            max: Some(1),
-            greedy,
-            sub: Box::new(expr),
-        })
-    }
-
-    fn hir_star(greedy: bool, expr: Hir) -> Hir {
-        Hir::repetition(hir::Repetition {
-            min: 0,
-            max: None,
-            greedy,
-            sub: Box::new(expr),
-        })
-    }
-
-    fn hir_plus(greedy: bool, expr: Hir) -> Hir {
-        Hir::repetition(hir::Repetition {
-            min: 1,
-            max: None,
-            greedy,
-            sub: Box::new(expr),
-        })
-    }
-
-    fn hir_range(greedy: bool, min: u32, max: Option<u32>, expr: Hir) -> Hir {
-        Hir::repetition(hir::Repetition {
-            min,
-            max,
-            greedy,
-            sub: Box::new(expr),
-        })
-    }
-
-    fn hir_alt(alts: Vec<Hir>) -> Hir {
-        Hir::alternation(alts)
-    }
-
-    fn hir_cat(exprs: Vec<Hir>) -> Hir {
-        Hir::concat(exprs)
-    }
-
-    #[allow(dead_code)]
-    fn hir_uclass_query(query: ClassQuery<'_>) -> Hir {
-        Hir::class(hir::Class::Unicode(unicode::class(query).unwrap()))
-    }
-
-    #[allow(dead_code)]
-    fn hir_uclass_perl_word() -> Hir {
-        Hir::class(hir::Class::Unicode(unicode::perl_word().unwrap()))
-    }
-
-    fn hir_ascii_uclass(kind: &ast::ClassAsciiKind) -> Hir {
-        Hir::class(hir::Class::Unicode(hir::ClassUnicode::new(
-            ascii_class_as_chars(kind)
-                .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)),
-        )))
-    }
-
-    fn hir_ascii_bclass(kind: &ast::ClassAsciiKind) -> Hir {
-        Hir::class(hir::Class::Bytes(hir::ClassBytes::new(
-            ascii_class(kind).map(|(s, e)| hir::ClassBytesRange::new(s, e)),
-        )))
-    }
-
-    fn hir_uclass(ranges: &[(char, char)]) -> Hir {
-        Hir::class(uclass(ranges))
-    }
-
-    fn hir_bclass(ranges: &[(u8, u8)]) -> Hir {
-        Hir::class(bclass(ranges))
-    }
-
-    fn hir_case_fold(expr: Hir) -> Hir {
-        match expr.into_kind() {
-            HirKind::Class(mut cls) => {
-                cls.case_fold_simple();
-                Hir::class(cls)
-            }
-            _ => panic!("cannot case fold non-class Hir expr"),
-        }
-    }
-
-    fn hir_negate(expr: Hir) -> Hir {
-        match expr.into_kind() {
-            HirKind::Class(mut cls) => {
-                cls.negate();
-                Hir::class(cls)
-            }
-            _ => panic!("cannot negate non-class Hir expr"),
-        }
-    }
-
-    fn uclass(ranges: &[(char, char)]) -> hir::Class {
-        let ranges: Vec<hir::ClassUnicodeRange> = ranges
-            .iter()
-            .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e))
-            .collect();
-        hir::Class::Unicode(hir::ClassUnicode::new(ranges))
-    }
-
-    fn bclass(ranges: &[(u8, u8)]) -> hir::Class {
-        let ranges: Vec<hir::ClassBytesRange> = ranges
-            .iter()
-            .map(|&(s, e)| hir::ClassBytesRange::new(s, e))
-            .collect();
-        hir::Class::Bytes(hir::ClassBytes::new(ranges))
-    }
-
-    #[cfg(feature = "unicode-case")]
-    fn class_case_fold(mut cls: hir::Class) -> Hir {
-        cls.case_fold_simple();
-        Hir::class(cls)
-    }
-
-    fn class_negate(mut cls: hir::Class) -> Hir {
-        cls.negate();
-        Hir::class(cls)
-    }
-
-    #[allow(dead_code)]
-    fn hir_union(expr1: Hir, expr2: Hir) -> Hir {
-        use crate::hir::Class::{Bytes, Unicode};
-
-        match (expr1.into_kind(), expr2.into_kind()) {
-            (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => {
-                c1.union(&c2);
-                Hir::class(hir::Class::Unicode(c1))
-            }
-            (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => {
-                c1.union(&c2);
-                Hir::class(hir::Class::Bytes(c1))
-            }
-            _ => panic!("cannot union non-class Hir exprs"),
-        }
-    }
-
-    #[allow(dead_code)]
-    fn hir_difference(expr1: Hir, expr2: Hir) -> Hir {
-        use crate::hir::Class::{Bytes, Unicode};
-
-        match (expr1.into_kind(), expr2.into_kind()) {
-            (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => {
-                c1.difference(&c2);
-                Hir::class(hir::Class::Unicode(c1))
-            }
-            (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => {
-                c1.difference(&c2);
-                Hir::class(hir::Class::Bytes(c1))
-            }
-            _ => panic!("cannot difference non-class Hir exprs"),
-        }
-    }
-
-    fn hir_look(look: hir::Look) -> Hir {
-        Hir::look(look)
-    }
-
-    #[test]
-    fn empty() {
-        assert_eq!(t(""), Hir::empty());
-        assert_eq!(t("(?i)"), Hir::empty());
-        assert_eq!(t("()"), hir_capture(1, Hir::empty()));
-        assert_eq!(t("(?:)"), Hir::empty());
-        assert_eq!(t("(?P<wat>)"), hir_capture_name(1, "wat", Hir::empty()));
-        assert_eq!(t("|"), hir_alt(vec![Hir::empty(), Hir::empty()]));
-        assert_eq!(
-            t("()|()"),
-            hir_alt(vec![
-                hir_capture(1, Hir::empty()),
-                hir_capture(2, Hir::empty()),
-            ])
-        );
-        assert_eq!(
-            t("(|b)"),
-            hir_capture(1, hir_alt(vec![Hir::empty(), hir_lit("b"),]))
-        );
-        assert_eq!(
-            t("(a|)"),
-            hir_capture(1, hir_alt(vec![hir_lit("a"), Hir::empty(),]))
-        );
-        assert_eq!(
-            t("(a||c)"),
-            hir_capture(
-                1,
-                hir_alt(vec![hir_lit("a"), Hir::empty(), hir_lit("c"),])
-            )
-        );
-        assert_eq!(
-            t("(||)"),
-            hir_capture(
-                1,
-                hir_alt(vec![Hir::empty(), Hir::empty(), Hir::empty(),])
-            )
-        );
-    }
-
-    #[test]
-    fn literal() {
-        assert_eq!(t("a"), hir_lit("a"));
-        assert_eq!(t("(?-u)a"), hir_lit("a"));
-        assert_eq!(t("☃"), hir_lit("☃"));
-        assert_eq!(t("abcd"), hir_lit("abcd"));
-
-        assert_eq!(t_bytes("(?-u)a"), hir_lit("a"));
-        assert_eq!(t_bytes("(?-u)\x61"), hir_lit("a"));
-        assert_eq!(t_bytes(r"(?-u)\x61"), hir_lit("a"));
-        assert_eq!(t_bytes(r"(?-u)\xFF"), hir_blit(b"\xFF"));
-
-        assert_eq!(t("(?-u)☃"), hir_lit("☃"));
-        assert_eq!(
-            t_err(r"(?-u)\xFF"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(9, 1, 10)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    fn literal_case_insensitive() {
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(t("(?i)a"), hir_uclass(&[('A', 'A'), ('a', 'a'),]));
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(t("(?i:a)"), hir_uclass(&[('A', 'A'), ('a', 'a')]));
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("a(?i)a(?-i)a"),
-            hir_cat(vec![
-                hir_lit("a"),
-                hir_uclass(&[('A', 'A'), ('a', 'a')]),
-                hir_lit("a"),
-            ])
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)ab@c"),
-            hir_cat(vec![
-                hir_uclass(&[('A', 'A'), ('a', 'a')]),
-                hir_uclass(&[('B', 'B'), ('b', 'b')]),
-                hir_lit("@"),
-                hir_uclass(&[('C', 'C'), ('c', 'c')]),
-            ])
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)β"),
-            hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),])
-        );
-
-        assert_eq!(t("(?i-u)a"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]));
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?-u)a(?i)a(?-i)a"),
-            hir_cat(vec![
-                hir_lit("a"),
-                hir_bclass(&[(b'A', b'A'), (b'a', b'a')]),
-                hir_lit("a"),
-            ])
-        );
-        assert_eq!(
-            t("(?i-u)ab@c"),
-            hir_cat(vec![
-                hir_bclass(&[(b'A', b'A'), (b'a', b'a')]),
-                hir_bclass(&[(b'B', b'B'), (b'b', b'b')]),
-                hir_lit("@"),
-                hir_bclass(&[(b'C', b'C'), (b'c', b'c')]),
-            ])
-        );
-
-        assert_eq!(
-            t_bytes("(?i-u)a"),
-            hir_bclass(&[(b'A', b'A'), (b'a', b'a'),])
-        );
-        assert_eq!(
-            t_bytes("(?i-u)\x61"),
-            hir_bclass(&[(b'A', b'A'), (b'a', b'a'),])
-        );
-        assert_eq!(
-            t_bytes(r"(?i-u)\x61"),
-            hir_bclass(&[(b'A', b'A'), (b'a', b'a'),])
-        );
-        assert_eq!(t_bytes(r"(?i-u)\xFF"), hir_blit(b"\xFF"));
-
-        assert_eq!(t("(?i-u)β"), hir_lit("β"),);
-    }
-
-    #[test]
-    fn dot() {
-        assert_eq!(
-            t("."),
-            hir_uclass(&[('\0', '\t'), ('\x0B', '\u{10FFFF}')])
-        );
-        assert_eq!(
-            t("(?R)."),
-            hir_uclass(&[
-                ('\0', '\t'),
-                ('\x0B', '\x0C'),
-                ('\x0E', '\u{10FFFF}'),
-            ])
-        );
-        assert_eq!(t("(?s)."), hir_uclass(&[('\0', '\u{10FFFF}')]));
-        assert_eq!(t("(?Rs)."), hir_uclass(&[('\0', '\u{10FFFF}')]));
-        assert_eq!(
-            t_bytes("(?-u)."),
-            hir_bclass(&[(b'\0', b'\t'), (b'\x0B', b'\xFF')])
-        );
-        assert_eq!(
-            t_bytes("(?R-u)."),
-            hir_bclass(&[
-                (b'\0', b'\t'),
-                (b'\x0B', b'\x0C'),
-                (b'\x0E', b'\xFF'),
-            ])
-        );
-        assert_eq!(t_bytes("(?s-u)."), hir_bclass(&[(b'\0', b'\xFF'),]));
-        assert_eq!(t_bytes("(?Rs-u)."), hir_bclass(&[(b'\0', b'\xFF'),]));
-
-        // If invalid UTF-8 isn't allowed, then non-Unicode `.` isn't allowed.
-        assert_eq!(
-            t_err("(?-u)."),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(6, 1, 7)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err("(?R-u)."),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(6, 1, 7),
-                    Position::new(7, 1, 8)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err("(?s-u)."),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(6, 1, 7),
-                    Position::new(7, 1, 8)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err("(?Rs-u)."),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(7, 1, 8),
-                    Position::new(8, 1, 9)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    fn assertions() {
-        assert_eq!(t("^"), hir_look(hir::Look::Start));
-        assert_eq!(t("$"), hir_look(hir::Look::End));
-        assert_eq!(t(r"\A"), hir_look(hir::Look::Start));
-        assert_eq!(t(r"\z"), hir_look(hir::Look::End));
-        assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF));
-        assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF));
-        assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start));
-        assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End));
-
-        assert_eq!(t(r"\b"), hir_look(hir::Look::WordUnicode));
-        assert_eq!(t(r"\B"), hir_look(hir::Look::WordUnicodeNegate));
-        assert_eq!(t(r"(?-u)\b"), hir_look(hir::Look::WordAscii));
-        assert_eq!(t(r"(?-u)\B"), hir_look(hir::Look::WordAsciiNegate));
-    }
-
-    #[test]
-    fn group() {
-        assert_eq!(t("(a)"), hir_capture(1, hir_lit("a")));
-        assert_eq!(
-            t("(a)(b)"),
-            hir_cat(vec![
-                hir_capture(1, hir_lit("a")),
-                hir_capture(2, hir_lit("b")),
-            ])
-        );
-        assert_eq!(
-            t("(a)|(b)"),
-            hir_alt(vec![
-                hir_capture(1, hir_lit("a")),
-                hir_capture(2, hir_lit("b")),
-            ])
-        );
-        assert_eq!(t("(?P<foo>)"), hir_capture_name(1, "foo", Hir::empty()));
-        assert_eq!(t("(?P<foo>a)"), hir_capture_name(1, "foo", hir_lit("a")));
-        assert_eq!(
-            t("(?P<foo>a)(?P<bar>b)"),
-            hir_cat(vec![
-                hir_capture_name(1, "foo", hir_lit("a")),
-                hir_capture_name(2, "bar", hir_lit("b")),
-            ])
-        );
-        assert_eq!(t("(?:)"), Hir::empty());
-        assert_eq!(t("(?:a)"), hir_lit("a"));
-        assert_eq!(
-            t("(?:a)(b)"),
-            hir_cat(vec![hir_lit("a"), hir_capture(1, hir_lit("b")),])
-        );
-        assert_eq!(
-            t("(a)(?:b)(c)"),
-            hir_cat(vec![
-                hir_capture(1, hir_lit("a")),
-                hir_lit("b"),
-                hir_capture(2, hir_lit("c")),
-            ])
-        );
-        assert_eq!(
-            t("(a)(?P<foo>b)(c)"),
-            hir_cat(vec![
-                hir_capture(1, hir_lit("a")),
-                hir_capture_name(2, "foo", hir_lit("b")),
-                hir_capture(3, hir_lit("c")),
-            ])
-        );
-        assert_eq!(t("()"), hir_capture(1, Hir::empty()));
-        assert_eq!(t("((?i))"), hir_capture(1, Hir::empty()));
-        assert_eq!(t("((?x))"), hir_capture(1, Hir::empty()));
-        assert_eq!(
-            t("(((?x)))"),
-            hir_capture(1, hir_capture(2, Hir::empty()))
-        );
-    }
-
-    #[test]
-    fn line_anchors() {
-        assert_eq!(t("^"), hir_look(hir::Look::Start));
-        assert_eq!(t("$"), hir_look(hir::Look::End));
-        assert_eq!(t(r"\A"), hir_look(hir::Look::Start));
-        assert_eq!(t(r"\z"), hir_look(hir::Look::End));
-
-        assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start));
-        assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End));
-        assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF));
-        assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF));
-
-        assert_eq!(t(r"(?R)\A"), hir_look(hir::Look::Start));
-        assert_eq!(t(r"(?R)\z"), hir_look(hir::Look::End));
-        assert_eq!(t("(?R)^"), hir_look(hir::Look::Start));
-        assert_eq!(t("(?R)$"), hir_look(hir::Look::End));
-
-        assert_eq!(t(r"(?Rm)\A"), hir_look(hir::Look::Start));
-        assert_eq!(t(r"(?Rm)\z"), hir_look(hir::Look::End));
-        assert_eq!(t("(?Rm)^"), hir_look(hir::Look::StartCRLF));
-        assert_eq!(t("(?Rm)$"), hir_look(hir::Look::EndCRLF));
-    }
-
-    #[test]
-    fn flags() {
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i:a)a"),
-            hir_cat(
-                vec![hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_lit("a"),]
-            )
-        );
-        assert_eq!(
-            t("(?i-u:a)β"),
-            hir_cat(vec![
-                hir_bclass(&[(b'A', b'A'), (b'a', b'a')]),
-                hir_lit("β"),
-            ])
-        );
-        assert_eq!(
-            t("(?:(?i-u)a)b"),
-            hir_cat(vec![
-                hir_bclass(&[(b'A', b'A'), (b'a', b'a')]),
-                hir_lit("b"),
-            ])
-        );
-        assert_eq!(
-            t("((?i-u)a)b"),
-            hir_cat(vec![
-                hir_capture(1, hir_bclass(&[(b'A', b'A'), (b'a', b'a')])),
-                hir_lit("b"),
-            ])
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)(?-i:a)a"),
-            hir_cat(
-                vec![hir_lit("a"), hir_uclass(&[('A', 'A'), ('a', 'a')]),]
-            )
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?im)a^"),
-            hir_cat(vec![
-                hir_uclass(&[('A', 'A'), ('a', 'a')]),
-                hir_look(hir::Look::StartLF),
-            ])
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?im)a^(?i-m)a^"),
-            hir_cat(vec![
-                hir_uclass(&[('A', 'A'), ('a', 'a')]),
-                hir_look(hir::Look::StartLF),
-                hir_uclass(&[('A', 'A'), ('a', 'a')]),
-                hir_look(hir::Look::Start),
-            ])
-        );
-        assert_eq!(
-            t("(?U)a*a*?(?-U)a*a*?"),
-            hir_cat(vec![
-                hir_star(false, hir_lit("a")),
-                hir_star(true, hir_lit("a")),
-                hir_star(true, hir_lit("a")),
-                hir_star(false, hir_lit("a")),
-            ])
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?:a(?i)a)a"),
-            hir_cat(vec![
-                hir_cat(vec![
-                    hir_lit("a"),
-                    hir_uclass(&[('A', 'A'), ('a', 'a')]),
-                ]),
-                hir_lit("a"),
-            ])
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)(?:a(?-i)a)a"),
-            hir_cat(vec![
-                hir_cat(vec![
-                    hir_uclass(&[('A', 'A'), ('a', 'a')]),
-                    hir_lit("a"),
-                ]),
-                hir_uclass(&[('A', 'A'), ('a', 'a')]),
-            ])
-        );
-    }
-
-    #[test]
-    fn escape() {
-        assert_eq!(
-            t(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#"),
-            hir_lit(r"\.+*?()|[]{}^$#")
-        );
-    }
-
-    #[test]
-    fn repetition() {
-        assert_eq!(t("a?"), hir_quest(true, hir_lit("a")));
-        assert_eq!(t("a*"), hir_star(true, hir_lit("a")));
-        assert_eq!(t("a+"), hir_plus(true, hir_lit("a")));
-        assert_eq!(t("a??"), hir_quest(false, hir_lit("a")));
-        assert_eq!(t("a*?"), hir_star(false, hir_lit("a")));
-        assert_eq!(t("a+?"), hir_plus(false, hir_lit("a")));
-
-        assert_eq!(t("a{1}"), hir_range(true, 1, Some(1), hir_lit("a"),));
-        assert_eq!(t("a{1,}"), hir_range(true, 1, None, hir_lit("a"),));
-        assert_eq!(t("a{1,2}"), hir_range(true, 1, Some(2), hir_lit("a"),));
-        assert_eq!(t("a{1}?"), hir_range(false, 1, Some(1), hir_lit("a"),));
-        assert_eq!(t("a{1,}?"), hir_range(false, 1, None, hir_lit("a"),));
-        assert_eq!(t("a{1,2}?"), hir_range(false, 1, Some(2), hir_lit("a"),));
-
-        assert_eq!(
-            t("ab?"),
-            hir_cat(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),])
-        );
-        assert_eq!(t("(ab)?"), hir_quest(true, hir_capture(1, hir_lit("ab"))));
-        assert_eq!(
-            t("a|b?"),
-            hir_alt(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),])
-        );
-    }
-
-    #[test]
-    fn cat_alt() {
-        let a = || hir_look(hir::Look::Start);
-        let b = || hir_look(hir::Look::End);
-        let c = || hir_look(hir::Look::WordUnicode);
-        let d = || hir_look(hir::Look::WordUnicodeNegate);
-
-        assert_eq!(t("(^$)"), hir_capture(1, hir_cat(vec![a(), b()])));
-        assert_eq!(t("^|$"), hir_alt(vec![a(), b()]));
-        assert_eq!(t(r"^|$|\b"), hir_alt(vec![a(), b(), c()]));
-        assert_eq!(
-            t(r"^$|$\b|\b\B"),
-            hir_alt(vec![
-                hir_cat(vec![a(), b()]),
-                hir_cat(vec![b(), c()]),
-                hir_cat(vec![c(), d()]),
-            ])
-        );
-        assert_eq!(t("(^|$)"), hir_capture(1, hir_alt(vec![a(), b()])));
-        assert_eq!(
-            t(r"(^|$|\b)"),
-            hir_capture(1, hir_alt(vec![a(), b(), c()]))
-        );
-        assert_eq!(
-            t(r"(^$|$\b|\b\B)"),
-            hir_capture(
-                1,
-                hir_alt(vec![
-                    hir_cat(vec![a(), b()]),
-                    hir_cat(vec![b(), c()]),
-                    hir_cat(vec![c(), d()]),
-                ])
-            )
-        );
-        assert_eq!(
-            t(r"(^$|($\b|(\b\B)))"),
-            hir_capture(
-                1,
-                hir_alt(vec![
-                    hir_cat(vec![a(), b()]),
-                    hir_capture(
-                        2,
-                        hir_alt(vec![
-                            hir_cat(vec![b(), c()]),
-                            hir_capture(3, hir_cat(vec![c(), d()])),
-                        ])
-                    ),
-                ])
-            )
-        );
-    }
-
-    // Tests the HIR transformation of things like '[a-z]|[A-Z]' into
-    // '[A-Za-z]'. In other words, an alternation of just classes is always
-    // equivalent to a single class corresponding to the union of the branches
-    // in that class. (Unless some branches match invalid UTF-8 and others
-    // match non-ASCII Unicode.)
-    #[test]
-    fn cat_class_flattened() {
-        assert_eq!(t(r"[a-z]|[A-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')]));
-        // Combining all of the letter properties should give us the one giant
-        // letter property.
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"(?x)
-                \p{Lowercase_Letter}
-                |\p{Uppercase_Letter}
-                |\p{Titlecase_Letter}
-                |\p{Modifier_Letter}
-                |\p{Other_Letter}
-            "),
-            hir_uclass_query(ClassQuery::Binary("letter"))
-        );
-        // Byte classes that can truly match invalid UTF-8 cannot be combined
-        // with Unicode classes.
-        assert_eq!(
-            t_bytes(r"[Δδ]|(?-u:[\x90-\xFF])|[Λλ]"),
-            hir_alt(vec![
-                hir_uclass(&[('Δ', 'Δ'), ('δ', 'δ')]),
-                hir_bclass(&[(b'\x90', b'\xFF')]),
-                hir_uclass(&[('Λ', 'Λ'), ('λ', 'λ')]),
-            ])
-        );
-        // Byte classes on their own can be combined, even if some are ASCII
-        // and others are invalid UTF-8.
-        assert_eq!(
-            t_bytes(r"[a-z]|(?-u:[\x90-\xFF])|[A-Z]"),
-            hir_bclass(&[(b'A', b'Z'), (b'a', b'z'), (b'\x90', b'\xFF')]),
-        );
-    }
-
-    #[test]
-    fn class_ascii() {
-        assert_eq!(
-            t("[[:alnum:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Alnum)
-        );
-        assert_eq!(
-            t("[[:alpha:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Alpha)
-        );
-        assert_eq!(
-            t("[[:ascii:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Ascii)
-        );
-        assert_eq!(
-            t("[[:blank:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Blank)
-        );
-        assert_eq!(
-            t("[[:cntrl:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Cntrl)
-        );
-        assert_eq!(
-            t("[[:digit:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Digit)
-        );
-        assert_eq!(
-            t("[[:graph:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Graph)
-        );
-        assert_eq!(
-            t("[[:lower:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Lower)
-        );
-        assert_eq!(
-            t("[[:print:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Print)
-        );
-        assert_eq!(
-            t("[[:punct:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Punct)
-        );
-        assert_eq!(
-            t("[[:space:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Space)
-        );
-        assert_eq!(
-            t("[[:upper:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Upper)
-        );
-        assert_eq!(
-            t("[[:word:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Word)
-        );
-        assert_eq!(
-            t("[[:xdigit:]]"),
-            hir_ascii_uclass(&ast::ClassAsciiKind::Xdigit)
-        );
-
-        assert_eq!(
-            t("[[:^lower:]]"),
-            hir_negate(hir_ascii_uclass(&ast::ClassAsciiKind::Lower))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[[:lower:]]"),
-            hir_uclass(&[
-                ('A', 'Z'),
-                ('a', 'z'),
-                ('\u{17F}', '\u{17F}'),
-                ('\u{212A}', '\u{212A}'),
-            ])
-        );
-
-        assert_eq!(
-            t("(?-u)[[:lower:]]"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Lower)
-        );
-        assert_eq!(
-            t("(?i-u)[[:lower:]]"),
-            hir_case_fold(hir_ascii_bclass(&ast::ClassAsciiKind::Lower))
-        );
-
-        assert_eq!(
-            t_err("(?-u)[[:^lower:]]"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(6, 1, 7),
-                    Position::new(16, 1, 17)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err("(?i-u)[[:^lower:]]"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(7, 1, 8),
-                    Position::new(17, 1, 18)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    fn class_ascii_multiple() {
-        // See: https://github.com/rust-lang/regex/issues/680
-        assert_eq!(
-            t("[[:alnum:][:^ascii:]]"),
-            hir_union(
-                hir_ascii_uclass(&ast::ClassAsciiKind::Alnum),
-                hir_uclass(&[('\u{80}', '\u{10FFFF}')]),
-            ),
-        );
-        assert_eq!(
-            t_bytes("(?-u)[[:alnum:][:^ascii:]]"),
-            hir_union(
-                hir_ascii_bclass(&ast::ClassAsciiKind::Alnum),
-                hir_bclass(&[(0x80, 0xFF)]),
-            ),
-        );
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-perl")]
-    fn class_perl_unicode() {
-        // Unicode
-        assert_eq!(t(r"\d"), hir_uclass_query(ClassQuery::Binary("digit")));
-        assert_eq!(t(r"\s"), hir_uclass_query(ClassQuery::Binary("space")));
-        assert_eq!(t(r"\w"), hir_uclass_perl_word());
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)\d"),
-            hir_uclass_query(ClassQuery::Binary("digit"))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)\s"),
-            hir_uclass_query(ClassQuery::Binary("space"))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(t(r"(?i)\w"), hir_uclass_perl_word());
-
-        // Unicode, negated
-        assert_eq!(
-            t(r"\D"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("digit")))
-        );
-        assert_eq!(
-            t(r"\S"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("space")))
-        );
-        assert_eq!(t(r"\W"), hir_negate(hir_uclass_perl_word()));
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)\D"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("digit")))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)\S"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("space")))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(t(r"(?i)\W"), hir_negate(hir_uclass_perl_word()));
-    }
-
-    #[test]
-    fn class_perl_ascii() {
-        // ASCII only
-        assert_eq!(
-            t(r"(?-u)\d"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Digit)
-        );
-        assert_eq!(
-            t(r"(?-u)\s"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Space)
-        );
-        assert_eq!(
-            t(r"(?-u)\w"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Word)
-        );
-        assert_eq!(
-            t(r"(?i-u)\d"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Digit)
-        );
-        assert_eq!(
-            t(r"(?i-u)\s"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Space)
-        );
-        assert_eq!(
-            t(r"(?i-u)\w"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Word)
-        );
-
-        // ASCII only, negated
-        assert_eq!(
-            t_bytes(r"(?-u)\D"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit))
-        );
-        assert_eq!(
-            t_bytes(r"(?-u)\S"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space))
-        );
-        assert_eq!(
-            t_bytes(r"(?-u)\W"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word))
-        );
-        assert_eq!(
-            t_bytes(r"(?i-u)\D"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit))
-        );
-        assert_eq!(
-            t_bytes(r"(?i-u)\S"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space))
-        );
-        assert_eq!(
-            t_bytes(r"(?i-u)\W"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word))
-        );
-
-        // ASCII only, negated, with UTF-8 mode enabled.
-        // In this case, negating any Perl class results in an error because
-        // all such classes can match invalid UTF-8.
-        assert_eq!(
-            t_err(r"(?-u)\D"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(7, 1, 8),
-                ),
-            },
-        );
-        assert_eq!(
-            t_err(r"(?-u)\S"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(7, 1, 8),
-                ),
-            },
-        );
-        assert_eq!(
-            t_err(r"(?-u)\W"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(7, 1, 8),
-                ),
-            },
-        );
-        assert_eq!(
-            t_err(r"(?i-u)\D"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(6, 1, 7),
-                    Position::new(8, 1, 9),
-                ),
-            },
-        );
-        assert_eq!(
-            t_err(r"(?i-u)\S"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(6, 1, 7),
-                    Position::new(8, 1, 9),
-                ),
-            },
-        );
-        assert_eq!(
-            t_err(r"(?i-u)\W"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(6, 1, 7),
-                    Position::new(8, 1, 9),
-                ),
-            },
-        );
-    }
-
-    #[test]
-    #[cfg(not(feature = "unicode-perl"))]
-    fn class_perl_word_disabled() {
-        assert_eq!(
-            t_err(r"\w"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePerlClassNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(2, 1, 3)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(all(not(feature = "unicode-perl"), not(feature = "unicode-bool")))]
-    fn class_perl_space_disabled() {
-        assert_eq!(
-            t_err(r"\s"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePerlClassNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(2, 1, 3)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(all(
-        not(feature = "unicode-perl"),
-        not(feature = "unicode-gencat")
-    ))]
-    fn class_perl_digit_disabled() {
-        assert_eq!(
-            t_err(r"\d"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePerlClassNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(2, 1, 3)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-gencat")]
-    fn class_unicode_gencat() {
-        assert_eq!(t(r"\pZ"), hir_uclass_query(ClassQuery::Binary("Z")));
-        assert_eq!(t(r"\pz"), hir_uclass_query(ClassQuery::Binary("Z")));
-        assert_eq!(
-            t(r"\p{Separator}"),
-            hir_uclass_query(ClassQuery::Binary("Z"))
-        );
-        assert_eq!(
-            t(r"\p{se      PaRa ToR}"),
-            hir_uclass_query(ClassQuery::Binary("Z"))
-        );
-        assert_eq!(
-            t(r"\p{gc:Separator}"),
-            hir_uclass_query(ClassQuery::Binary("Z"))
-        );
-        assert_eq!(
-            t(r"\p{gc=Separator}"),
-            hir_uclass_query(ClassQuery::Binary("Z"))
-        );
-        assert_eq!(
-            t(r"\p{Other}"),
-            hir_uclass_query(ClassQuery::Binary("Other"))
-        );
-        assert_eq!(t(r"\pC"), hir_uclass_query(ClassQuery::Binary("Other")));
-
-        assert_eq!(
-            t(r"\PZ"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("Z")))
-        );
-        assert_eq!(
-            t(r"\P{separator}"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("Z")))
-        );
-        assert_eq!(
-            t(r"\P{gc!=separator}"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("Z")))
-        );
-
-        assert_eq!(t(r"\p{any}"), hir_uclass_query(ClassQuery::Binary("Any")));
-        assert_eq!(
-            t(r"\p{assigned}"),
-            hir_uclass_query(ClassQuery::Binary("Assigned"))
-        );
-        assert_eq!(
-            t(r"\p{ascii}"),
-            hir_uclass_query(ClassQuery::Binary("ASCII"))
-        );
-        assert_eq!(
-            t(r"\p{gc:any}"),
-            hir_uclass_query(ClassQuery::Binary("Any"))
-        );
-        assert_eq!(
-            t(r"\p{gc:assigned}"),
-            hir_uclass_query(ClassQuery::Binary("Assigned"))
-        );
-        assert_eq!(
-            t(r"\p{gc:ascii}"),
-            hir_uclass_query(ClassQuery::Binary("ASCII"))
-        );
-
-        assert_eq!(
-            t_err(r"(?-u)\pZ"),
-            TestError {
-                kind: hir::ErrorKind::UnicodeNotAllowed,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(8, 1, 9)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err(r"(?-u)\p{Separator}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodeNotAllowed,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(18, 1, 19)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err(r"\pE"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(3, 1, 4)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err(r"\p{Foo}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(7, 1, 8)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err(r"\p{gc:Foo}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyValueNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(10, 1, 11)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(not(feature = "unicode-gencat"))]
-    fn class_unicode_gencat_disabled() {
-        assert_eq!(
-            t_err(r"\p{Separator}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(13, 1, 14)
-                ),
-            }
-        );
-
-        assert_eq!(
-            t_err(r"\p{Any}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(7, 1, 8)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-script")]
-    fn class_unicode_script() {
-        assert_eq!(
-            t(r"\p{Greek}"),
-            hir_uclass_query(ClassQuery::Binary("Greek"))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)\p{Greek}"),
-            hir_case_fold(hir_uclass_query(ClassQuery::Binary("Greek")))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)\P{Greek}"),
-            hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary(
-                "Greek"
-            ))))
-        );
-
-        assert_eq!(
-            t_err(r"\p{sc:Foo}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyValueNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(10, 1, 11)
-                ),
-            }
-        );
-        assert_eq!(
-            t_err(r"\p{scx:Foo}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyValueNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(11, 1, 12)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(not(feature = "unicode-script"))]
-    fn class_unicode_script_disabled() {
-        assert_eq!(
-            t_err(r"\p{Greek}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(9, 1, 10)
-                ),
-            }
-        );
-
-        assert_eq!(
-            t_err(r"\p{scx:Greek}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(13, 1, 14)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-age")]
-    fn class_unicode_age() {
-        assert_eq!(
-            t_err(r"\p{age:Foo}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyValueNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(11, 1, 12)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-gencat")]
-    fn class_unicode_any_empty() {
-        assert_eq!(t(r"\P{any}"), hir_uclass(&[]),);
-    }
-
-    #[test]
-    #[cfg(not(feature = "unicode-age"))]
-    fn class_unicode_age_disabled() {
-        assert_eq!(
-            t_err(r"\p{age:3.0}"),
-            TestError {
-                kind: hir::ErrorKind::UnicodePropertyNotFound,
-                span: Span::new(
-                    Position::new(0, 1, 1),
-                    Position::new(11, 1, 12)
-                ),
-            }
-        );
-    }
-
-    #[test]
-    fn class_bracketed() {
-        assert_eq!(t("[a]"), hir_lit("a"));
-        assert_eq!(t("[ab]"), hir_uclass(&[('a', 'b')]));
-        assert_eq!(t("[^[a]]"), class_negate(uclass(&[('a', 'a')])));
-        assert_eq!(t("[a-z]"), hir_uclass(&[('a', 'z')]));
-        assert_eq!(t("[a-fd-h]"), hir_uclass(&[('a', 'h')]));
-        assert_eq!(t("[a-fg-m]"), hir_uclass(&[('a', 'm')]));
-        assert_eq!(t(r"[\x00]"), hir_uclass(&[('\0', '\0')]));
-        assert_eq!(t(r"[\n]"), hir_uclass(&[('\n', '\n')]));
-        assert_eq!(t("[\n]"), hir_uclass(&[('\n', '\n')]));
-        #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))]
-        assert_eq!(t(r"[\d]"), hir_uclass_query(ClassQuery::Binary("digit")));
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[\pZ]"),
-            hir_uclass_query(ClassQuery::Binary("separator"))
-        );
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[\p{separator}]"),
-            hir_uclass_query(ClassQuery::Binary("separator"))
-        );
-        #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))]
-        assert_eq!(t(r"[^\D]"), hir_uclass_query(ClassQuery::Binary("digit")));
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[^\PZ]"),
-            hir_uclass_query(ClassQuery::Binary("separator"))
-        );
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[^\P{separator}]"),
-            hir_uclass_query(ClassQuery::Binary("separator"))
-        );
-        #[cfg(all(
-            feature = "unicode-case",
-            any(feature = "unicode-perl", feature = "unicode-gencat")
-        ))]
-        assert_eq!(
-            t(r"(?i)[^\D]"),
-            hir_uclass_query(ClassQuery::Binary("digit"))
-        );
-        #[cfg(all(feature = "unicode-case", feature = "unicode-script"))]
-        assert_eq!(
-            t(r"(?i)[^\P{greek}]"),
-            hir_case_fold(hir_uclass_query(ClassQuery::Binary("greek")))
-        );
-
-        assert_eq!(t("(?-u)[a]"), hir_bclass(&[(b'a', b'a')]));
-        assert_eq!(t(r"(?-u)[\x00]"), hir_bclass(&[(b'\0', b'\0')]));
-        assert_eq!(t_bytes(r"(?-u)[\xFF]"), hir_bclass(&[(b'\xFF', b'\xFF')]));
-
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(t("(?i)[a]"), hir_uclass(&[('A', 'A'), ('a', 'a')]));
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[k]"),
-            hir_uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}'),])
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[β]"),
-            hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),])
-        );
-        assert_eq!(t("(?i-u)[k]"), hir_bclass(&[(b'K', b'K'), (b'k', b'k'),]));
-
-        assert_eq!(t("[^a]"), class_negate(uclass(&[('a', 'a')])));
-        assert_eq!(t(r"[^\x00]"), class_negate(uclass(&[('\0', '\0')])));
-        assert_eq!(
-            t_bytes("(?-u)[^a]"),
-            class_negate(bclass(&[(b'a', b'a')]))
-        );
-        #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))]
-        assert_eq!(
-            t(r"[^\d]"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("digit")))
-        );
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[^\pZ]"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("separator")))
-        );
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[^\p{separator}]"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("separator")))
-        );
-        #[cfg(all(feature = "unicode-case", feature = "unicode-script"))]
-        assert_eq!(
-            t(r"(?i)[^\p{greek}]"),
-            hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary(
-                "greek"
-            ))))
-        );
-        #[cfg(all(feature = "unicode-case", feature = "unicode-script"))]
-        assert_eq!(
-            t(r"(?i)[\P{greek}]"),
-            hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary(
-                "greek"
-            ))))
-        );
-
-        // Test some weird cases.
-        assert_eq!(t(r"[\[]"), hir_uclass(&[('[', '[')]));
-
-        assert_eq!(t(r"[&]"), hir_uclass(&[('&', '&')]));
-        assert_eq!(t(r"[\&]"), hir_uclass(&[('&', '&')]));
-        assert_eq!(t(r"[\&\&]"), hir_uclass(&[('&', '&')]));
-        assert_eq!(t(r"[\x00-&]"), hir_uclass(&[('\0', '&')]));
-        assert_eq!(t(r"[&-\xFF]"), hir_uclass(&[('&', '\u{FF}')]));
-
-        assert_eq!(t(r"[~]"), hir_uclass(&[('~', '~')]));
-        assert_eq!(t(r"[\~]"), hir_uclass(&[('~', '~')]));
-        assert_eq!(t(r"[\~\~]"), hir_uclass(&[('~', '~')]));
-        assert_eq!(t(r"[\x00-~]"), hir_uclass(&[('\0', '~')]));
-        assert_eq!(t(r"[~-\xFF]"), hir_uclass(&[('~', '\u{FF}')]));
-
-        assert_eq!(t(r"[-]"), hir_uclass(&[('-', '-')]));
-        assert_eq!(t(r"[\-]"), hir_uclass(&[('-', '-')]));
-        assert_eq!(t(r"[\-\-]"), hir_uclass(&[('-', '-')]));
-        assert_eq!(t(r"[\x00-\-]"), hir_uclass(&[('\0', '-')]));
-        assert_eq!(t(r"[\--\xFF]"), hir_uclass(&[('-', '\u{FF}')]));
-
-        assert_eq!(
-            t_err("(?-u)[^a]"),
-            TestError {
-                kind: hir::ErrorKind::InvalidUtf8,
-                span: Span::new(
-                    Position::new(5, 1, 6),
-                    Position::new(9, 1, 10)
-                ),
-            }
-        );
-        #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))]
-        assert_eq!(t(r"[^\s\S]"), hir_uclass(&[]),);
-        #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))]
-        assert_eq!(t_bytes(r"(?-u)[^\s\S]"), hir_bclass(&[]),);
-    }
-
-    #[test]
-    fn class_bracketed_union() {
-        assert_eq!(t("[a-zA-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')]));
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[a\pZb]"),
-            hir_union(
-                hir_uclass(&[('a', 'b')]),
-                hir_uclass_query(ClassQuery::Binary("separator"))
-            )
-        );
-        #[cfg(all(feature = "unicode-gencat", feature = "unicode-script"))]
-        assert_eq!(
-            t(r"[\pZ\p{Greek}]"),
-            hir_union(
-                hir_uclass_query(ClassQuery::Binary("greek")),
-                hir_uclass_query(ClassQuery::Binary("separator"))
-            )
-        );
-        #[cfg(all(
-            feature = "unicode-age",
-            feature = "unicode-gencat",
-            feature = "unicode-script"
-        ))]
-        assert_eq!(
-            t(r"[\p{age:3.0}\pZ\p{Greek}]"),
-            hir_union(
-                hir_uclass_query(ClassQuery::ByValue {
-                    property_name: "age",
-                    property_value: "3.0",
-                }),
-                hir_union(
-                    hir_uclass_query(ClassQuery::Binary("greek")),
-                    hir_uclass_query(ClassQuery::Binary("separator"))
-                )
-            )
-        );
-        #[cfg(all(
-            feature = "unicode-age",
-            feature = "unicode-gencat",
-            feature = "unicode-script"
-        ))]
-        assert_eq!(
-            t(r"[[[\p{age:3.0}\pZ]\p{Greek}][\p{Cyrillic}]]"),
-            hir_union(
-                hir_uclass_query(ClassQuery::ByValue {
-                    property_name: "age",
-                    property_value: "3.0",
-                }),
-                hir_union(
-                    hir_uclass_query(ClassQuery::Binary("cyrillic")),
-                    hir_union(
-                        hir_uclass_query(ClassQuery::Binary("greek")),
-                        hir_uclass_query(ClassQuery::Binary("separator"))
-                    )
-                )
-            )
-        );
-
-        #[cfg(all(
-            feature = "unicode-age",
-            feature = "unicode-case",
-            feature = "unicode-gencat",
-            feature = "unicode-script"
-        ))]
-        assert_eq!(
-            t(r"(?i)[\p{age:3.0}\pZ\p{Greek}]"),
-            hir_case_fold(hir_union(
-                hir_uclass_query(ClassQuery::ByValue {
-                    property_name: "age",
-                    property_value: "3.0",
-                }),
-                hir_union(
-                    hir_uclass_query(ClassQuery::Binary("greek")),
-                    hir_uclass_query(ClassQuery::Binary("separator"))
-                )
-            ))
-        );
-        #[cfg(all(
-            feature = "unicode-age",
-            feature = "unicode-gencat",
-            feature = "unicode-script"
-        ))]
-        assert_eq!(
-            t(r"[^\p{age:3.0}\pZ\p{Greek}]"),
-            hir_negate(hir_union(
-                hir_uclass_query(ClassQuery::ByValue {
-                    property_name: "age",
-                    property_value: "3.0",
-                }),
-                hir_union(
-                    hir_uclass_query(ClassQuery::Binary("greek")),
-                    hir_uclass_query(ClassQuery::Binary("separator"))
-                )
-            ))
-        );
-        #[cfg(all(
-            feature = "unicode-age",
-            feature = "unicode-case",
-            feature = "unicode-gencat",
-            feature = "unicode-script"
-        ))]
-        assert_eq!(
-            t(r"(?i)[^\p{age:3.0}\pZ\p{Greek}]"),
-            hir_negate(hir_case_fold(hir_union(
-                hir_uclass_query(ClassQuery::ByValue {
-                    property_name: "age",
-                    property_value: "3.0",
-                }),
-                hir_union(
-                    hir_uclass_query(ClassQuery::Binary("greek")),
-                    hir_uclass_query(ClassQuery::Binary("separator"))
-                )
-            )))
-        );
-    }
-
-    #[test]
-    fn class_bracketed_nested() {
-        assert_eq!(t(r"[a[^c]]"), class_negate(uclass(&[('c', 'c')])));
-        assert_eq!(t(r"[a-b[^c]]"), class_negate(uclass(&[('c', 'c')])));
-        assert_eq!(t(r"[a-c[^c]]"), class_negate(uclass(&[])));
-
-        assert_eq!(t(r"[^a[^c]]"), hir_uclass(&[('c', 'c')]));
-        assert_eq!(t(r"[^a-b[^c]]"), hir_uclass(&[('c', 'c')]));
-
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)[a[^c]]"),
-            hir_negate(class_case_fold(uclass(&[('c', 'c')])))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)[a-b[^c]]"),
-            hir_negate(class_case_fold(uclass(&[('c', 'c')])))
-        );
-
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(t(r"(?i)[^a[^c]]"), hir_uclass(&[('C', 'C'), ('c', 'c')]));
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t(r"(?i)[^a-b[^c]]"),
-            hir_uclass(&[('C', 'C'), ('c', 'c')])
-        );
-
-        assert_eq!(t(r"[^a-c[^c]]"), hir_uclass(&[]),);
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(t(r"(?i)[^a-c[^c]]"), hir_uclass(&[]),);
-    }
-
-    #[test]
-    fn class_bracketed_intersect() {
-        assert_eq!(t("[abc&&b-c]"), hir_uclass(&[('b', 'c')]));
-        assert_eq!(t("[abc&&[b-c]]"), hir_uclass(&[('b', 'c')]));
-        assert_eq!(t("[[abc]&&[b-c]]"), hir_uclass(&[('b', 'c')]));
-        assert_eq!(t("[a-z&&b-y&&c-x]"), hir_uclass(&[('c', 'x')]));
-        assert_eq!(t("[c-da-b&&a-d]"), hir_uclass(&[('a', 'd')]));
-        assert_eq!(t("[a-d&&c-da-b]"), hir_uclass(&[('a', 'd')]));
-        assert_eq!(t(r"[a-z&&a-c]"), hir_uclass(&[('a', 'c')]));
-        assert_eq!(t(r"[[a-z&&a-c]]"), hir_uclass(&[('a', 'c')]));
-        assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')])));
-
-        assert_eq!(t("(?-u)[abc&&b-c]"), hir_bclass(&[(b'b', b'c')]));
-        assert_eq!(t("(?-u)[abc&&[b-c]]"), hir_bclass(&[(b'b', b'c')]));
-        assert_eq!(t("(?-u)[[abc]&&[b-c]]"), hir_bclass(&[(b'b', b'c')]));
-        assert_eq!(t("(?-u)[a-z&&b-y&&c-x]"), hir_bclass(&[(b'c', b'x')]));
-        assert_eq!(t("(?-u)[c-da-b&&a-d]"), hir_bclass(&[(b'a', b'd')]));
-        assert_eq!(t("(?-u)[a-d&&c-da-b]"), hir_bclass(&[(b'a', b'd')]));
-
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[abc&&b-c]"),
-            hir_case_fold(hir_uclass(&[('b', 'c')]))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[abc&&[b-c]]"),
-            hir_case_fold(hir_uclass(&[('b', 'c')]))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[[abc]&&[b-c]]"),
-            hir_case_fold(hir_uclass(&[('b', 'c')]))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[a-z&&b-y&&c-x]"),
-            hir_case_fold(hir_uclass(&[('c', 'x')]))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[c-da-b&&a-d]"),
-            hir_case_fold(hir_uclass(&[('a', 'd')]))
-        );
-        #[cfg(feature = "unicode-case")]
-        assert_eq!(
-            t("(?i)[a-d&&c-da-b]"),
-            hir_case_fold(hir_uclass(&[('a', 'd')]))
-        );
-
-        assert_eq!(
-            t("(?i-u)[abc&&b-c]"),
-            hir_case_fold(hir_bclass(&[(b'b', b'c')]))
-        );
-        assert_eq!(
-            t("(?i-u)[abc&&[b-c]]"),
-            hir_case_fold(hir_bclass(&[(b'b', b'c')]))
-        );
-        assert_eq!(
-            t("(?i-u)[[abc]&&[b-c]]"),
-            hir_case_fold(hir_bclass(&[(b'b', b'c')]))
-        );
-        assert_eq!(
-            t("(?i-u)[a-z&&b-y&&c-x]"),
-            hir_case_fold(hir_bclass(&[(b'c', b'x')]))
-        );
-        assert_eq!(
-            t("(?i-u)[c-da-b&&a-d]"),
-            hir_case_fold(hir_bclass(&[(b'a', b'd')]))
-        );
-        assert_eq!(
-            t("(?i-u)[a-d&&c-da-b]"),
-            hir_case_fold(hir_bclass(&[(b'a', b'd')]))
-        );
-
-        // In `[a^]`, `^` does not need to be escaped, so it makes sense that
-        // `^` is also allowed to be unescaped after `&&`.
-        assert_eq!(t(r"[\^&&^]"), hir_uclass(&[('^', '^')]));
-        // `]` needs to be escaped after `&&` since it's not at start of class.
-        assert_eq!(t(r"[]&&\]]"), hir_uclass(&[(']', ']')]));
-        assert_eq!(t(r"[-&&-]"), hir_uclass(&[('-', '-')]));
-        assert_eq!(t(r"[\&&&&]"), hir_uclass(&[('&', '&')]));
-        assert_eq!(t(r"[\&&&\&]"), hir_uclass(&[('&', '&')]));
-        // Test precedence.
-        assert_eq!(
-            t(r"[a-w&&[^c-g]z]"),
-            hir_uclass(&[('a', 'b'), ('h', 'w')])
-        );
-    }
-
-    #[test]
-    fn class_bracketed_intersect_negate() {
-        #[cfg(feature = "unicode-perl")]
-        assert_eq!(
-            t(r"[^\w&&\d]"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("digit")))
-        );
-        assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')])));
-        #[cfg(feature = "unicode-perl")]
-        assert_eq!(
-            t(r"[^[\w&&\d]]"),
-            hir_negate(hir_uclass_query(ClassQuery::Binary("digit")))
-        );
-        #[cfg(feature = "unicode-perl")]
-        assert_eq!(
-            t(r"[^[^\w&&\d]]"),
-            hir_uclass_query(ClassQuery::Binary("digit"))
-        );
-        #[cfg(feature = "unicode-perl")]
-        assert_eq!(t(r"[[[^\w]&&[^\d]]]"), hir_negate(hir_uclass_perl_word()));
-
-        #[cfg(feature = "unicode-perl")]
-        assert_eq!(
-            t_bytes(r"(?-u)[^\w&&\d]"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit))
-        );
-        assert_eq!(
-            t_bytes(r"(?-u)[^[a-z&&a-c]]"),
-            hir_negate(hir_bclass(&[(b'a', b'c')]))
-        );
-        assert_eq!(
-            t_bytes(r"(?-u)[^[\w&&\d]]"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit))
-        );
-        assert_eq!(
-            t_bytes(r"(?-u)[^[^\w&&\d]]"),
-            hir_ascii_bclass(&ast::ClassAsciiKind::Digit)
-        );
-        assert_eq!(
-            t_bytes(r"(?-u)[[[^\w]&&[^\d]]]"),
-            hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word))
-        );
-    }
-
-    #[test]
-    fn class_bracketed_difference() {
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"[\pL--[:ascii:]]"),
-            hir_difference(
-                hir_uclass_query(ClassQuery::Binary("letter")),
-                hir_uclass(&[('\0', '\x7F')])
-            )
-        );
-
-        assert_eq!(
-            t(r"(?-u)[[:alpha:]--[:lower:]]"),
-            hir_bclass(&[(b'A', b'Z')])
-        );
-    }
-
-    #[test]
-    fn class_bracketed_symmetric_difference() {
-        #[cfg(feature = "unicode-script")]
-        assert_eq!(
-            t(r"[\p{sc:Greek}~~\p{scx:Greek}]"),
-            // Class({
-            //     '·'..='·',
-            //     '\u{300}'..='\u{301}',
-            //     '\u{304}'..='\u{304}',
-            //     '\u{306}'..='\u{306}',
-            //     '\u{308}'..='\u{308}',
-            //     '\u{313}'..='\u{313}',
-            //     '\u{342}'..='\u{342}',
-            //     '\u{345}'..='\u{345}',
-            //     'ÍŽ'..='ÍŽ',
-            //     '\u{1dc0}'..='\u{1dc1}',
-            //     '⁝'..='⁝',
-            // })
-            hir_uclass(&[
-                ('·', '·'),
-                ('\u{0300}', '\u{0301}'),
-                ('\u{0304}', '\u{0304}'),
-                ('\u{0306}', '\u{0306}'),
-                ('\u{0308}', '\u{0308}'),
-                ('\u{0313}', '\u{0313}'),
-                ('\u{0342}', '\u{0342}'),
-                ('\u{0345}', '\u{0345}'),
-                ('ÍŽ', 'ÍŽ'),
-                ('\u{1DC0}', '\u{1DC1}'),
-                ('⁝', '⁝'),
-            ])
-        );
-        assert_eq!(t(r"[a-g~~c-j]"), hir_uclass(&[('a', 'b'), ('h', 'j')]));
-
-        assert_eq!(
-            t(r"(?-u)[a-g~~c-j]"),
-            hir_bclass(&[(b'a', b'b'), (b'h', b'j')])
-        );
-    }
-
-    #[test]
-    fn ignore_whitespace() {
-        assert_eq!(t(r"(?x)\12 3"), hir_lit("\n3"));
-        assert_eq!(t(r"(?x)\x { 53 }"), hir_lit("S"));
-        assert_eq!(
-            t(r"(?x)\x # comment
-{ # comment
-    53 # comment
-} #comment"),
-            hir_lit("S")
-        );
-
-        assert_eq!(t(r"(?x)\x 53"), hir_lit("S"));
-        assert_eq!(
-            t(r"(?x)\x # comment
-        53 # comment"),
-            hir_lit("S")
-        );
-        assert_eq!(t(r"(?x)\x5 3"), hir_lit("S"));
-
-        #[cfg(feature = "unicode-gencat")]
-        assert_eq!(
-            t(r"(?x)\p # comment
-{ # comment
-    Separator # comment
-} # comment"),
-            hir_uclass_query(ClassQuery::Binary("separator"))
-        );
-
-        assert_eq!(
-            t(r"(?x)a # comment
-{ # comment
-    5 # comment
-    , # comment
-    10 # comment
-} # comment"),
-            hir_range(true, 5, Some(10), hir_lit("a"))
-        );
-
-        assert_eq!(t(r"(?x)a\  # hi there"), hir_lit("a "));
-    }
-
-    #[test]
-    fn analysis_is_utf8() {
-        // Positive examples.
-        assert!(props_bytes(r"a").is_utf8());
-        assert!(props_bytes(r"ab").is_utf8());
-        assert!(props_bytes(r"(?-u)a").is_utf8());
-        assert!(props_bytes(r"(?-u)ab").is_utf8());
-        assert!(props_bytes(r"\xFF").is_utf8());
-        assert!(props_bytes(r"\xFF\xFF").is_utf8());
-        assert!(props_bytes(r"[^a]").is_utf8());
-        assert!(props_bytes(r"[^a][^a]").is_utf8());
-        assert!(props_bytes(r"\b").is_utf8());
-        assert!(props_bytes(r"\B").is_utf8());
-        assert!(props_bytes(r"(?-u)\b").is_utf8());
-        assert!(props_bytes(r"(?-u)\B").is_utf8());
-
-        // Negative examples.
-        assert!(!props_bytes(r"(?-u)\xFF").is_utf8());
-        assert!(!props_bytes(r"(?-u)\xFF\xFF").is_utf8());
-        assert!(!props_bytes(r"(?-u)[^a]").is_utf8());
-        assert!(!props_bytes(r"(?-u)[^a][^a]").is_utf8());
-    }
-
-    #[test]
-    fn analysis_captures_len() {
-        assert_eq!(0, props(r"a").explicit_captures_len());
-        assert_eq!(0, props(r"(?:a)").explicit_captures_len());
-        assert_eq!(0, props(r"(?i-u:a)").explicit_captures_len());
-        assert_eq!(0, props(r"(?i-u)a").explicit_captures_len());
-        assert_eq!(1, props(r"(a)").explicit_captures_len());
-        assert_eq!(1, props(r"(?P<foo>a)").explicit_captures_len());
-        assert_eq!(1, props(r"()").explicit_captures_len());
-        assert_eq!(1, props(r"()a").explicit_captures_len());
-        assert_eq!(1, props(r"(a)+").explicit_captures_len());
-        assert_eq!(2, props(r"(a)(b)").explicit_captures_len());
-        assert_eq!(2, props(r"(a)|(b)").explicit_captures_len());
-        assert_eq!(2, props(r"((a))").explicit_captures_len());
-        assert_eq!(1, props(r"([a&&b])").explicit_captures_len());
-    }
-
-    #[test]
-    fn analysis_static_captures_len() {
-        let len = |pattern| props(pattern).static_explicit_captures_len();
-        assert_eq!(Some(0), len(r""));
-        assert_eq!(Some(0), len(r"foo|bar"));
-        assert_eq!(None, len(r"(foo)|bar"));
-        assert_eq!(None, len(r"foo|(bar)"));
-        assert_eq!(Some(1), len(r"(foo|bar)"));
-        assert_eq!(Some(1), len(r"(a|b|c|d|e|f)"));
-        assert_eq!(Some(1), len(r"(a)|(b)|(c)|(d)|(e)|(f)"));
-        assert_eq!(Some(2), len(r"(a)(b)|(c)(d)|(e)(f)"));
-        assert_eq!(Some(6), len(r"(a)(b)(c)(d)(e)(f)"));
-        assert_eq!(Some(3), len(r"(a)(b)(extra)|(a)(b)()"));
-        assert_eq!(Some(3), len(r"(a)(b)((?:extra)?)"));
-        assert_eq!(None, len(r"(a)(b)(extra)?"));
-        assert_eq!(Some(1), len(r"(foo)|(bar)"));
-        assert_eq!(Some(2), len(r"(foo)(bar)"));
-        assert_eq!(Some(2), len(r"(foo)+(bar)"));
-        assert_eq!(None, len(r"(foo)*(bar)"));
-        assert_eq!(Some(0), len(r"(foo)?{0}"));
-        assert_eq!(None, len(r"(foo)?{1}"));
-        assert_eq!(Some(1), len(r"(foo){1}"));
-        assert_eq!(Some(1), len(r"(foo){1,}"));
-        assert_eq!(Some(1), len(r"(foo){1,}?"));
-        assert_eq!(None, len(r"(foo){1,}??"));
-        assert_eq!(None, len(r"(foo){0,}"));
-        assert_eq!(Some(1), len(r"(foo)(?:bar)"));
-        assert_eq!(Some(2), len(r"(foo(?:bar)+)(?:baz(boo))"));
-        assert_eq!(Some(2), len(r"(?P<bar>foo)(?:bar)(bal|loon)"));
-        assert_eq!(
-            Some(2),
-            len(r#"<(a)[^>]+href="([^"]+)"|<(img)[^>]+src="([^"]+)""#)
-        );
-    }
-
-    #[test]
-    fn analysis_is_all_assertions() {
-        // Positive examples.
-        let p = props(r"\b");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"\B");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"^");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"$");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"\A");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"\z");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"$^\z\A\b\B");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"$|^|\z|\A|\b|\B");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"^$|$^");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        let p = props(r"((\b)+())*^");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(0));
-
-        // Negative examples.
-        let p = props(r"^a");
-        assert!(!p.look_set().is_empty());
-        assert_eq!(p.minimum_len(), Some(1));
-    }
-
-    #[test]
-    fn analysis_look_set_prefix_any() {
-        let p = props(r"(?-u)(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))");
-        assert!(p.look_set_prefix_any().contains(Look::WordAscii));
-    }
-
-    #[test]
-    fn analysis_is_anchored() {
-        let is_start = |p| props(p).look_set_prefix().contains(Look::Start);
-        let is_end = |p| props(p).look_set_suffix().contains(Look::End);
-
-        // Positive examples.
-        assert!(is_start(r"^"));
-        assert!(is_end(r"$"));
-
-        assert!(is_start(r"^^"));
-        assert!(props(r"$$").look_set_suffix().contains(Look::End));
-
-        assert!(is_start(r"^$"));
-        assert!(is_end(r"^$"));
-
-        assert!(is_start(r"^foo"));
-        assert!(is_end(r"foo$"));
-
-        assert!(is_start(r"^foo|^bar"));
-        assert!(is_end(r"foo$|bar$"));
-
-        assert!(is_start(r"^(foo|bar)"));
-        assert!(is_end(r"(foo|bar)$"));
-
-        assert!(is_start(r"^+"));
-        assert!(is_end(r"$+"));
-        assert!(is_start(r"^++"));
-        assert!(is_end(r"$++"));
-        assert!(is_start(r"(^)+"));
-        assert!(is_end(r"($)+"));
-
-        assert!(is_start(r"$^"));
-        assert!(is_start(r"$^"));
-        assert!(is_start(r"$^|^$"));
-        assert!(is_end(r"$^|^$"));
-
-        assert!(is_start(r"\b^"));
-        assert!(is_end(r"$\b"));
-        assert!(is_start(r"^(?m:^)"));
-        assert!(is_end(r"(?m:$)$"));
-        assert!(is_start(r"(?m:^)^"));
-        assert!(is_end(r"$(?m:$)"));
-
-        // Negative examples.
-        assert!(!is_start(r"(?m)^"));
-        assert!(!is_end(r"(?m)$"));
-        assert!(!is_start(r"(?m:^$)|$^"));
-        assert!(!is_end(r"(?m:^$)|$^"));
-        assert!(!is_start(r"$^|(?m:^$)"));
-        assert!(!is_end(r"$^|(?m:^$)"));
-
-        assert!(!is_start(r"a^"));
-        assert!(!is_start(r"$a"));
-
-        assert!(!is_end(r"a^"));
-        assert!(!is_end(r"$a"));
-
-        assert!(!is_start(r"^foo|bar"));
-        assert!(!is_end(r"foo|bar$"));
-
-        assert!(!is_start(r"^*"));
-        assert!(!is_end(r"$*"));
-        assert!(!is_start(r"^*+"));
-        assert!(!is_end(r"$*+"));
-        assert!(!is_start(r"^+*"));
-        assert!(!is_end(r"$+*"));
-        assert!(!is_start(r"(^)*"));
-        assert!(!is_end(r"($)*"));
-    }
-
-    #[test]
-    fn analysis_is_any_anchored() {
-        let is_start = |p| props(p).look_set().contains(Look::Start);
-        let is_end = |p| props(p).look_set().contains(Look::End);
-
-        // Positive examples.
-        assert!(is_start(r"^"));
-        assert!(is_end(r"$"));
-        assert!(is_start(r"\A"));
-        assert!(is_end(r"\z"));
-
-        // Negative examples.
-        assert!(!is_start(r"(?m)^"));
-        assert!(!is_end(r"(?m)$"));
-        assert!(!is_start(r"$"));
-        assert!(!is_end(r"^"));
-    }
-
-    #[test]
-    fn analysis_can_empty() {
-        // Positive examples.
-        let assert_empty =
-            |p| assert_eq!(Some(0), props_bytes(p).minimum_len());
-        assert_empty(r"");
-        assert_empty(r"()");
-        assert_empty(r"()*");
-        assert_empty(r"()+");
-        assert_empty(r"()?");
-        assert_empty(r"a*");
-        assert_empty(r"a?");
-        assert_empty(r"a{0}");
-        assert_empty(r"a{0,}");
-        assert_empty(r"a{0,1}");
-        assert_empty(r"a{0,10}");
-        #[cfg(feature = "unicode-gencat")]
-        assert_empty(r"\pL*");
-        assert_empty(r"a*|b");
-        assert_empty(r"b|a*");
-        assert_empty(r"a|");
-        assert_empty(r"|a");
-        assert_empty(r"a||b");
-        assert_empty(r"a*a?(abcd)*");
-        assert_empty(r"^");
-        assert_empty(r"$");
-        assert_empty(r"(?m)^");
-        assert_empty(r"(?m)$");
-        assert_empty(r"\A");
-        assert_empty(r"\z");
-        assert_empty(r"\B");
-        assert_empty(r"(?-u)\B");
-        assert_empty(r"\b");
-        assert_empty(r"(?-u)\b");
-
-        // Negative examples.
-        let assert_non_empty =
-            |p| assert_ne!(Some(0), props_bytes(p).minimum_len());
-        assert_non_empty(r"a+");
-        assert_non_empty(r"a{1}");
-        assert_non_empty(r"a{1,}");
-        assert_non_empty(r"a{1,2}");
-        assert_non_empty(r"a{1,10}");
-        assert_non_empty(r"b|a");
-        assert_non_empty(r"a*a+(abcd)*");
-        #[cfg(feature = "unicode-gencat")]
-        assert_non_empty(r"\P{any}");
-        assert_non_empty(r"[a--a]");
-        assert_non_empty(r"[a&&b]");
-    }
-
-    #[test]
-    fn analysis_is_literal() {
-        // Positive examples.
-        assert!(props(r"a").is_literal());
-        assert!(props(r"ab").is_literal());
-        assert!(props(r"abc").is_literal());
-        assert!(props(r"(?m)abc").is_literal());
-        assert!(props(r"(?:a)").is_literal());
-        assert!(props(r"foo(?:a)").is_literal());
-        assert!(props(r"(?:a)foo").is_literal());
-        assert!(props(r"[a]").is_literal());
-
-        // Negative examples.
-        assert!(!props(r"").is_literal());
-        assert!(!props(r"^").is_literal());
-        assert!(!props(r"a|b").is_literal());
-        assert!(!props(r"(a)").is_literal());
-        assert!(!props(r"a+").is_literal());
-        assert!(!props(r"foo(a)").is_literal());
-        assert!(!props(r"(a)foo").is_literal());
-        assert!(!props(r"[ab]").is_literal());
-    }
-
-    #[test]
-    fn analysis_is_alternation_literal() {
-        // Positive examples.
-        assert!(props(r"a").is_alternation_literal());
-        assert!(props(r"ab").is_alternation_literal());
-        assert!(props(r"abc").is_alternation_literal());
-        assert!(props(r"(?m)abc").is_alternation_literal());
-        assert!(props(r"foo|bar").is_alternation_literal());
-        assert!(props(r"foo|bar|baz").is_alternation_literal());
-        assert!(props(r"[a]").is_alternation_literal());
-        assert!(props(r"(?:ab)|cd").is_alternation_literal());
-        assert!(props(r"ab|(?:cd)").is_alternation_literal());
-
-        // Negative examples.
-        assert!(!props(r"").is_alternation_literal());
-        assert!(!props(r"^").is_alternation_literal());
-        assert!(!props(r"(a)").is_alternation_literal());
-        assert!(!props(r"a+").is_alternation_literal());
-        assert!(!props(r"foo(a)").is_alternation_literal());
-        assert!(!props(r"(a)foo").is_alternation_literal());
-        assert!(!props(r"[ab]").is_alternation_literal());
-        assert!(!props(r"[ab]|b").is_alternation_literal());
-        assert!(!props(r"a|[ab]").is_alternation_literal());
-        assert!(!props(r"(a)|b").is_alternation_literal());
-        assert!(!props(r"a|(b)").is_alternation_literal());
-        assert!(!props(r"a|b").is_alternation_literal());
-        assert!(!props(r"a|b|c").is_alternation_literal());
-        assert!(!props(r"[a]|b").is_alternation_literal());
-        assert!(!props(r"a|[b]").is_alternation_literal());
-        assert!(!props(r"(?:a)|b").is_alternation_literal());
-        assert!(!props(r"a|(?:b)").is_alternation_literal());
-        assert!(!props(r"(?:z|xx)@|xx").is_alternation_literal());
-    }
-
-    // This tests that the smart Hir::repetition constructors does some basic
-    // simplifications.
-    #[test]
-    fn smart_repetition() {
-        assert_eq!(t(r"a{0}"), Hir::empty());
-        assert_eq!(t(r"a{1}"), hir_lit("a"));
-        assert_eq!(t(r"\B{32111}"), hir_look(hir::Look::WordUnicodeNegate));
-    }
-
-    // This tests that the smart Hir::concat constructor simplifies the given
-    // exprs in a way we expect.
-    #[test]
-    fn smart_concat() {
-        assert_eq!(t(""), Hir::empty());
-        assert_eq!(t("(?:)"), Hir::empty());
-        assert_eq!(t("abc"), hir_lit("abc"));
-        assert_eq!(t("(?:foo)(?:bar)"), hir_lit("foobar"));
-        assert_eq!(t("quux(?:foo)(?:bar)baz"), hir_lit("quuxfoobarbaz"));
-        assert_eq!(
-            t("foo(?:bar^baz)quux"),
-            hir_cat(vec![
-                hir_lit("foobar"),
-                hir_look(hir::Look::Start),
-                hir_lit("bazquux"),
-            ])
-        );
-        assert_eq!(
-            t("foo(?:ba(?:r^b)az)quux"),
-            hir_cat(vec![
-                hir_lit("foobar"),
-                hir_look(hir::Look::Start),
-                hir_lit("bazquux"),
-            ])
-        );
-    }
-
-    // This tests that the smart Hir::alternation constructor simplifies the
-    // given exprs in a way we expect.
-    #[test]
-    fn smart_alternation() {
-        assert_eq!(
-            t("(?:foo)|(?:bar)"),
-            hir_alt(vec![hir_lit("foo"), hir_lit("bar")])
-        );
-        assert_eq!(
-            t("quux|(?:abc|def|xyz)|baz"),
-            hir_alt(vec![
-                hir_lit("quux"),
-                hir_lit("abc"),
-                hir_lit("def"),
-                hir_lit("xyz"),
-                hir_lit("baz"),
-            ])
-        );
-        assert_eq!(
-            t("quux|(?:abc|(?:def|mno)|xyz)|baz"),
-            hir_alt(vec![
-                hir_lit("quux"),
-                hir_lit("abc"),
-                hir_lit("def"),
-                hir_lit("mno"),
-                hir_lit("xyz"),
-                hir_lit("baz"),
-            ])
-        );
-        assert_eq!(
-            t("a|b|c|d|e|f|x|y|z"),
-            hir_uclass(&[('a', 'f'), ('x', 'z')]),
-        );
-        // Tests that we lift common prefixes out of an alternation.
-        assert_eq!(
-            t("[A-Z]foo|[A-Z]quux"),
-            hir_cat(vec![
-                hir_uclass(&[('A', 'Z')]),
-                hir_alt(vec![hir_lit("foo"), hir_lit("quux")]),
-            ]),
-        );
-        assert_eq!(
-            t("[A-Z][A-Z]|[A-Z]quux"),
-            hir_cat(vec![
-                hir_uclass(&[('A', 'Z')]),
-                hir_alt(vec![hir_uclass(&[('A', 'Z')]), hir_lit("quux")]),
-            ]),
-        );
-        assert_eq!(
-            t("[A-Z][A-Z]|[A-Z][A-Z]quux"),
-            hir_cat(vec![
-                hir_uclass(&[('A', 'Z')]),
-                hir_uclass(&[('A', 'Z')]),
-                hir_alt(vec![Hir::empty(), hir_lit("quux")]),
-            ]),
-        );
-        assert_eq!(
-            t("[A-Z]foo|[A-Z]foobar"),
-            hir_cat(vec![
-                hir_uclass(&[('A', 'Z')]),
-                hir_alt(vec![hir_lit("foo"), hir_lit("foobar")]),
-            ]),
-        );
-    }
-
-    #[test]
-    fn regression_alt_empty_concat() {
-        use crate::ast::{self, Ast};
-
-        let span = Span::splat(Position::new(0, 0, 0));
-        let ast = Ast::alternation(ast::Alternation {
-            span,
-            asts: vec![Ast::concat(ast::Concat { span, asts: vec![] })],
-        });
-
-        let mut t = Translator::new();
-        assert_eq!(Ok(Hir::empty()), t.translate("", &ast));
-    }
-
-    #[test]
-    fn regression_empty_alt() {
-        use crate::ast::{self, Ast};
-
-        let span = Span::splat(Position::new(0, 0, 0));
-        let ast = Ast::concat(ast::Concat {
-            span,
-            asts: vec![Ast::alternation(ast::Alternation {
-                span,
-                asts: vec![],
-            })],
-        });
-
-        let mut t = Translator::new();
-        assert_eq!(Ok(Hir::fail()), t.translate("", &ast));
-    }
-
-    #[test]
-    fn regression_singleton_alt() {
-        use crate::{
-            ast::{self, Ast},
-            hir::Dot,
-        };
-
-        let span = Span::splat(Position::new(0, 0, 0));
-        let ast = Ast::concat(ast::Concat {
-            span,
-            asts: vec![Ast::alternation(ast::Alternation {
-                span,
-                asts: vec![Ast::dot(span)],
-            })],
-        });
-
-        let mut t = Translator::new();
-        assert_eq!(Ok(Hir::dot(Dot::AnyCharExceptLF)), t.translate("", &ast));
-    }
-
-    // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63168
-    #[test]
-    fn regression_fuzz_match() {
-        let pat = "[(\u{6} \0-\u{afdf5}]  \0 ";
-        let ast = ParserBuilder::new()
-            .octal(false)
-            .ignore_whitespace(true)
-            .build()
-            .parse(pat)
-            .unwrap();
-        let hir = TranslatorBuilder::new()
-            .utf8(true)
-            .case_insensitive(false)
-            .multi_line(false)
-            .dot_matches_new_line(false)
-            .swap_greed(true)
-            .unicode(true)
-            .build()
-            .translate(pat, &ast)
-            .unwrap();
-        assert_eq!(
-            hir,
-            Hir::concat(vec![
-                hir_uclass(&[('\0', '\u{afdf5}')]),
-                hir_lit("\0"),
-            ])
-        );
-    }
-
-    // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63155
-    #[cfg(feature = "unicode")]
-    #[test]
-    fn regression_fuzz_difference1() {
-        let pat = r"\W\W|\W[^\v--\W\W\P{Script_Extensions:Pau_Cin_Hau}\u10A1A1-\U{3E3E3}--~~~~--~~~~~~~~------~~~~~~--~~~~~~]*";
-        let _ = t(pat); // shouldn't panic
-    }
-
-    // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63153
-    #[test]
-    fn regression_fuzz_char_decrement1() {
-        let pat = "w[w[^w?\rw\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\r\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0*\0\0\u{1}\0]\0\0-*\0][^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0x\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\0\0*??\0\u{7f}{2}\u{10}??\0\0\0\0\0\0\0\0\0\u{3}\0\0\0}\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\u{1}\0]\0\u{1}\u{1}H-i]-]\0\0\0\0\u{1}\0]\0\0\0\u{1}\0]\0\0-*\0\0\0\0\u{1}9-\u{7f}]\0'|-\u{7f}]\0'|(?i-ux)[-\u{7f}]\0'\u{3}\0\0\0}\0-*\0]<D\0\0\0\0\0\0\u{1}]\0\0\0\0]\0\0-*\0]\0\0 ";
-        let _ = t(pat); // shouldn't panic
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/visitor.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/visitor.rs
deleted file mode 100644
index f30f0a1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/visitor.rs
+++ /dev/null
@@ -1,215 +0,0 @@
-use alloc::{vec, vec::Vec};
-
-use crate::hir::{self, Hir, HirKind};
-
-/// A trait for visiting the high-level IR (HIR) in depth first order.
-///
-/// The principle aim of this trait is to enable callers to perform case
-/// analysis on a high-level intermediate representation of a regular
-/// expression without necessarily using recursion. In particular, this permits
-/// callers to do case analysis with constant stack usage, which can be
-/// important since the size of an HIR may be proportional to end user input.
-///
-/// Typical usage of this trait involves providing an implementation and then
-/// running it using the [`visit`] function.
-pub trait Visitor {
-    /// The result of visiting an HIR.
-    type Output;
-    /// An error that visiting an HIR might return.
-    type Err;
-
-    /// All implementors of `Visitor` must provide a `finish` method, which
-    /// yields the result of visiting the HIR or an error.
-    fn finish(self) -> Result<Self::Output, Self::Err>;
-
-    /// This method is called before beginning traversal of the HIR.
-    fn start(&mut self) {}
-
-    /// This method is called on an `Hir` before descending into child `Hir`
-    /// nodes.
-    fn visit_pre(&mut self, _hir: &Hir) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called on an `Hir` after descending all of its child
-    /// `Hir` nodes.
-    fn visit_post(&mut self, _hir: &Hir) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called between child nodes of an alternation.
-    fn visit_alternation_in(&mut self) -> Result<(), Self::Err> {
-        Ok(())
-    }
-
-    /// This method is called between child nodes of a concatenation.
-    fn visit_concat_in(&mut self) -> Result<(), Self::Err> {
-        Ok(())
-    }
-}
-
-/// Executes an implementation of `Visitor` in constant stack space.
-///
-/// This function will visit every node in the given `Hir` while calling
-/// appropriate methods provided by the [`Visitor`] trait.
-///
-/// The primary use case for this method is when one wants to perform case
-/// analysis over an `Hir` without using a stack size proportional to the depth
-/// of the `Hir`. Namely, this method will instead use constant stack space,
-/// but will use heap space proportional to the size of the `Hir`. This may be
-/// desirable in cases where the size of `Hir` is proportional to end user
-/// input.
-///
-/// If the visitor returns an error at any point, then visiting is stopped and
-/// the error is returned.
-pub fn visit<V: Visitor>(hir: &Hir, visitor: V) -> Result<V::Output, V::Err> {
-    HeapVisitor::new().visit(hir, visitor)
-}
-
-/// HeapVisitor visits every item in an `Hir` recursively using constant stack
-/// size and a heap size proportional to the size of the `Hir`.
-struct HeapVisitor<'a> {
-    /// A stack of `Hir` nodes. This is roughly analogous to the call stack
-    /// used in a typical recursive visitor.
-    stack: Vec<(&'a Hir, Frame<'a>)>,
-}
-
-/// Represents a single stack frame while performing structural induction over
-/// an `Hir`.
-enum Frame<'a> {
-    /// A stack frame allocated just before descending into a repetition
-    /// operator's child node.
-    Repetition(&'a hir::Repetition),
-    /// A stack frame allocated just before descending into a capture's child
-    /// node.
-    Capture(&'a hir::Capture),
-    /// The stack frame used while visiting every child node of a concatenation
-    /// of expressions.
-    Concat {
-        /// The child node we are currently visiting.
-        head: &'a Hir,
-        /// The remaining child nodes to visit (which may be empty).
-        tail: &'a [Hir],
-    },
-    /// The stack frame used while visiting every child node of an alternation
-    /// of expressions.
-    Alternation {
-        /// The child node we are currently visiting.
-        head: &'a Hir,
-        /// The remaining child nodes to visit (which may be empty).
-        tail: &'a [Hir],
-    },
-}
-
-impl<'a> HeapVisitor<'a> {
-    fn new() -> HeapVisitor<'a> {
-        HeapVisitor { stack: vec![] }
-    }
-
-    fn visit<V: Visitor>(
-        &mut self,
-        mut hir: &'a Hir,
-        mut visitor: V,
-    ) -> Result<V::Output, V::Err> {
-        self.stack.clear();
-
-        visitor.start();
-        loop {
-            visitor.visit_pre(hir)?;
-            if let Some(x) = self.induct(hir) {
-                let child = x.child();
-                self.stack.push((hir, x));
-                hir = child;
-                continue;
-            }
-            // No induction means we have a base case, so we can post visit
-            // it now.
-            visitor.visit_post(hir)?;
-
-            // At this point, we now try to pop our call stack until it is
-            // either empty or we hit another inductive case.
-            loop {
-                let (post_hir, frame) = match self.stack.pop() {
-                    None => return visitor.finish(),
-                    Some((post_hir, frame)) => (post_hir, frame),
-                };
-                // If this is a concat/alternate, then we might have additional
-                // inductive steps to process.
-                if let Some(x) = self.pop(frame) {
-                    match x {
-                        Frame::Alternation { .. } => {
-                            visitor.visit_alternation_in()?;
-                        }
-                        Frame::Concat { .. } => {
-                            visitor.visit_concat_in()?;
-                        }
-                        _ => {}
-                    }
-                    hir = x.child();
-                    self.stack.push((post_hir, x));
-                    break;
-                }
-                // Otherwise, we've finished visiting all the child nodes for
-                // this HIR, so we can post visit it now.
-                visitor.visit_post(post_hir)?;
-            }
-        }
-    }
-
-    /// Build a stack frame for the given HIR if one is needed (which occurs if
-    /// and only if there are child nodes in the HIR). Otherwise, return None.
-    fn induct(&mut self, hir: &'a Hir) -> Option<Frame<'a>> {
-        match *hir.kind() {
-            HirKind::Repetition(ref x) => Some(Frame::Repetition(x)),
-            HirKind::Capture(ref x) => Some(Frame::Capture(x)),
-            HirKind::Concat(ref x) if x.is_empty() => None,
-            HirKind::Concat(ref x) => {
-                Some(Frame::Concat { head: &x[0], tail: &x[1..] })
-            }
-            HirKind::Alternation(ref x) if x.is_empty() => None,
-            HirKind::Alternation(ref x) => {
-                Some(Frame::Alternation { head: &x[0], tail: &x[1..] })
-            }
-            _ => None,
-        }
-    }
-
-    /// Pops the given frame. If the frame has an additional inductive step,
-    /// then return it, otherwise return `None`.
-    fn pop(&self, induct: Frame<'a>) -> Option<Frame<'a>> {
-        match induct {
-            Frame::Repetition(_) => None,
-            Frame::Capture(_) => None,
-            Frame::Concat { tail, .. } => {
-                if tail.is_empty() {
-                    None
-                } else {
-                    Some(Frame::Concat { head: &tail[0], tail: &tail[1..] })
-                }
-            }
-            Frame::Alternation { tail, .. } => {
-                if tail.is_empty() {
-                    None
-                } else {
-                    Some(Frame::Alternation {
-                        head: &tail[0],
-                        tail: &tail[1..],
-                    })
-                }
-            }
-        }
-    }
-}
-
-impl<'a> Frame<'a> {
-    /// Perform the next inductive step on this frame and return the next
-    /// child HIR node to visit.
-    fn child(&self) -> &'a Hir {
-        match *self {
-            Frame::Repetition(rep) => &rep.sub,
-            Frame::Capture(capture) => &capture.sub,
-            Frame::Concat { head, .. } => head,
-            Frame::Alternation { head, .. } => head,
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/lib.rs
deleted file mode 100644
index 20f25db..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/lib.rs
+++ /dev/null
@@ -1,431 +0,0 @@
-/*!
-This crate provides a robust regular expression parser.
-
-This crate defines two primary types:
-
-* [`Ast`](ast::Ast) is the abstract syntax of a regular expression.
-  An abstract syntax corresponds to a *structured representation* of the
-  concrete syntax of a regular expression, where the concrete syntax is the
-  pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it
-  can be converted back to the original concrete syntax (modulo some details,
-  like whitespace). To a first approximation, the abstract syntax is complex
-  and difficult to analyze.
-* [`Hir`](hir::Hir) is the high-level intermediate representation
-  ("HIR" or "high-level IR" for short) of regular expression. It corresponds to
-  an intermediate state of a regular expression that sits between the abstract
-  syntax and the low level compiled opcodes that are eventually responsible for
-  executing a regular expression search. Given some high-level IR, it is not
-  possible to produce the original concrete syntax (although it is possible to
-  produce an equivalent concrete syntax, but it will likely scarcely resemble
-  the original pattern). To a first approximation, the high-level IR is simple
-  and easy to analyze.
-
-These two types come with conversion routines:
-
-* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an
-[`Ast`](ast::Ast).
-* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a
-[`Hir`](hir::Hir).
-
-As a convenience, the above two conversion routines are combined into one via
-the top-level [`Parser`] type. This `Parser` will first convert your pattern to
-an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level
-[`parse`] free function.
-
-
-# Example
-
-This example shows how to parse a pattern string into its HIR:
-
-```
-use regex_syntax::{hir::Hir, parse};
-
-let hir = parse("a|b")?;
-assert_eq!(hir, Hir::alternation(vec![
-    Hir::literal("a".as_bytes()),
-    Hir::literal("b".as_bytes()),
-]));
-# Ok::<(), Box<dyn std::error::Error>>(())
-```
-
-
-# Concrete syntax supported
-
-The concrete syntax is documented as part of the public API of the
-[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax).
-
-
-# Input safety
-
-A key feature of this library is that it is safe to use with end user facing
-input. This plays a significant role in the internal implementation. In
-particular:
-
-1. Parsers provide a `nest_limit` option that permits callers to control how
-   deeply nested a regular expression is allowed to be. This makes it possible
-   to do case analysis over an `Ast` or an `Hir` using recursion without
-   worrying about stack overflow.
-2. Since relying on a particular stack size is brittle, this crate goes to
-   great lengths to ensure that all interactions with both the `Ast` and the
-   `Hir` do not use recursion. Namely, they use constant stack space and heap
-   space proportional to the size of the original pattern string (in bytes).
-   This includes the type's corresponding destructors. (One exception to this
-   is literal extraction, but this will eventually get fixed.)
-
-
-# Error reporting
-
-The `Display` implementations on all `Error` types exposed in this library
-provide nice human readable errors that are suitable for showing to end users
-in a monospace font.
-
-
-# Literal extraction
-
-This crate provides limited support for [literal extraction from `Hir`
-values](hir::literal). Be warned that literal extraction uses recursion, and
-therefore, stack size proportional to the size of the `Hir`.
-
-The purpose of literal extraction is to speed up searches. That is, if you
-know a regular expression must match a prefix or suffix literal, then it is
-often quicker to search for instances of that literal, and then confirm or deny
-the match using the full regular expression engine. These optimizations are
-done automatically in the `regex` crate.
-
-
-# Crate features
-
-An important feature provided by this crate is its Unicode support. This
-includes things like case folding, boolean properties, general categories,
-scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
-However, a downside of this support is that it requires bundling several
-Unicode data tables that are substantial in size.
-
-A fair number of use cases do not require full Unicode support. For this
-reason, this crate exposes a number of features to control which Unicode
-data is available.
-
-If a regular expression attempts to use a Unicode feature that is not available
-because the corresponding crate feature was disabled, then translating that
-regular expression to an `Hir` will return an error. (It is still possible
-construct an `Ast` for such a regular expression, since Unicode data is not
-used until translation to an `Hir`.) Stated differently, enabling or disabling
-any of the features below can only add or subtract from the total set of valid
-regular expressions. Enabling or disabling a feature will never modify the
-match semantics of a regular expression.
-
-The following features are available:
-
-* **std** -
-  Enables support for the standard library. This feature is enabled by default.
-  When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
-  generally just enables `std::error::Error` trait impls for the various error
-  types.
-* **unicode** -
-  Enables all Unicode features. This feature is enabled by default, and will
-  always cover all Unicode features, even if more are added in the future.
-* **unicode-age** -
-  Provide the data for the
-  [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
-  This makes it possible to use classes like `\p{Age:6.0}` to refer to all
-  codepoints first introduced in Unicode 6.0
-* **unicode-bool** -
-  Provide the data for numerous Unicode boolean properties. The full list
-  is not included here, but contains properties like `Alphabetic`, `Emoji`,
-  `Lowercase`, `Math`, `Uppercase` and `White_Space`.
-* **unicode-case** -
-  Provide the data for case insensitive matching using
-  [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
-* **unicode-gencat** -
-  Provide the data for
-  [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
-  This includes, but is not limited to, `Decimal_Number`, `Letter`,
-  `Math_Symbol`, `Number` and `Punctuation`.
-* **unicode-perl** -
-  Provide the data for supporting the Unicode-aware Perl character classes,
-  corresponding to `\w`, `\s` and `\d`. This is also necessary for using
-  Unicode-aware word boundary assertions. Note that if this feature is
-  disabled, the `\s` and `\d` character classes are still available if the
-  `unicode-bool` and `unicode-gencat` features are enabled, respectively.
-* **unicode-script** -
-  Provide the data for
-  [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
-  This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
-  `Latin` and `Thai`.
-* **unicode-segment** -
-  Provide the data necessary to provide the properties used to implement the
-  [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
-  This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
-  `\p{sb=ATerm}`.
-* **arbitrary** -
-  Enabling this feature introduces a public dependency on the
-  [`arbitrary`](https://crates.io/crates/arbitrary)
-  crate. Namely, it implements the `Arbitrary` trait from that crate for the
-  [`Ast`](crate::ast::Ast) type. This feature is disabled by default.
-*/
-
-#![no_std]
-#![forbid(unsafe_code)]
-#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
-#![warn(missing_debug_implementations)]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
-
-#[cfg(any(test, feature = "std"))]
-extern crate std;
-
-extern crate alloc;
-
-pub use crate::{
-    error::Error,
-    parser::{parse, Parser, ParserBuilder},
-    unicode::UnicodeWordError,
-};
-
-use alloc::string::String;
-
-pub mod ast;
-mod debug;
-mod either;
-mod error;
-pub mod hir;
-mod parser;
-mod rank;
-mod unicode;
-mod unicode_tables;
-pub mod utf8;
-
-/// Escapes all regular expression meta characters in `text`.
-///
-/// The string returned may be safely used as a literal in a regular
-/// expression.
-pub fn escape(text: &str) -> String {
-    let mut quoted = String::new();
-    escape_into(text, &mut quoted);
-    quoted
-}
-
-/// Escapes all meta characters in `text` and writes the result into `buf`.
-///
-/// This will append escape characters into the given buffer. The characters
-/// that are appended are safe to use as a literal in a regular expression.
-pub fn escape_into(text: &str, buf: &mut String) {
-    buf.reserve(text.len());
-    for c in text.chars() {
-        if is_meta_character(c) {
-            buf.push('\\');
-        }
-        buf.push(c);
-    }
-}
-
-/// Returns true if the given character has significance in a regex.
-///
-/// Generally speaking, these are the only characters which _must_ be escaped
-/// in order to match their literal meaning. For example, to match a literal
-/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
-/// example, `-` is treated as a meta character because of its significance
-/// for writing ranges inside of character classes, but the regex `-` will
-/// match a literal `-` because `-` has no special meaning outside of character
-/// classes.
-///
-/// In order to determine whether a character may be escaped at all, the
-/// [`is_escapeable_character`] routine should be used. The difference between
-/// `is_meta_character` and `is_escapeable_character` is that the latter will
-/// return true for some characters that are _not_ meta characters. For
-/// example, `%` and `\%` both match a literal `%` in all contexts. In other
-/// words, `is_escapeable_character` includes "superfluous" escapes.
-///
-/// Note that the set of characters for which this function returns `true` or
-/// `false` is fixed and won't change in a semver compatible release. (In this
-/// case, "semver compatible release" actually refers to the `regex` crate
-/// itself, since reducing or expanding the set of meta characters would be a
-/// breaking change for not just `regex-syntax` but also `regex` itself.)
-///
-/// # Example
-///
-/// ```
-/// use regex_syntax::is_meta_character;
-///
-/// assert!(is_meta_character('?'));
-/// assert!(is_meta_character('-'));
-/// assert!(is_meta_character('&'));
-/// assert!(is_meta_character('#'));
-///
-/// assert!(!is_meta_character('%'));
-/// assert!(!is_meta_character('/'));
-/// assert!(!is_meta_character('!'));
-/// assert!(!is_meta_character('"'));
-/// assert!(!is_meta_character('e'));
-/// ```
-pub fn is_meta_character(c: char) -> bool {
-    match c {
-        '\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
-        | '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
-        _ => false,
-    }
-}
-
-/// Returns true if the given character can be escaped in a regex.
-///
-/// This returns true in all cases that `is_meta_character` returns true, but
-/// also returns true in some cases where `is_meta_character` returns false.
-/// For example, `%` is not a meta character, but it is escapeable. That is,
-/// `%` and `\%` both match a literal `%` in all contexts.
-///
-/// The purpose of this routine is to provide knowledge about what characters
-/// may be escaped. Namely, most regex engines permit "superfluous" escapes
-/// where characters without any special significance may be escaped even
-/// though there is no actual _need_ to do so.
-///
-/// This will return false for some characters. For example, `e` is not
-/// escapeable. Therefore, `\e` will either result in a parse error (which is
-/// true today), or it could backwards compatibly evolve into a new construct
-/// with its own meaning. Indeed, that is the purpose of banning _some_
-/// superfluous escapes: it provides a way to evolve the syntax in a compatible
-/// manner.
-///
-/// # Example
-///
-/// ```
-/// use regex_syntax::is_escapeable_character;
-///
-/// assert!(is_escapeable_character('?'));
-/// assert!(is_escapeable_character('-'));
-/// assert!(is_escapeable_character('&'));
-/// assert!(is_escapeable_character('#'));
-/// assert!(is_escapeable_character('%'));
-/// assert!(is_escapeable_character('/'));
-/// assert!(is_escapeable_character('!'));
-/// assert!(is_escapeable_character('"'));
-///
-/// assert!(!is_escapeable_character('e'));
-/// ```
-pub fn is_escapeable_character(c: char) -> bool {
-    // Certainly escapeable if it's a meta character.
-    if is_meta_character(c) {
-        return true;
-    }
-    // Any character that isn't ASCII is definitely not escapeable. There's
-    // no real need to allow things like \☃ right?
-    if !c.is_ascii() {
-        return false;
-    }
-    // Otherwise, we basically say that everything is escapeable unless it's a
-    // letter or digit. Things like \3 are either octal (when enabled) or an
-    // error, and we should keep it that way. Otherwise, letters are reserved
-    // for adding new syntax in a backwards compatible way.
-    match c {
-        '0'..='9' | 'A'..='Z' | 'a'..='z' => false,
-        // While not currently supported, we keep these as not escapeable to
-        // give us some flexibility with respect to supporting the \< and
-        // \> word boundary assertions in the future. By rejecting them as
-        // escapeable, \< and \> will result in a parse error. Thus, we can
-        // turn them into something else in the future without it being a
-        // backwards incompatible change.
-        //
-        // OK, now we support \< and \>, and we need to retain them as *not*
-        // escapeable here since the escape sequence is significant.
-        '<' | '>' => false,
-        _ => true,
-    }
-}
-
-/// Returns true if and only if the given character is a Unicode word
-/// character.
-///
-/// A Unicode word character is defined by
-/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
-/// In particular, a character
-/// is considered a word character if it is in either of the `Alphabetic` or
-/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
-/// or `Connector_Punctuation` general categories.
-///
-/// # Panics
-///
-/// If the `unicode-perl` feature is not enabled, then this function
-/// panics. For this reason, it is recommended that callers use
-/// [`try_is_word_character`] instead.
-pub fn is_word_character(c: char) -> bool {
-    try_is_word_character(c).expect("unicode-perl feature must be enabled")
-}
-
-/// Returns true if and only if the given character is a Unicode word
-/// character.
-///
-/// A Unicode word character is defined by
-/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
-/// In particular, a character
-/// is considered a word character if it is in either of the `Alphabetic` or
-/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
-/// or `Connector_Punctuation` general categories.
-///
-/// # Errors
-///
-/// If the `unicode-perl` feature is not enabled, then this function always
-/// returns an error.
-pub fn try_is_word_character(
-    c: char,
-) -> core::result::Result<bool, UnicodeWordError> {
-    unicode::is_word_character(c)
-}
-
-/// Returns true if and only if the given character is an ASCII word character.
-///
-/// An ASCII word character is defined by the following character class:
-/// `[_0-9a-zA-Z]`.
-pub fn is_word_byte(c: u8) -> bool {
-    match c {
-        b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
-        _ => false,
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use alloc::string::ToString;
-
-    use super::*;
-
-    #[test]
-    fn escape_meta() {
-        assert_eq!(
-            escape(r"\.+*?()|[]{}^$#&-~"),
-            r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
-        );
-    }
-
-    #[test]
-    fn word_byte() {
-        assert!(is_word_byte(b'a'));
-        assert!(!is_word_byte(b'-'));
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-perl")]
-    fn word_char() {
-        assert!(is_word_character('a'), "ASCII");
-        assert!(is_word_character('à'), "Latin-1");
-        assert!(is_word_character('β'), "Greek");
-        assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
-        assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
-        assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
-        assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
-        assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
-        assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
-        assert!(!is_word_character('-'));
-        assert!(!is_word_character('☃'));
-    }
-
-    #[test]
-    #[should_panic]
-    #[cfg(not(feature = "unicode-perl"))]
-    fn word_char_disabled_panic() {
-        assert!(is_word_character('a'));
-    }
-
-    #[test]
-    #[cfg(not(feature = "unicode-perl"))]
-    fn word_char_disabled_error() {
-        assert!(try_is_word_character('a').is_err());
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/parser.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/parser.rs
deleted file mode 100644
index f482b84..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/parser.rs
+++ /dev/null
@@ -1,254 +0,0 @@
-use crate::{ast, hir, Error};
-
-/// A convenience routine for parsing a regex using default options.
-///
-/// This is equivalent to `Parser::new().parse(pattern)`.
-///
-/// If you need to set non-default options, then use a [`ParserBuilder`].
-///
-/// This routine returns an [`Hir`](hir::Hir) value. Namely, it automatically
-/// parses the pattern as an [`Ast`](ast::Ast) and then invokes the translator
-/// to convert the `Ast` into an `Hir`. If you need access to the `Ast`, then
-/// you should use a [`ast::parse::Parser`].
-pub fn parse(pattern: &str) -> Result<hir::Hir, Error> {
-    Parser::new().parse(pattern)
-}
-
-/// A builder for a regular expression parser.
-///
-/// This builder permits modifying configuration options for the parser.
-///
-/// This type combines the builder options for both the [AST
-/// `ParserBuilder`](ast::parse::ParserBuilder) and the [HIR
-/// `TranslatorBuilder`](hir::translate::TranslatorBuilder).
-#[derive(Clone, Debug, Default)]
-pub struct ParserBuilder {
-    ast: ast::parse::ParserBuilder,
-    hir: hir::translate::TranslatorBuilder,
-}
-
-impl ParserBuilder {
-    /// Create a new parser builder with a default configuration.
-    pub fn new() -> ParserBuilder {
-        ParserBuilder::default()
-    }
-
-    /// Build a parser from this configuration with the given pattern.
-    pub fn build(&self) -> Parser {
-        Parser { ast: self.ast.build(), hir: self.hir.build() }
-    }
-
-    /// Set the nesting limit for this parser.
-    ///
-    /// The nesting limit controls how deep the abstract syntax tree is allowed
-    /// to be. If the AST exceeds the given limit (e.g., with too many nested
-    /// groups), then an error is returned by the parser.
-    ///
-    /// The purpose of this limit is to act as a heuristic to prevent stack
-    /// overflow for consumers that do structural induction on an `Ast` using
-    /// explicit recursion. While this crate never does this (instead using
-    /// constant stack space and moving the call stack to the heap), other
-    /// crates may.
-    ///
-    /// This limit is not checked until the entire Ast is parsed. Therefore,
-    /// if callers want to put a limit on the amount of heap space used, then
-    /// they should impose a limit on the length, in bytes, of the concrete
-    /// pattern string. In particular, this is viable since this parser
-    /// implementation will limit itself to heap space proportional to the
-    /// length of the pattern string.
-    ///
-    /// Note that a nest limit of `0` will return a nest limit error for most
-    /// patterns but not all. For example, a nest limit of `0` permits `a` but
-    /// not `ab`, since `ab` requires a concatenation, which results in a nest
-    /// depth of `1`. In general, a nest limit is not something that manifests
-    /// in an obvious way in the concrete syntax, therefore, it should not be
-    /// used in a granular way.
-    pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder {
-        self.ast.nest_limit(limit);
-        self
-    }
-
-    /// Whether to support octal syntax or not.
-    ///
-    /// Octal syntax is a little-known way of uttering Unicode codepoints in
-    /// a regular expression. For example, `a`, `\x61`, `\u0061` and
-    /// `\141` are all equivalent regular expressions, where the last example
-    /// shows octal syntax.
-    ///
-    /// While supporting octal syntax isn't in and of itself a problem, it does
-    /// make good error messages harder. That is, in PCRE based regex engines,
-    /// syntax like `\0` invokes a backreference, which is explicitly
-    /// unsupported in Rust's regex engine. However, many users expect it to
-    /// be supported. Therefore, when octal support is disabled, the error
-    /// message will explicitly mention that backreferences aren't supported.
-    ///
-    /// Octal syntax is disabled by default.
-    pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.ast.octal(yes);
-        self
-    }
-
-    /// When disabled, translation will permit the construction of a regular
-    /// expression that may match invalid UTF-8.
-    ///
-    /// When enabled (the default), the translator is guaranteed to produce an
-    /// expression that, for non-empty matches, will only ever produce spans
-    /// that are entirely valid UTF-8 (otherwise, the translator will return an
-    /// error).
-    ///
-    /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even
-    /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete
-    /// syntax) will be allowed even though they can produce matches that split
-    /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty"
-    /// matches, and it is expected that the regex engine itself must handle
-    /// these cases if necessary (perhaps by suppressing any zero-width matches
-    /// that split a codepoint).
-    pub fn utf8(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.hir.utf8(yes);
-        self
-    }
-
-    /// Enable verbose mode in the regular expression.
-    ///
-    /// When enabled, verbose mode permits insignificant whitespace in many
-    /// places in the regular expression, as well as comments. Comments are
-    /// started using `#` and continue until the end of the line.
-    ///
-    /// By default, this is disabled. It may be selectively enabled in the
-    /// regular expression by using the `x` flag regardless of this setting.
-    pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.ast.ignore_whitespace(yes);
-        self
-    }
-
-    /// Enable or disable the case insensitive flag by default.
-    ///
-    /// By default this is disabled. It may alternatively be selectively
-    /// enabled in the regular expression itself via the `i` flag.
-    pub fn case_insensitive(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.hir.case_insensitive(yes);
-        self
-    }
-
-    /// Enable or disable the multi-line matching flag by default.
-    ///
-    /// By default this is disabled. It may alternatively be selectively
-    /// enabled in the regular expression itself via the `m` flag.
-    pub fn multi_line(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.hir.multi_line(yes);
-        self
-    }
-
-    /// Enable or disable the "dot matches any character" flag by default.
-    ///
-    /// By default this is disabled. It may alternatively be selectively
-    /// enabled in the regular expression itself via the `s` flag.
-    pub fn dot_matches_new_line(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.hir.dot_matches_new_line(yes);
-        self
-    }
-
-    /// Enable or disable the CRLF mode flag by default.
-    ///
-    /// By default this is disabled. It may alternatively be selectively
-    /// enabled in the regular expression itself via the `R` flag.
-    ///
-    /// When CRLF mode is enabled, the following happens:
-    ///
-    /// * Unless `dot_matches_new_line` is enabled, `.` will match any character
-    /// except for `\r` and `\n`.
-    /// * When `multi_line` mode is enabled, `^` and `$` will treat `\r\n`,
-    /// `\r` and `\n` as line terminators. And in particular, neither will
-    /// match between a `\r` and a `\n`.
-    pub fn crlf(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.hir.crlf(yes);
-        self
-    }
-
-    /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`.
-    ///
-    /// Namely, instead of `.` (by default) matching everything except for `\n`,
-    /// this will cause `.` to match everything except for the byte given.
-    ///
-    /// If `.` is used in a context where Unicode mode is enabled and this byte
-    /// isn't ASCII, then an error will be returned. When Unicode mode is
-    /// disabled, then any byte is permitted, but will return an error if UTF-8
-    /// mode is enabled and it is a non-ASCII byte.
-    ///
-    /// In short, any ASCII value for a line terminator is always okay. But a
-    /// non-ASCII byte might result in an error depending on whether Unicode
-    /// mode or UTF-8 mode are enabled.
-    ///
-    /// Note that if `R` mode is enabled then it always takes precedence and
-    /// the line terminator will be treated as `\r` and `\n` simultaneously.
-    ///
-    /// Note also that this *doesn't* impact the look-around assertions
-    /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional
-    /// configuration in the regex engine itself.
-    pub fn line_terminator(&mut self, byte: u8) -> &mut ParserBuilder {
-        self.hir.line_terminator(byte);
-        self
-    }
-
-    /// Enable or disable the "swap greed" flag by default.
-    ///
-    /// By default this is disabled. It may alternatively be selectively
-    /// enabled in the regular expression itself via the `U` flag.
-    pub fn swap_greed(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.hir.swap_greed(yes);
-        self
-    }
-
-    /// Enable or disable the Unicode flag (`u`) by default.
-    ///
-    /// By default this is **enabled**. It may alternatively be selectively
-    /// disabled in the regular expression itself via the `u` flag.
-    ///
-    /// Note that unless `utf8` is disabled (it's enabled by default), a
-    /// regular expression will fail to parse if Unicode mode is disabled and a
-    /// sub-expression could possibly match invalid UTF-8.
-    pub fn unicode(&mut self, yes: bool) -> &mut ParserBuilder {
-        self.hir.unicode(yes);
-        self
-    }
-}
-
-/// A convenience parser for regular expressions.
-///
-/// This parser takes as input a regular expression pattern string (the
-/// "concrete syntax") and returns a high-level intermediate representation
-/// (the HIR) suitable for most types of analysis. In particular, this parser
-/// hides the intermediate state of producing an AST (the "abstract syntax").
-/// The AST is itself far more complex than the HIR, so this parser serves as a
-/// convenience for never having to deal with it at all.
-///
-/// If callers have more fine grained use cases that need an AST, then please
-/// see the [`ast::parse`] module.
-///
-/// A `Parser` can be configured in more detail via a [`ParserBuilder`].
-#[derive(Clone, Debug)]
-pub struct Parser {
-    ast: ast::parse::Parser,
-    hir: hir::translate::Translator,
-}
-
-impl Parser {
-    /// Create a new parser with a default configuration.
-    ///
-    /// The parser can be run with `parse` method. The parse method returns
-    /// a high level intermediate representation of the given regular
-    /// expression.
-    ///
-    /// To set configuration options on the parser, use [`ParserBuilder`].
-    pub fn new() -> Parser {
-        ParserBuilder::new().build()
-    }
-
-    /// Parse the regular expression into a high level intermediate
-    /// representation.
-    pub fn parse(&mut self, pattern: &str) -> Result<hir::Hir, Error> {
-        let ast = self.ast.parse(pattern)?;
-        let hir = self.hir.translate(pattern, &ast)?;
-        Ok(hir)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/rank.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/rank.rs
deleted file mode 100644
index ccb25a2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/rank.rs
+++ /dev/null
@@ -1,258 +0,0 @@
-pub(crate) const BYTE_FREQUENCIES: [u8; 256] = [
-    55,  // '\x00'
-    52,  // '\x01'
-    51,  // '\x02'
-    50,  // '\x03'
-    49,  // '\x04'
-    48,  // '\x05'
-    47,  // '\x06'
-    46,  // '\x07'
-    45,  // '\x08'
-    103, // '\t'
-    242, // '\n'
-    66,  // '\x0b'
-    67,  // '\x0c'
-    229, // '\r'
-    44,  // '\x0e'
-    43,  // '\x0f'
-    42,  // '\x10'
-    41,  // '\x11'
-    40,  // '\x12'
-    39,  // '\x13'
-    38,  // '\x14'
-    37,  // '\x15'
-    36,  // '\x16'
-    35,  // '\x17'
-    34,  // '\x18'
-    33,  // '\x19'
-    56,  // '\x1a'
-    32,  // '\x1b'
-    31,  // '\x1c'
-    30,  // '\x1d'
-    29,  // '\x1e'
-    28,  // '\x1f'
-    255, // ' '
-    148, // '!'
-    164, // '"'
-    149, // '#'
-    136, // '$'
-    160, // '%'
-    155, // '&'
-    173, // "'"
-    221, // '('
-    222, // ')'
-    134, // '*'
-    122, // '+'
-    232, // ','
-    202, // '-'
-    215, // '.'
-    224, // '/'
-    208, // '0'
-    220, // '1'
-    204, // '2'
-    187, // '3'
-    183, // '4'
-    179, // '5'
-    177, // '6'
-    168, // '7'
-    178, // '8'
-    200, // '9'
-    226, // ':'
-    195, // ';'
-    154, // '<'
-    184, // '='
-    174, // '>'
-    126, // '?'
-    120, // '@'
-    191, // 'A'
-    157, // 'B'
-    194, // 'C'
-    170, // 'D'
-    189, // 'E'
-    162, // 'F'
-    161, // 'G'
-    150, // 'H'
-    193, // 'I'
-    142, // 'J'
-    137, // 'K'
-    171, // 'L'
-    176, // 'M'
-    185, // 'N'
-    167, // 'O'
-    186, // 'P'
-    112, // 'Q'
-    175, // 'R'
-    192, // 'S'
-    188, // 'T'
-    156, // 'U'
-    140, // 'V'
-    143, // 'W'
-    123, // 'X'
-    133, // 'Y'
-    128, // 'Z'
-    147, // '['
-    138, // '\\'
-    146, // ']'
-    114, // '^'
-    223, // '_'
-    151, // '`'
-    249, // 'a'
-    216, // 'b'
-    238, // 'c'
-    236, // 'd'
-    253, // 'e'
-    227, // 'f'
-    218, // 'g'
-    230, // 'h'
-    247, // 'i'
-    135, // 'j'
-    180, // 'k'
-    241, // 'l'
-    233, // 'm'
-    246, // 'n'
-    244, // 'o'
-    231, // 'p'
-    139, // 'q'
-    245, // 'r'
-    243, // 's'
-    251, // 't'
-    235, // 'u'
-    201, // 'v'
-    196, // 'w'
-    240, // 'x'
-    214, // 'y'
-    152, // 'z'
-    182, // '{'
-    205, // '|'
-    181, // '}'
-    127, // '~'
-    27,  // '\x7f'
-    212, // '\x80'
-    211, // '\x81'
-    210, // '\x82'
-    213, // '\x83'
-    228, // '\x84'
-    197, // '\x85'
-    169, // '\x86'
-    159, // '\x87'
-    131, // '\x88'
-    172, // '\x89'
-    105, // '\x8a'
-    80,  // '\x8b'
-    98,  // '\x8c'
-    96,  // '\x8d'
-    97,  // '\x8e'
-    81,  // '\x8f'
-    207, // '\x90'
-    145, // '\x91'
-    116, // '\x92'
-    115, // '\x93'
-    144, // '\x94'
-    130, // '\x95'
-    153, // '\x96'
-    121, // '\x97'
-    107, // '\x98'
-    132, // '\x99'
-    109, // '\x9a'
-    110, // '\x9b'
-    124, // '\x9c'
-    111, // '\x9d'
-    82,  // '\x9e'
-    108, // '\x9f'
-    118, // '\xa0'
-    141, // '¡'
-    113, // '¢'
-    129, // '£'
-    119, // '¤'
-    125, // '¥'
-    165, // '¦'
-    117, // '§'
-    92,  // '¨'
-    106, // '©'
-    83,  // 'ª'
-    72,  // '«'
-    99,  // '¬'
-    93,  // '\xad'
-    65,  // '®'
-    79,  // '¯'
-    166, // '°'
-    237, // '±'
-    163, // '²'
-    199, // '³'
-    190, // '´'
-    225, // 'µ'
-    209, // '¶'
-    203, // '·'
-    198, // '¸'
-    217, // '¹'
-    219, // 'º'
-    206, // '»'
-    234, // '¼'
-    248, // '½'
-    158, // '¾'
-    239, // '¿'
-    255, // 'À'
-    255, // 'Á'
-    255, // 'Â'
-    255, // 'Ã'
-    255, // 'Ä'
-    255, // 'Å'
-    255, // 'Æ'
-    255, // 'Ç'
-    255, // 'È'
-    255, // 'É'
-    255, // 'Ê'
-    255, // 'Ë'
-    255, // 'Ì'
-    255, // 'Í'
-    255, // 'Î'
-    255, // 'Ï'
-    255, // 'Ð'
-    255, // 'Ñ'
-    255, // 'Ò'
-    255, // 'Ó'
-    255, // 'Ô'
-    255, // 'Õ'
-    255, // 'Ö'
-    255, // '×'
-    255, // 'Ø'
-    255, // 'Ù'
-    255, // 'Ú'
-    255, // 'Û'
-    255, // 'Ü'
-    255, // 'Ý'
-    255, // 'Þ'
-    255, // 'ß'
-    255, // 'à'
-    255, // 'á'
-    255, // 'â'
-    255, // 'ã'
-    255, // 'ä'
-    255, // 'å'
-    255, // 'æ'
-    255, // 'ç'
-    255, // 'è'
-    255, // 'é'
-    255, // 'ê'
-    255, // 'ë'
-    255, // 'ì'
-    255, // 'í'
-    255, // 'î'
-    255, // 'ï'
-    255, // 'ð'
-    255, // 'ñ'
-    255, // 'ò'
-    255, // 'ó'
-    255, // 'ô'
-    255, // 'õ'
-    255, // 'ö'
-    255, // '÷'
-    255, // 'ø'
-    255, // 'ù'
-    255, // 'ú'
-    255, // 'û'
-    255, // 'ü'
-    255, // 'ý'
-    255, // 'þ'
-    255, // 'ÿ'
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode.rs
deleted file mode 100644
index 07f7819..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode.rs
+++ /dev/null
@@ -1,1041 +0,0 @@
-use alloc::{
-    string::{String, ToString},
-    vec::Vec,
-};
-
-use crate::hir;
-
-/// An inclusive range of codepoints from a generated file (hence the static
-/// lifetime).
-type Range = &'static [(char, char)];
-
-/// An error that occurs when dealing with Unicode.
-///
-/// We don't impl the Error trait here because these always get converted
-/// into other public errors. (This error type isn't exported.)
-#[derive(Debug)]
-pub enum Error {
-    PropertyNotFound,
-    PropertyValueNotFound,
-    // Not used when unicode-perl is enabled.
-    #[allow(dead_code)]
-    PerlClassNotFound,
-}
-
-/// An error that occurs when Unicode-aware simple case folding fails.
-///
-/// This error can occur when the case mapping tables necessary for Unicode
-/// aware case folding are unavailable. This only occurs when the
-/// `unicode-case` feature is disabled. (The feature is enabled by default.)
-#[derive(Debug)]
-pub struct CaseFoldError(());
-
-#[cfg(feature = "std")]
-impl std::error::Error for CaseFoldError {}
-
-impl core::fmt::Display for CaseFoldError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(
-            f,
-            "Unicode-aware case folding is not available \
-             (probably because the unicode-case feature is not enabled)"
-        )
-    }
-}
-
-/// An error that occurs when the Unicode-aware `\w` class is unavailable.
-///
-/// This error can occur when the data tables necessary for the Unicode aware
-/// Perl character class `\w` are unavailable. This only occurs when the
-/// `unicode-perl` feature is disabled. (The feature is enabled by default.)
-#[derive(Debug)]
-pub struct UnicodeWordError(());
-
-#[cfg(feature = "std")]
-impl std::error::Error for UnicodeWordError {}
-
-impl core::fmt::Display for UnicodeWordError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(
-            f,
-            "Unicode-aware \\w class is not available \
-             (probably because the unicode-perl feature is not enabled)"
-        )
-    }
-}
-
-/// A state oriented traverser of the simple case folding table.
-///
-/// A case folder can be constructed via `SimpleCaseFolder::new()`, which will
-/// return an error if the underlying case folding table is unavailable.
-///
-/// After construction, it is expected that callers will use
-/// `SimpleCaseFolder::mapping` by calling it with codepoints in strictly
-/// increasing order. For example, calling it on `b` and then on `a` is illegal
-/// and will result in a panic.
-///
-/// The main idea of this type is that it tries hard to make mapping lookups
-/// fast by exploiting the structure of the underlying table, and the ordering
-/// assumption enables this.
-#[derive(Debug)]
-pub struct SimpleCaseFolder {
-    /// The simple case fold table. It's a sorted association list, where the
-    /// keys are Unicode scalar values and the values are the corresponding
-    /// equivalence class (not including the key) of the "simple" case folded
-    /// Unicode scalar values.
-    table: &'static [(char, &'static [char])],
-    /// The last codepoint that was used for a lookup.
-    last: Option<char>,
-    /// The index to the entry in `table` corresponding to the smallest key `k`
-    /// such that `k > k0`, where `k0` is the most recent key lookup. Note that
-    /// in particular, `k0` may not be in the table!
-    next: usize,
-}
-
-impl SimpleCaseFolder {
-    /// Create a new simple case folder, returning an error if the underlying
-    /// case folding table is unavailable.
-    pub fn new() -> Result<SimpleCaseFolder, CaseFoldError> {
-        #[cfg(not(feature = "unicode-case"))]
-        {
-            Err(CaseFoldError(()))
-        }
-        #[cfg(feature = "unicode-case")]
-        {
-            Ok(SimpleCaseFolder {
-                table: crate::unicode_tables::case_folding_simple::CASE_FOLDING_SIMPLE,
-                last: None,
-                next: 0,
-            })
-        }
-    }
-
-    /// Return the equivalence class of case folded codepoints for the given
-    /// codepoint. The equivalence class returned never includes the codepoint
-    /// given. If the given codepoint has no case folded codepoints (i.e.,
-    /// no entry in the underlying case folding table), then this returns an
-    /// empty slice.
-    ///
-    /// # Panics
-    ///
-    /// This panics when called with a `c` that is less than or equal to the
-    /// previous call. In other words, callers need to use this method with
-    /// strictly increasing values of `c`.
-    pub fn mapping(&mut self, c: char) -> &'static [char] {
-        if let Some(last) = self.last {
-            assert!(
-                last < c,
-                "got codepoint U+{:X} which occurs before \
-                 last codepoint U+{:X}",
-                u32::from(c),
-                u32::from(last),
-            );
-        }
-        self.last = Some(c);
-        if self.next >= self.table.len() {
-            return &[];
-        }
-        let (k, v) = self.table[self.next];
-        if k == c {
-            self.next += 1;
-            return v;
-        }
-        match self.get(c) {
-            Err(i) => {
-                self.next = i;
-                &[]
-            }
-            Ok(i) => {
-                // Since we require lookups to proceed
-                // in order, anything we find should be
-                // after whatever we thought might be
-                // next. Otherwise, the caller is either
-                // going out of order or we would have
-                // found our next key at 'self.next'.
-                assert!(i > self.next);
-                self.next = i + 1;
-                self.table[i].1
-            }
-        }
-    }
-
-    /// Returns true if and only if the given range overlaps with any region
-    /// of the underlying case folding table. That is, when true, there exists
-    /// at least one codepoint in the inclusive range `[start, end]` that has
-    /// a non-trivial equivalence class of case folded codepoints. Conversely,
-    /// when this returns false, all codepoints in the range `[start, end]`
-    /// correspond to the trivial equivalence class of case folded codepoints,
-    /// i.e., itself.
-    ///
-    /// This is useful to call before iterating over the codepoints in the
-    /// range and looking up the mapping for each. If you know none of the
-    /// mappings will return anything, then you might be able to skip doing it
-    /// altogether.
-    ///
-    /// # Panics
-    ///
-    /// This panics when `end < start`.
-    pub fn overlaps(&self, start: char, end: char) -> bool {
-        use core::cmp::Ordering;
-
-        assert!(start <= end);
-        self.table
-            .binary_search_by(|&(c, _)| {
-                if start <= c && c <= end {
-                    Ordering::Equal
-                } else if c > end {
-                    Ordering::Greater
-                } else {
-                    Ordering::Less
-                }
-            })
-            .is_ok()
-    }
-
-    /// Returns the index at which `c` occurs in the simple case fold table. If
-    /// `c` does not occur, then this returns an `i` such that `table[i-1].0 <
-    /// c` and `table[i].0 > c`.
-    fn get(&self, c: char) -> Result<usize, usize> {
-        self.table.binary_search_by_key(&c, |&(c1, _)| c1)
-    }
-}
-
-/// A query for finding a character class defined by Unicode. This supports
-/// either use of a property name directly, or lookup by property value. The
-/// former generally refers to Binary properties (see UTS#44, Table 8), but
-/// as a special exception (see UTS#18, Section 1.2) both general categories
-/// (an enumeration) and scripts (a catalog) are supported as if each of their
-/// possible values were a binary property.
-///
-/// In all circumstances, property names and values are normalized and
-/// canonicalized. That is, `GC == gc == GeneralCategory == general_category`.
-///
-/// The lifetime `'a` refers to the shorter of the lifetimes of property name
-/// and property value.
-#[derive(Debug)]
-pub enum ClassQuery<'a> {
-    /// Return a class corresponding to a Unicode binary property, named by
-    /// a single letter.
-    OneLetter(char),
-    /// Return a class corresponding to a Unicode binary property.
-    ///
-    /// Note that, by special exception (see UTS#18, Section 1.2), both
-    /// general category values and script values are permitted here as if
-    /// they were a binary property.
-    Binary(&'a str),
-    /// Return a class corresponding to all codepoints whose property
-    /// (identified by `property_name`) corresponds to the given value
-    /// (identified by `property_value`).
-    ByValue {
-        /// A property name.
-        property_name: &'a str,
-        /// A property value.
-        property_value: &'a str,
-    },
-}
-
-impl<'a> ClassQuery<'a> {
-    fn canonicalize(&self) -> Result<CanonicalClassQuery, Error> {
-        match *self {
-            ClassQuery::OneLetter(c) => self.canonical_binary(&c.to_string()),
-            ClassQuery::Binary(name) => self.canonical_binary(name),
-            ClassQuery::ByValue { property_name, property_value } => {
-                let property_name = symbolic_name_normalize(property_name);
-                let property_value = symbolic_name_normalize(property_value);
-
-                let canon_name = match canonical_prop(&property_name)? {
-                    None => return Err(Error::PropertyNotFound),
-                    Some(canon_name) => canon_name,
-                };
-                Ok(match canon_name {
-                    "General_Category" => {
-                        let canon = match canonical_gencat(&property_value)? {
-                            None => return Err(Error::PropertyValueNotFound),
-                            Some(canon) => canon,
-                        };
-                        CanonicalClassQuery::GeneralCategory(canon)
-                    }
-                    "Script" => {
-                        let canon = match canonical_script(&property_value)? {
-                            None => return Err(Error::PropertyValueNotFound),
-                            Some(canon) => canon,
-                        };
-                        CanonicalClassQuery::Script(canon)
-                    }
-                    _ => {
-                        let vals = match property_values(canon_name)? {
-                            None => return Err(Error::PropertyValueNotFound),
-                            Some(vals) => vals,
-                        };
-                        let canon_val =
-                            match canonical_value(vals, &property_value) {
-                                None => {
-                                    return Err(Error::PropertyValueNotFound)
-                                }
-                                Some(canon_val) => canon_val,
-                            };
-                        CanonicalClassQuery::ByValue {
-                            property_name: canon_name,
-                            property_value: canon_val,
-                        }
-                    }
-                })
-            }
-        }
-    }
-
-    fn canonical_binary(
-        &self,
-        name: &str,
-    ) -> Result<CanonicalClassQuery, Error> {
-        let norm = symbolic_name_normalize(name);
-
-        // This is a special case where 'cf' refers to the 'Format' general
-        // category, but where the 'cf' abbreviation is also an abbreviation
-        // for the 'Case_Folding' property. But we want to treat it as
-        // a general category. (Currently, we don't even support the
-        // 'Case_Folding' property. But if we do in the future, users will be
-        // required to spell it out.)
-        //
-        // Also 'sc' refers to the 'Currency_Symbol' general category, but is
-        // also the abbreviation for the 'Script' property. So we avoid calling
-        // 'canonical_prop' for it too, which would erroneously normalize it
-        // to 'Script'.
-        //
-        // Another case: 'lc' is an abbreviation for the 'Cased_Letter'
-        // general category, but is also an abbreviation for the 'Lowercase_Mapping'
-        // property. We don't currently support the latter, so as with 'cf'
-        // above, we treat 'lc' as 'Cased_Letter'.
-        if norm != "cf" && norm != "sc" && norm != "lc" {
-            if let Some(canon) = canonical_prop(&norm)? {
-                return Ok(CanonicalClassQuery::Binary(canon));
-            }
-        }
-        if let Some(canon) = canonical_gencat(&norm)? {
-            return Ok(CanonicalClassQuery::GeneralCategory(canon));
-        }
-        if let Some(canon) = canonical_script(&norm)? {
-            return Ok(CanonicalClassQuery::Script(canon));
-        }
-        Err(Error::PropertyNotFound)
-    }
-}
-
-/// Like ClassQuery, but its parameters have been canonicalized. This also
-/// differentiates binary properties from flattened general categories and
-/// scripts.
-#[derive(Debug, Eq, PartialEq)]
-enum CanonicalClassQuery {
-    /// The canonical binary property name.
-    Binary(&'static str),
-    /// The canonical general category name.
-    GeneralCategory(&'static str),
-    /// The canonical script name.
-    Script(&'static str),
-    /// An arbitrary association between property and value, both of which
-    /// have been canonicalized.
-    ///
-    /// Note that by construction, the property name of ByValue will never
-    /// be General_Category or Script. Those two cases are subsumed by the
-    /// eponymous variants.
-    ByValue {
-        /// The canonical property name.
-        property_name: &'static str,
-        /// The canonical property value.
-        property_value: &'static str,
-    },
-}
-
-/// Looks up a Unicode class given a query. If one doesn't exist, then
-/// `None` is returned.
-pub fn class(query: ClassQuery<'_>) -> Result<hir::ClassUnicode, Error> {
-    use self::CanonicalClassQuery::*;
-
-    match query.canonicalize()? {
-        Binary(name) => bool_property(name),
-        GeneralCategory(name) => gencat(name),
-        Script(name) => script(name),
-        ByValue { property_name: "Age", property_value } => {
-            let mut class = hir::ClassUnicode::empty();
-            for set in ages(property_value)? {
-                class.union(&hir_class(set));
-            }
-            Ok(class)
-        }
-        ByValue { property_name: "Script_Extensions", property_value } => {
-            script_extension(property_value)
-        }
-        ByValue {
-            property_name: "Grapheme_Cluster_Break",
-            property_value,
-        } => gcb(property_value),
-        ByValue { property_name: "Sentence_Break", property_value } => {
-            sb(property_value)
-        }
-        ByValue { property_name: "Word_Break", property_value } => {
-            wb(property_value)
-        }
-        _ => {
-            // What else should we support?
-            Err(Error::PropertyNotFound)
-        }
-    }
-}
-
-/// Returns a Unicode aware class for \w.
-///
-/// This returns an error if the data is not available for \w.
-pub fn perl_word() -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-perl"))]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PerlClassNotFound)
-    }
-
-    #[cfg(feature = "unicode-perl")]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::perl_word::PERL_WORD;
-        Ok(hir_class(PERL_WORD))
-    }
-
-    imp()
-}
-
-/// Returns a Unicode aware class for \s.
-///
-/// This returns an error if the data is not available for \s.
-pub fn perl_space() -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(any(feature = "unicode-perl", feature = "unicode-bool")))]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PerlClassNotFound)
-    }
-
-    #[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::perl_space::WHITE_SPACE;
-        Ok(hir_class(WHITE_SPACE))
-    }
-
-    #[cfg(feature = "unicode-bool")]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::property_bool::WHITE_SPACE;
-        Ok(hir_class(WHITE_SPACE))
-    }
-
-    imp()
-}
-
-/// Returns a Unicode aware class for \d.
-///
-/// This returns an error if the data is not available for \d.
-pub fn perl_digit() -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(any(feature = "unicode-perl", feature = "unicode-gencat")))]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PerlClassNotFound)
-    }
-
-    #[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::perl_decimal::DECIMAL_NUMBER;
-        Ok(hir_class(DECIMAL_NUMBER))
-    }
-
-    #[cfg(feature = "unicode-gencat")]
-    fn imp() -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::general_category::DECIMAL_NUMBER;
-        Ok(hir_class(DECIMAL_NUMBER))
-    }
-
-    imp()
-}
-
-/// Build a Unicode HIR class from a sequence of Unicode scalar value ranges.
-pub fn hir_class(ranges: &[(char, char)]) -> hir::ClassUnicode {
-    let hir_ranges: Vec<hir::ClassUnicodeRange> = ranges
-        .iter()
-        .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e))
-        .collect();
-    hir::ClassUnicode::new(hir_ranges)
-}
-
-/// Returns true only if the given codepoint is in the `\w` character class.
-///
-/// If the `unicode-perl` feature is not enabled, then this returns an error.
-pub fn is_word_character(c: char) -> Result<bool, UnicodeWordError> {
-    #[cfg(not(feature = "unicode-perl"))]
-    fn imp(_: char) -> Result<bool, UnicodeWordError> {
-        Err(UnicodeWordError(()))
-    }
-
-    #[cfg(feature = "unicode-perl")]
-    fn imp(c: char) -> Result<bool, UnicodeWordError> {
-        use crate::{is_word_byte, unicode_tables::perl_word::PERL_WORD};
-
-        if u8::try_from(c).map_or(false, is_word_byte) {
-            return Ok(true);
-        }
-        Ok(PERL_WORD
-            .binary_search_by(|&(start, end)| {
-                use core::cmp::Ordering;
-
-                if start <= c && c <= end {
-                    Ordering::Equal
-                } else if start > c {
-                    Ordering::Greater
-                } else {
-                    Ordering::Less
-                }
-            })
-            .is_ok())
-    }
-
-    imp(c)
-}
-
-/// A mapping of property values for a specific property.
-///
-/// The first element of each tuple is a normalized property value while the
-/// second element of each tuple is the corresponding canonical property
-/// value.
-type PropertyValues = &'static [(&'static str, &'static str)];
-
-fn canonical_gencat(
-    normalized_value: &str,
-) -> Result<Option<&'static str>, Error> {
-    Ok(match normalized_value {
-        "any" => Some("Any"),
-        "assigned" => Some("Assigned"),
-        "ascii" => Some("ASCII"),
-        _ => {
-            let gencats = property_values("General_Category")?.unwrap();
-            canonical_value(gencats, normalized_value)
-        }
-    })
-}
-
-fn canonical_script(
-    normalized_value: &str,
-) -> Result<Option<&'static str>, Error> {
-    let scripts = property_values("Script")?.unwrap();
-    Ok(canonical_value(scripts, normalized_value))
-}
-
-/// Find the canonical property name for the given normalized property name.
-///
-/// If no such property exists, then `None` is returned.
-///
-/// The normalized property name must have been normalized according to
-/// UAX44 LM3, which can be done using `symbolic_name_normalize`.
-///
-/// If the property names data is not available, then an error is returned.
-fn canonical_prop(
-    normalized_name: &str,
-) -> Result<Option<&'static str>, Error> {
-    #[cfg(not(any(
-        feature = "unicode-age",
-        feature = "unicode-bool",
-        feature = "unicode-gencat",
-        feature = "unicode-perl",
-        feature = "unicode-script",
-        feature = "unicode-segment",
-    )))]
-    fn imp(_: &str) -> Result<Option<&'static str>, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(any(
-        feature = "unicode-age",
-        feature = "unicode-bool",
-        feature = "unicode-gencat",
-        feature = "unicode-perl",
-        feature = "unicode-script",
-        feature = "unicode-segment",
-    ))]
-    fn imp(name: &str) -> Result<Option<&'static str>, Error> {
-        use crate::unicode_tables::property_names::PROPERTY_NAMES;
-
-        Ok(PROPERTY_NAMES
-            .binary_search_by_key(&name, |&(n, _)| n)
-            .ok()
-            .map(|i| PROPERTY_NAMES[i].1))
-    }
-
-    imp(normalized_name)
-}
-
-/// Find the canonical property value for the given normalized property
-/// value.
-///
-/// The given property values should correspond to the values for the property
-/// under question, which can be found using `property_values`.
-///
-/// If no such property value exists, then `None` is returned.
-///
-/// The normalized property value must have been normalized according to
-/// UAX44 LM3, which can be done using `symbolic_name_normalize`.
-fn canonical_value(
-    vals: PropertyValues,
-    normalized_value: &str,
-) -> Option<&'static str> {
-    vals.binary_search_by_key(&normalized_value, |&(n, _)| n)
-        .ok()
-        .map(|i| vals[i].1)
-}
-
-/// Return the table of property values for the given property name.
-///
-/// If the property values data is not available, then an error is returned.
-fn property_values(
-    canonical_property_name: &'static str,
-) -> Result<Option<PropertyValues>, Error> {
-    #[cfg(not(any(
-        feature = "unicode-age",
-        feature = "unicode-bool",
-        feature = "unicode-gencat",
-        feature = "unicode-perl",
-        feature = "unicode-script",
-        feature = "unicode-segment",
-    )))]
-    fn imp(_: &'static str) -> Result<Option<PropertyValues>, Error> {
-        Err(Error::PropertyValueNotFound)
-    }
-
-    #[cfg(any(
-        feature = "unicode-age",
-        feature = "unicode-bool",
-        feature = "unicode-gencat",
-        feature = "unicode-perl",
-        feature = "unicode-script",
-        feature = "unicode-segment",
-    ))]
-    fn imp(name: &'static str) -> Result<Option<PropertyValues>, Error> {
-        use crate::unicode_tables::property_values::PROPERTY_VALUES;
-
-        Ok(PROPERTY_VALUES
-            .binary_search_by_key(&name, |&(n, _)| n)
-            .ok()
-            .map(|i| PROPERTY_VALUES[i].1))
-    }
-
-    imp(canonical_property_name)
-}
-
-// This is only used in some cases, but small enough to just let it be dead
-// instead of figuring out (and maintaining) the right set of features.
-#[allow(dead_code)]
-fn property_set(
-    name_map: &'static [(&'static str, Range)],
-    canonical: &'static str,
-) -> Option<Range> {
-    name_map
-        .binary_search_by_key(&canonical, |x| x.0)
-        .ok()
-        .map(|i| name_map[i].1)
-}
-
-/// Returns an iterator over Unicode Age sets. Each item corresponds to a set
-/// of codepoints that were added in a particular revision of Unicode. The
-/// iterator yields items in chronological order.
-///
-/// If the given age value isn't valid or if the data isn't available, then an
-/// error is returned instead.
-fn ages(canonical_age: &str) -> Result<impl Iterator<Item = Range>, Error> {
-    #[cfg(not(feature = "unicode-age"))]
-    fn imp(_: &str) -> Result<impl Iterator<Item = Range>, Error> {
-        use core::option::IntoIter;
-        Err::<IntoIter<Range>, _>(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-age")]
-    fn imp(canonical_age: &str) -> Result<impl Iterator<Item = Range>, Error> {
-        use crate::unicode_tables::age;
-
-        const AGES: &[(&str, Range)] = &[
-            ("V1_1", age::V1_1),
-            ("V2_0", age::V2_0),
-            ("V2_1", age::V2_1),
-            ("V3_0", age::V3_0),
-            ("V3_1", age::V3_1),
-            ("V3_2", age::V3_2),
-            ("V4_0", age::V4_0),
-            ("V4_1", age::V4_1),
-            ("V5_0", age::V5_0),
-            ("V5_1", age::V5_1),
-            ("V5_2", age::V5_2),
-            ("V6_0", age::V6_0),
-            ("V6_1", age::V6_1),
-            ("V6_2", age::V6_2),
-            ("V6_3", age::V6_3),
-            ("V7_0", age::V7_0),
-            ("V8_0", age::V8_0),
-            ("V9_0", age::V9_0),
-            ("V10_0", age::V10_0),
-            ("V11_0", age::V11_0),
-            ("V12_0", age::V12_0),
-            ("V12_1", age::V12_1),
-            ("V13_0", age::V13_0),
-            ("V14_0", age::V14_0),
-            ("V15_0", age::V15_0),
-            ("V15_1", age::V15_1),
-            ("V16_0", age::V16_0),
-        ];
-        assert_eq!(AGES.len(), age::BY_NAME.len(), "ages are out of sync");
-
-        let pos = AGES.iter().position(|&(age, _)| canonical_age == age);
-        match pos {
-            None => Err(Error::PropertyValueNotFound),
-            Some(i) => Ok(AGES[..=i].iter().map(|&(_, classes)| classes)),
-        }
-    }
-
-    imp(canonical_age)
-}
-
-/// Returns the Unicode HIR class corresponding to the given general category.
-///
-/// Name canonicalization is assumed to be performed by the caller.
-///
-/// If the given general category could not be found, or if the general
-/// category data is not available, then an error is returned.
-fn gencat(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-gencat"))]
-    fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-gencat")]
-    fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::general_category::BY_NAME;
-        match name {
-            "ASCII" => Ok(hir_class(&[('\0', '\x7F')])),
-            "Any" => Ok(hir_class(&[('\0', '\u{10FFFF}')])),
-            "Assigned" => {
-                let mut cls = gencat("Unassigned")?;
-                cls.negate();
-                Ok(cls)
-            }
-            name => property_set(BY_NAME, name)
-                .map(hir_class)
-                .ok_or(Error::PropertyValueNotFound),
-        }
-    }
-
-    match canonical_name {
-        "Decimal_Number" => perl_digit(),
-        name => imp(name),
-    }
-}
-
-/// Returns the Unicode HIR class corresponding to the given script.
-///
-/// Name canonicalization is assumed to be performed by the caller.
-///
-/// If the given script could not be found, or if the script data is not
-/// available, then an error is returned.
-fn script(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-script"))]
-    fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-script")]
-    fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::script::BY_NAME;
-        property_set(BY_NAME, name)
-            .map(hir_class)
-            .ok_or(Error::PropertyValueNotFound)
-    }
-
-    imp(canonical_name)
-}
-
-/// Returns the Unicode HIR class corresponding to the given script extension.
-///
-/// Name canonicalization is assumed to be performed by the caller.
-///
-/// If the given script extension could not be found, or if the script data is
-/// not available, then an error is returned.
-fn script_extension(
-    canonical_name: &'static str,
-) -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-script"))]
-    fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-script")]
-    fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::script_extension::BY_NAME;
-        property_set(BY_NAME, name)
-            .map(hir_class)
-            .ok_or(Error::PropertyValueNotFound)
-    }
-
-    imp(canonical_name)
-}
-
-/// Returns the Unicode HIR class corresponding to the given Unicode boolean
-/// property.
-///
-/// Name canonicalization is assumed to be performed by the caller.
-///
-/// If the given boolean property could not be found, or if the boolean
-/// property data is not available, then an error is returned.
-fn bool_property(
-    canonical_name: &'static str,
-) -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-bool"))]
-    fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-bool")]
-    fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::property_bool::BY_NAME;
-        property_set(BY_NAME, name)
-            .map(hir_class)
-            .ok_or(Error::PropertyNotFound)
-    }
-
-    match canonical_name {
-        "Decimal_Number" => perl_digit(),
-        "White_Space" => perl_space(),
-        name => imp(name),
-    }
-}
-
-/// Returns the Unicode HIR class corresponding to the given grapheme cluster
-/// break property.
-///
-/// Name canonicalization is assumed to be performed by the caller.
-///
-/// If the given property could not be found, or if the corresponding data is
-/// not available, then an error is returned.
-fn gcb(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-segment"))]
-    fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-segment")]
-    fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::grapheme_cluster_break::BY_NAME;
-        property_set(BY_NAME, name)
-            .map(hir_class)
-            .ok_or(Error::PropertyValueNotFound)
-    }
-
-    imp(canonical_name)
-}
-
-/// Returns the Unicode HIR class corresponding to the given word break
-/// property.
-///
-/// Name canonicalization is assumed to be performed by the caller.
-///
-/// If the given property could not be found, or if the corresponding data is
-/// not available, then an error is returned.
-fn wb(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-segment"))]
-    fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-segment")]
-    fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::word_break::BY_NAME;
-        property_set(BY_NAME, name)
-            .map(hir_class)
-            .ok_or(Error::PropertyValueNotFound)
-    }
-
-    imp(canonical_name)
-}
-
-/// Returns the Unicode HIR class corresponding to the given sentence
-/// break property.
-///
-/// Name canonicalization is assumed to be performed by the caller.
-///
-/// If the given property could not be found, or if the corresponding data is
-/// not available, then an error is returned.
-fn sb(canonical_name: &'static str) -> Result<hir::ClassUnicode, Error> {
-    #[cfg(not(feature = "unicode-segment"))]
-    fn imp(_: &'static str) -> Result<hir::ClassUnicode, Error> {
-        Err(Error::PropertyNotFound)
-    }
-
-    #[cfg(feature = "unicode-segment")]
-    fn imp(name: &'static str) -> Result<hir::ClassUnicode, Error> {
-        use crate::unicode_tables::sentence_break::BY_NAME;
-        property_set(BY_NAME, name)
-            .map(hir_class)
-            .ok_or(Error::PropertyValueNotFound)
-    }
-
-    imp(canonical_name)
-}
-
-/// Like symbolic_name_normalize_bytes, but operates on a string.
-fn symbolic_name_normalize(x: &str) -> String {
-    let mut tmp = x.as_bytes().to_vec();
-    let len = symbolic_name_normalize_bytes(&mut tmp).len();
-    tmp.truncate(len);
-    // This should always succeed because `symbolic_name_normalize_bytes`
-    // guarantees that `&tmp[..len]` is always valid UTF-8.
-    //
-    // N.B. We could avoid the additional UTF-8 check here, but it's unlikely
-    // to be worth skipping the additional safety check. A benchmark must
-    // justify it first.
-    String::from_utf8(tmp).unwrap()
-}
-
-/// Normalize the given symbolic name in place according to UAX44-LM3.
-///
-/// A "symbolic name" typically corresponds to property names and property
-/// value aliases. Note, though, that it should not be applied to property
-/// string values.
-///
-/// The slice returned is guaranteed to be valid UTF-8 for all possible values
-/// of `slice`.
-///
-/// See: https://unicode.org/reports/tr44/#UAX44-LM3
-fn symbolic_name_normalize_bytes(slice: &mut [u8]) -> &mut [u8] {
-    // I couldn't find a place in the standard that specified that property
-    // names/aliases had a particular structure (unlike character names), but
-    // we assume that it's ASCII only and drop anything that isn't ASCII.
-    let mut start = 0;
-    let mut starts_with_is = false;
-    if slice.len() >= 2 {
-        // Ignore any "is" prefix.
-        starts_with_is = slice[0..2] == b"is"[..]
-            || slice[0..2] == b"IS"[..]
-            || slice[0..2] == b"iS"[..]
-            || slice[0..2] == b"Is"[..];
-        if starts_with_is {
-            start = 2;
-        }
-    }
-    let mut next_write = 0;
-    for i in start..slice.len() {
-        // VALIDITY ARGUMENT: To guarantee that the resulting slice is valid
-        // UTF-8, we ensure that the slice contains only ASCII bytes. In
-        // particular, we drop every non-ASCII byte from the normalized string.
-        let b = slice[i];
-        if b == b' ' || b == b'_' || b == b'-' {
-            continue;
-        } else if b'A' <= b && b <= b'Z' {
-            slice[next_write] = b + (b'a' - b'A');
-            next_write += 1;
-        } else if b <= 0x7F {
-            slice[next_write] = b;
-            next_write += 1;
-        }
-    }
-    // Special case: ISO_Comment has a 'isc' abbreviation. Since we generally
-    // ignore 'is' prefixes, the 'isc' abbreviation gets caught in the cross
-    // fire and ends up creating an alias for 'c' to 'ISO_Comment', but it
-    // is actually an alias for the 'Other' general category.
-    if starts_with_is && next_write == 1 && slice[0] == b'c' {
-        slice[0] = b'i';
-        slice[1] = b's';
-        slice[2] = b'c';
-        next_write = 3;
-    }
-    &mut slice[..next_write]
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[cfg(feature = "unicode-case")]
-    fn simple_fold_ok(c: char) -> impl Iterator<Item = char> {
-        SimpleCaseFolder::new().unwrap().mapping(c).iter().copied()
-    }
-
-    #[cfg(feature = "unicode-case")]
-    fn contains_case_map(start: char, end: char) -> bool {
-        SimpleCaseFolder::new().unwrap().overlaps(start, end)
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-case")]
-    fn simple_fold_k() {
-        let xs: Vec<char> = simple_fold_ok('k').collect();
-        assert_eq!(xs, alloc::vec!['K', 'â„Ș']);
-
-        let xs: Vec<char> = simple_fold_ok('K').collect();
-        assert_eq!(xs, alloc::vec!['k', 'â„Ș']);
-
-        let xs: Vec<char> = simple_fold_ok('â„Ș').collect();
-        assert_eq!(xs, alloc::vec!['K', 'k']);
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-case")]
-    fn simple_fold_a() {
-        let xs: Vec<char> = simple_fold_ok('a').collect();
-        assert_eq!(xs, alloc::vec!['A']);
-
-        let xs: Vec<char> = simple_fold_ok('A').collect();
-        assert_eq!(xs, alloc::vec!['a']);
-    }
-
-    #[test]
-    #[cfg(not(feature = "unicode-case"))]
-    fn simple_fold_disabled() {
-        assert!(SimpleCaseFolder::new().is_err());
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-case")]
-    fn range_contains() {
-        assert!(contains_case_map('A', 'A'));
-        assert!(contains_case_map('Z', 'Z'));
-        assert!(contains_case_map('A', 'Z'));
-        assert!(contains_case_map('@', 'A'));
-        assert!(contains_case_map('Z', '['));
-        assert!(contains_case_map('☃', 'Ⰰ'));
-
-        assert!(!contains_case_map('[', '['));
-        assert!(!contains_case_map('[', '`'));
-
-        assert!(!contains_case_map('☃', '☃'));
-    }
-
-    #[test]
-    #[cfg(feature = "unicode-gencat")]
-    fn regression_466() {
-        use super::{CanonicalClassQuery, ClassQuery};
-
-        let q = ClassQuery::OneLetter('C');
-        assert_eq!(
-            q.canonicalize().unwrap(),
-            CanonicalClassQuery::GeneralCategory("Other")
-        );
-    }
-
-    #[test]
-    fn sym_normalize() {
-        let sym_norm = symbolic_name_normalize;
-
-        assert_eq!(sym_norm("Line_Break"), "linebreak");
-        assert_eq!(sym_norm("Line-break"), "linebreak");
-        assert_eq!(sym_norm("linebreak"), "linebreak");
-        assert_eq!(sym_norm("BA"), "ba");
-        assert_eq!(sym_norm("ba"), "ba");
-        assert_eq!(sym_norm("Greek"), "greek");
-        assert_eq!(sym_norm("isGreek"), "greek");
-        assert_eq!(sym_norm("IS_Greek"), "greek");
-        assert_eq!(sym_norm("isc"), "isc");
-        assert_eq!(sym_norm("is c"), "isc");
-        assert_eq!(sym_norm("is_c"), "isc");
-    }
-
-    #[test]
-    fn valid_utf8_symbolic() {
-        let mut x = b"abc\xFFxyz".to_vec();
-        let y = symbolic_name_normalize_bytes(&mut x);
-        assert_eq!(y, b"abcxyz");
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/LICENSE-UNICODE b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/LICENSE-UNICODE
deleted file mode 100644
index b82826bd..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/LICENSE-UNICODE
+++ /dev/null
@@ -1,57 +0,0 @@
-UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
-
-Unicode Data Files include all data files under the directories
-http://www.unicode.org/Public/, http://www.unicode.org/reports/,
-http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
-http://www.unicode.org/utility/trac/browser/.
-
-Unicode Data Files do not include PDF online code charts under the
-directory http://www.unicode.org/Public/.
-
-Software includes any source code published in the Unicode Standard
-or under the directories
-http://www.unicode.org/Public/, http://www.unicode.org/reports/,
-http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
-http://www.unicode.org/utility/trac/browser/.
-
-NOTICE TO USER: Carefully read the following legal agreement.
-BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
-DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
-YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
-TERMS AND CONDITIONS OF THIS AGREEMENT.
-IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
-THE DATA FILES OR SOFTWARE.
-
-COPYRIGHT AND PERMISSION NOTICE
-
-Copyright © 1991-2018 Unicode, Inc. All rights reserved.
-Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of the Unicode data files and any associated documentation
-(the "Data Files") or Unicode software and any associated documentation
-(the "Software") to deal in the Data Files or Software
-without restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, and/or sell copies of
-the Data Files or Software, and to permit persons to whom the Data Files
-or Software are furnished to do so, provided that either
-(a) this copyright and permission notice appear with all copies
-of the Data Files or Software, or
-(b) this copyright and permission notice appear in associated
-Documentation.
-
-THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT OF THIRD PARTY RIGHTS.
-IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
-NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
-DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THE DATA FILES OR SOFTWARE.
-
-Except as contained in this notice, the name of a copyright holder
-shall not be used in advertising or otherwise to promote the sale,
-use or other dealings in these Data Files or Software without prior
-written authorization of the copyright holder.
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/age.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/age.rs
deleted file mode 100644
index 466510c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/age.rs
+++ /dev/null
@@ -1,1846 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate age ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("V10_0", V10_0),
-    ("V11_0", V11_0),
-    ("V12_0", V12_0),
-    ("V12_1", V12_1),
-    ("V13_0", V13_0),
-    ("V14_0", V14_0),
-    ("V15_0", V15_0),
-    ("V15_1", V15_1),
-    ("V16_0", V16_0),
-    ("V1_1", V1_1),
-    ("V2_0", V2_0),
-    ("V2_1", V2_1),
-    ("V3_0", V3_0),
-    ("V3_1", V3_1),
-    ("V3_2", V3_2),
-    ("V4_0", V4_0),
-    ("V4_1", V4_1),
-    ("V5_0", V5_0),
-    ("V5_1", V5_1),
-    ("V5_2", V5_2),
-    ("V6_0", V6_0),
-    ("V6_1", V6_1),
-    ("V6_2", V6_2),
-    ("V6_3", V6_3),
-    ("V7_0", V7_0),
-    ("V8_0", V8_0),
-    ("V9_0", V9_0),
-];
-
-pub const V10_0: &'static [(char, char)] = &[
-    ('àĄ ', 'àĄȘ'),
-    ('ৌ', 'ড়'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{d00}', '\u{d00}'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('áł·', 'áł·'),
-    ('\u{1df6}', '\u{1df9}'),
-    ('₿', '₿'),
-    ('⏿', '⏿'),
-    ('⯒', '⯒'),
-    ('âč…', 'âč‰'),
-    ('ㄼ', 'ㄼ'),
-    ('鿖', 'éżȘ'),
-    ('𐌭', '𐌯'),
-    ('𑹀', '\u{11a47}'),
-    ('𑩐', 'đ‘Șƒ'),
-    ('đ‘Ș†', 'đ‘Șœ'),
-    ('đ‘Șž', 'đ‘Șą'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d47}'),
-    ('𑔐', 'đ‘”™'),
-    ('𖿡', '𖿡'),
-    ('𛀂', '𛄞'),
-    ('𛅰', '𛋻'),
-    ('🉠', 'đŸ‰„'),
-    ('🛓', '🛔'),
-    ('đŸ›·', '🛾'),
-    ('đŸ€€', 'đŸ€‹'),
-    ('đŸ€Ÿ', 'đŸ€Ÿ'),
-    ('đŸ€š', 'đŸ€Ż'),
-    ('đŸ€±', 'đŸ€Č'),
-    ('đŸ„Œ', 'đŸ„Œ'),
-    ('đŸ„Ÿ', 'đŸ„«'),
-    ('🩒', '🩗'),
-    ('🧐', '🧩'),
-    ('đŹș°', '🯠'),
-];
-
-pub const V11_0: &'static [(char, char)] = &[
-    ('Ő ', 'Ő '),
-    ('ֈ', 'ֈ'),
-    ('ŚŻ', 'ŚŻ'),
-    ('\u{7fd}', 'ßż'),
-    ('\u{8d3}', '\u{8d3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('à©¶', 'à©¶'),
-    ('\u{c04}', '\u{c04}'),
-    ('àȄ', 'àȄ'),
-    ('ᥞ', 'ᥞ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('âźș', 'âźŒ'),
-    ('⯓', '⯫'),
-    ('⯰', 'âŻŸ'),
-    ('âčŠ', 'âčŽ'),
-    ('ㄯ', 'ㄯ'),
-    ('鿫', '鿯'),
-    ('êžŻ', 'êžŻ'),
-    ('êžž', 'êžč'),
-    ('êŁŸ', '\u{a8ff}'),
-    ('𐚎', '𐚔'),
-    ('𐩈', '𐩈'),
-    ('𐮀', '\u{10d27}'),
-    ('𐎰', 'đŽč'),
-    ('đŒ€', 'đŒ§'),
-    ('đŒ°', 'đœ™'),
-    ('\u{110cd}', '\u{110cd}'),
-    ('𑅄', '𑅆'),
-    ('\u{1133b}', '\u{1133b}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('𑜚', '𑜚'),
-    ('𑠀', 'đ‘ »'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶˜'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘» ', '𑻞'),
-    ('đ–č€', 'đ–șš'),
-    ('𘟭', 'đ˜Ÿ±'),
-    ('𝋠', '𝋳'),
-    ('đČ', '𝍾'),
-    ('đž±±', 'đžČŽ'),
-    ('🄯', '🄯'),
-    ('đŸ›č', 'đŸ›č'),
-    ('🟕', '🟘'),
-    ('đŸ„', 'đŸ„'),
-    ('đŸ„Ź', 'đŸ„°'),
-    ('đŸ„ł', 'đŸ„¶'),
-    ('đŸ„ș', 'đŸ„ș'),
-    ('đŸ„Œ', 'đŸ„ż'),
-    ('🩘', '🩱'),
-    ('🩰', 'đŸŠč'),
-    ('🧁', '🧂'),
-    ('🧧', '🧿'),
-    ('đŸ© ', 'đŸ©­'),
-];
-
-pub const V12_0: &'static [(char, char)] = &[
-    ('à±·', 'à±·'),
-    ('àș†', 'àș†'),
-    ('àș‰', 'àș‰'),
-    ('àșŒ', 'àșŒ'),
-    ('àșŽ', 'àș“'),
-    ('àș˜', 'àș˜'),
-    ('àș ', 'àș '),
-    ('àșš', 'àș©'),
-    ('àșŹ', 'àșŹ'),
-    ('\u{eba}', '\u{eba}'),
-    ('áłș', 'áłș'),
-    ('⯉', '⯉'),
-    ('⯿', '⯿'),
-    ('âč', 'âč'),
-    ('êžș', 'êžż'),
-    ('Ꟃ', 'Ᶎ'),
-    ('ê­Š', 'ê­§'),
-    ('𐿠', '𐿶'),
-    ('𑑟', '𑑟'),
-    ('𑚾', '𑚾'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '\u{119d7}'),
-    ('\u{119da}', 'đ‘§€'),
-    ('đ‘Ș„', 'đ‘Ș…'),
-    ('𑿀', '𑿱'),
-    ('𑿿', '𑿿'),
-    ('\u{13430}', '\u{13438}'),
-    ('đ–œ…', 'đ–œŠ'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('đ–œż', 'đ–Ÿ‡'),
-    ('𖿱', '𖿣'),
-    ('đ˜ŸČ', 'đ˜Ÿ·'),
-    ('𛅐', '𛅒'),
-    ('đ›…€', '𛅧'),
-    ('𞄀', '𞄬'),
-    ('\u{1e130}', 'đž„œ'),
-    ('𞅀', '𞅉'),
-    ('𞅎', '𞅏'),
-    ('𞋀', 'đž‹č'),
-    ('𞋿', '𞋿'),
-    ('đž„‹', 'đž„‹'),
-    ('𞮁', 'đžŽœ'),
-    ('🅬', '🅬'),
-    ('🛕', '🛕'),
-    ('đŸ›ș', 'đŸ›ș'),
-    ('🟠', 'đŸŸ«'),
-    ('đŸ€', 'đŸ€'),
-    ('đŸ€ż', 'đŸ€ż'),
-    ('đŸ„±', 'đŸ„±'),
-    ('đŸ„»', 'đŸ„»'),
-    ('đŸŠ„', 'đŸŠȘ'),
-    ('🩼', '🩯'),
-    ('đŸŠș', '🩿'),
-    ('🧃', '🧊'),
-    ('🧍', '🧏'),
-    ('🹀', 'đŸ©“'),
-    ('đŸ©°', 'đŸ©ł'),
-    ('đŸ©ž', 'đŸ©ș'),
-    ('đŸȘ€', 'đŸȘ‚'),
-    ('đŸȘ', 'đŸȘ•'),
-];
-
-pub const V12_1: &'static [(char, char)] = &[('㋿', '㋿')];
-
-pub const V13_0: &'static [(char, char)] = &[
-    ('àąŸ', 'àŁ‡'),
-    ('\u{b55}', '\u{b55}'),
-    ('àŽ„', 'àŽ„'),
-    ('\u{d81}', '\u{d81}'),
-    ('\u{1abf}', '\u{1ac0}'),
-    ('⼗', '⼗'),
-    ('âč', 'âč’'),
-    ('ㆻ', 'ㆿ'),
-    ('ä¶¶', 'ä¶ż'),
-    ('éż°', 'éżŒ'),
-    ('Ꟈ', 'ꟊ'),
-    ('꟔', 'ꟶ'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('ê­š', 'ê­«'),
-    ('𐆜', '𐆜'),
-    ('đș€', 'đș©'),
-    ('\u{10eab}', 'đș­'),
-    ('đș°', 'đș±'),
-    ('đŸ°', '𐿋'),
-    ('𑅇', '𑅇'),
-    ('𑇎', '\u{111cf}'),
-    ('𑑚', '𑑚'),
-    ('𑑠', '𑑡'),
-    ('đ‘€€', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '𑄆'),
-    ('𑄐', 'đ‘„™'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𘫳', '𘳕'),
-    ('𘮀', '𘮈'),
-    ('🄍', '🄏'),
-    ('🅭', '🅯'),
-    ('🆭', '🆭'),
-    ('🛖', '🛗'),
-    ('đŸ›»', 'đŸ›Œ'),
-    ('🱰', 'đŸą±'),
-    ('đŸ€Œ', 'đŸ€Œ'),
-    ('đŸ„Č', 'đŸ„Č'),
-    ('đŸ„·', 'đŸ„ž'),
-    ('🩣', 'đŸŠ€'),
-    ('đŸŠ«', '🩭'),
-    ('🧋', '🧋'),
-    ('đŸ©Ž', 'đŸ©Ž'),
-    ('đŸȘƒ', 'đŸȘ†'),
-    ('đŸȘ–', 'đŸȘš'),
-    ('đŸȘ°', 'đŸȘ¶'),
-    ('đŸ«€', 'đŸ«‚'),
-    ('đŸ«', 'đŸ«–'),
-    ('🬀', '🼒'),
-    ('🼔', '🯊'),
-    ('🯰', 'đŸŻč'),
-    ('đȘ›—', 'đȘ›'),
-    ('𰀀', 'đ±Š'),
-];
-
-pub const V14_0: &'static [(char, char)] = &[
-    ('۝', '۝'),
-    ('àĄ°', 'àąŽ'),
-    ('\u{890}', '\u{891}'),
-    ('\u{898}', '\u{89f}'),
-    ('àą”', 'àą”'),
-    ('àŁˆ', '\u{8d2}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('ౝ', 'ౝ'),
-    ('àł', 'àł'),
-    ('ᜍ', 'ᜍ'),
-    ('\u{1715}', '\u{1715}'),
-    ('ᜟ', 'ᜟ'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1ac1}', '\u{1ace}'),
-    ('ᭌ', 'ᭌ'),
-    ('᭜', '᭟'),
-    ('\u{1dfa}', '\u{1dfa}'),
-    ('⃀', '⃀'),
-    ('â°Ż', 'â°Ż'),
-    ('ⱟ', 'ⱟ'),
-    ('âč“', 'âč'),
-    ('éżœ', 'éżż'),
-    ('Ꟁ', 'ꟁ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'ꟙ'),
-    ('êŸČ', '꟎'),
-    ('ïŻ‚', 'ïŻ‚'),
-    ('', ''),
-    ('﷏', '﷏'),
-    ('ï·Ÿ', 'ï·ż'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('đœ°', 'đŸ‰'),
-    ('\u{11070}', '𑁔'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('đ‘šč', 'đ‘šč'),
-    ('𑝀', '𑝆'),
-    ('đ‘Ș°', 'đ‘Șż'),
-    ('đ’Ÿ', 'đ’żČ'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('đ–«€', '𖫉'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛄟', '𛄱'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('đœœ', '𜿃'),
-    ('đ‡©', 'đ‡Ș'),
-    ('đŒ€', 'đŒž'),
-    ('𞊐', '\u{1e2ae}'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('🛝', '🛟'),
-    ('🟰', '🟰'),
-    ('đŸ„č', 'đŸ„č'),
-    ('🧌', '🧌'),
-    ('đŸ©»', 'đŸ©Œ'),
-    ('đŸȘ©', 'đŸȘŹ'),
-    ('đŸȘ·', 'đŸȘș'),
-    ('đŸ«ƒ', 'đŸ«…'),
-    ('đŸ«—', 'đŸ«™'),
-    ('đŸ« ', 'đŸ«§'),
-    ('đŸ«°', 'đŸ«¶'),
-    ('đȘ›ž', 'đȘ›Ÿ'),
-    ('đ«œ”', 'đ«œž'),
-];
-
-pub const V15_0: &'static [(char, char)] = &[
-    ('àłł', 'àłł'),
-    ('\u{ece}', '\u{ece}'),
-    ('\u{10efd}', '\u{10eff}'),
-    ('𑈿', '\u{11241}'),
-    ('𑬀', '𑬉'),
-    ('\u{11f00}', 'đ‘Œ'),
-    ('đ‘Œ’', '\u{11f3a}'),
-    ('đ‘ŒŸ', 'đ‘œ™'),
-    ('𓐯', '𓐯'),
-    ('\u{13439}', '\u{13455}'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅕', '𛅕'),
-    ('𝋀', '𝋓'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('𞓐', 'đž“č'),
-    ('🛜', '🛜'),
-    ('🝮', 'đŸ¶'),
-    ('đŸ»', '🝿'),
-    ('🟙', '🟙'),
-    ('đŸ©”', 'đŸ©·'),
-    ('đŸȘ‡', 'đŸȘˆ'),
-    ('đŸȘ­', 'đŸȘŻ'),
-    ('đŸȘ»', 'đŸȘœ'),
-    ('đŸȘż', 'đŸȘż'),
-    ('đŸ«Ž', 'đŸ«'),
-    ('đŸ«š', 'đŸ«›'),
-    ('đŸ«š', 'đŸ«š'),
-    ('đŸ«·', 'đŸ«ž'),
-    ('đ«œč', 'đ«œč'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const V15_1: &'static [(char, char)] =
-    &[('âżŒ', 'âżż'), ('㇯', '㇯'), ('🯰', 'đźč')];
-
-pub const V16_0: &'static [(char, char)] = &[
-    ('\u{897}', '\u{897}'),
-    ('᭎', '᭏'),
-    ('á­ż', 'á­ż'),
-    ('áȉ', 'áȊ'),
-    ('␧', '␩'),
-    ('㇀', '㇄'),
-    ('Ɤ', 'ꟍ'),
-    ('Ꟛ', 'Ƛ'),
-    ('𐗀', '𐗳'),
-    ('𐔀', '𐔄'),
-    ('\u{10d69}', '𐶅'),
-    ('𐶎', 'đ¶'),
-    ('𐻂', '𐻄'),
-    ('\u{10efc}', '\u{10efc}'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏕'),
-    ('𑏗', '𑏘'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('𑛐', '𑛣'),
-    ('𑯀', '𑯡'),
-    ('𑯰', 'đ‘Żč'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('𓑠', 'đ”ș'),
-    ('𖄀', 'đ–„č'),
-    ('𖔀', 'đ–”č'),
-    ('𘳿', '𘳿'),
-    ('𜰀', 'đœłč'),
-    ('𜮀', 'đœșł'),
-    ('𞗐', 'đž—ș'),
-    ('𞗿', '𞗿'),
-    ('đŸąČ', 'đŸą»'),
-    ('🣀', '🣁'),
-    ('đŸȘ‰', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸȘ'),
-    ('đŸȘŸ', 'đŸȘŸ'),
-    ('đŸ«†', 'đŸ«†'),
-    ('đŸ«œ', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«Ÿ'),
-    ('đŸ«©', 'đŸ«©'),
-    ('🯋', '🯯'),
-];
-
-pub const V1_1: &'static [(char, char)] = &[
-    ('\0', 'Ç”'),
-    ('Çș', 'ȗ'),
-    ('ɐ', 'ʚ'),
-    ('ʰ', '˞'),
-    ('Ë ', 'Ë©'),
-    ('\u{300}', '\u{345}'),
-    ('\u{360}', '\u{361}'),
-    ('ÍŽ', 'Í”'),
-    ('Íș', 'Íș'),
-    ('ÍŸ', 'ÍŸ'),
-    ('΄', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'ώ'),
-    ('ϐ', 'ϖ'),
-    ('Ϛ', 'Ϛ'),
-    ('Ϝ', 'Ϝ'),
-    ('Ϟ', 'Ϟ'),
-    ('Ï ', 'Ï '),
-    ('Ïą', 'Ïł'),
-    ('Ё', 'Ќ'),
-    ('Ў', 'я'),
-    ('ё', 'ќ'),
-    ('ў', '\u{486}'),
-    ('Ґ', 'ӄ'),
-    ('Ӈ', 'ӈ'),
-    ('Ӌ', 'ӌ'),
-    ('Ӑ', 'ӫ'),
-    ('Óź', 'Ó”'),
-    ('Óž', 'Óč'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', '՟'),
-    ('ա', 'և'),
-    ('։', '։'),
-    ('\u{5b0}', '\u{5b9}'),
-    ('\u{5bb}', 'ڃ'),
-    ('ڐ', 'ŚȘ'),
-    ('ڰ', 'ŚŽ'),
-    ('ی', 'ی'),
-    ('ۛ', 'ۛ'),
-    ('۟', '۟'),
-    ('ŰĄ', 'Űș'),
-    ('ـ', '\u{652}'),
-    ('Ù ', 'Ù­'),
-    ('\u{670}', 'Ú·'),
-    ('Úș', 'ÚŸ'),
-    ('ۀ', 'ێ'),
-    ('ې', '\u{6ed}'),
-    ('Û°', 'Ûč'),
-    ('\u{901}', 'à€ƒ'),
-    ('à€…', 'à€č'),
-    ('\u{93c}', '\u{94d}'),
-    ('à„', '\u{954}'),
-    ('à„˜', 'à„°'),
-    ('\u{981}', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9be}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', '\u{9cd}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', '\u{9e3}'),
-    ('à§Š', 'à§ș'),
-    ('\u{a02}', '\u{a02}'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('੊', '੎'),
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘ…', 'àȘ‹'),
-    ('àȘ', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('\u{abc}', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à« '),
-    ('૊', 'à«Ż'),
-    ('\u{b01}', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ¶', 'àŹč'),
-    ('\u{b3c}', '\u{b43}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b56}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­Š', 'à­°'),
-    ('\u{b82}', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àź”'),
-    ('àź·', 'àźč'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('àŻ§', 'àŻČ'),
-    ('ఁ', 'ః'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°ł'),
-    ('à°”', 'à°č'),
-    ('\u{c3e}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('à± ', 'à±Ą'),
-    ('ొ', 'à±Ż'),
-    ('àȂ', 'àȃ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČŸ', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àłž', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àłŠ', 'àłŻ'),
-    ('àŽ‚', 'àŽƒ'),
-    ('àŽ…', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽš'),
-    ('àŽȘ', 'àŽč'),
-    ('\u{d3e}', '\u{d43}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', '\u{d4d}'),
-    ('\u{d57}', '\u{d57}'),
-    ('à” ', 'à”Ą'),
-    ('à”Š', 'à”Ż'),
-    ('àž', '\u{e3a}'),
-    ('àžż', 'àč›'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș‡', 'àșˆ'),
-    ('àșŠ', 'àșŠ'),
-    ('àș', 'àș'),
-    ('àș”', 'àș—'),
-    ('àș™', 'àșŸ'),
-    ('àșĄ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àș§'),
-    ('àșȘ', 'àș«'),
-    ('àș­', '\u{eb9}'),
-    ('\u{ebb}', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('\u{ec8}', '\u{ecd}'),
-    ('໐', '໙'),
-    ('ໜ', 'ໝ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('ა', 'ჶ'),
-    ('჻', '჻'),
-    ('ᄀ', 'ᅙ'),
-    ('ᅟ', 'ᆱ'),
-    ('ᆹ', 'á‡č'),
-    ('ᾀ', 'áșš'),
-    ('áș ', 'á»č'),
-    ('ጀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', 'ῄ'),
-    ('ῆ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('῝', '`'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŸ'),
-    ('\u{2000}', '\u{202e}'),
-    ('‰', '⁆'),
-    ('\u{206a}', '⁰'),
-    ('', '₎'),
-    ('₠', 'â‚Ș'),
-    ('\u{20d0}', '\u{20e1}'),
-    ('℀', 'ℾ'),
-    ('⅓', 'ↂ'),
-    ('←', 'â‡Ș'),
-    ('∀', '⋱'),
-    ('⌀', '⌀'),
-    ('⌂', 'âș'),
-    ('␀', '␀'),
-    ('⑀', '⑊'),
-    ('①', 'â“Ș'),
-    ('─', '▕'),
-    ('■', '◯'),
-    ('☀', '☓'),
-    ('☚', '♯'),
-    ('✁', '✄'),
-    ('✆', '✉'),
-    ('✌', '✧'),
-    ('✩', '❋'),
-    ('❍', '❍'),
-    ('❏', '❒'),
-    ('❖', '❖'),
-    ('❘', '❞'),
-    ('❡', '❧'),
-    ('❶', '➔'),
-    ('➘', '➯'),
-    ('➱', '➟'),
-    ('\u{3000}', '〷'),
-    ('〿', '〿'),
-    ('ぁ', 'ゔ'),
-    ('\u{3099}', 'ゞ'),
-    ('ァ', 'ăƒŸ'),
-    ('ㄅ', 'ㄬ'),
-    ('ㄱ', 'ㆎ'),
-    ('㆐', '㆟'),
-    ('㈀', '㈜'),
-    ('㈠', '㉃'),
-    ('㉠', '㉻'),
-    ('㉿', '㊰'),
-    ('㋀', '㋋'),
-    ('㋐', 'ă‹Ÿ'),
-    ('㌀', 'ă¶'),
-    ('ă»', '㏝'),
-    ('㏠', 'ăŸ'),
-    ('侀', '韄'),
-    ('\u{e000}', 'ïš­'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('\u{fb1e}', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', 'ïŽż'),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('\u{fe20}', '\u{fe23}'),
-    ('ïž°', 'ïč„'),
-    ('ïč‰', 'ïč’'),
-    ('ïč”', 'ïčŠ'),
-    ('ïčš', 'ïč«'),
-    ('ïč°', 'ïčČ'),
-    ('ïčŽ', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('\u{feff}', '\u{feff}'),
-    ('', ''),
-    ('ïœĄ', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('ïż ', 'ïżŠ'),
-    ('ïżš', 'ïżź'),
-    ('ïżœ', '\u{ffff}'),
-];
-
-pub const V2_0: &'static [(char, char)] = &[
-    ('\u{591}', '\u{5a1}'),
-    ('\u{5a3}', '\u{5af}'),
-    ('\u{5c4}', '\u{5c4}'),
-    ('àŒ€', 'àœ‡'),
-    ('àœ‰', 'àœ©'),
-    ('\u{f71}', 'àŸ‹'),
-    ('\u{f90}', '\u{f95}'),
-    ('\u{f97}', '\u{f97}'),
-    ('\u{f99}', '\u{fad}'),
-    ('\u{fb1}', '\u{fb7}'),
-    ('\u{fb9}', '\u{fb9}'),
-    ('áș›', 'áș›'),
-    ('₫', '₫'),
-    ('가', '힣'),
-    ('\u{1fffe}', '\u{1ffff}'),
-    ('\u{2fffe}', '\u{2ffff}'),
-    ('\u{3fffe}', '\u{3ffff}'),
-    ('\u{4fffe}', '\u{4ffff}'),
-    ('\u{5fffe}', '\u{5ffff}'),
-    ('\u{6fffe}', '\u{6ffff}'),
-    ('\u{7fffe}', '\u{7ffff}'),
-    ('\u{8fffe}', '\u{8ffff}'),
-    ('\u{9fffe}', '\u{9ffff}'),
-    ('\u{afffe}', '\u{affff}'),
-    ('\u{bfffe}', '\u{bffff}'),
-    ('\u{cfffe}', '\u{cffff}'),
-    ('\u{dfffe}', '\u{dffff}'),
-    ('\u{efffe}', '\u{10ffff}'),
-];
-
-pub const V2_1: &'static [(char, char)] = &[('€', '€'), ('ïżŒ', 'ïżŒ')];
-
-pub const V3_0: &'static [(char, char)] = &[
-    ('Ƕ', 'Çč'),
-    ('Ș', 'ȟ'),
-    ('Èą', 'Èł'),
-    ('Ê©', 'Ê­'),
-    ('˟', '˟'),
-    ('ËȘ', 'Ëź'),
-    ('\u{346}', '\u{34e}'),
-    ('\u{362}', '\u{362}'),
-    ('ϗ', 'ϗ'),
-    ('ϛ', 'ϛ'),
-    ('ϝ', 'ϝ'),
-    ('ϟ', 'ϟ'),
-    ('ÏĄ', 'ÏĄ'),
-    ('Ѐ', 'Ѐ'),
-    ('Ѝ', 'Ѝ'),
-    ('ѐ', 'ѐ'),
-    ('ѝ', 'ѝ'),
-    ('\u{488}', '\u{489}'),
-    ('Ҍ', 'ҏ'),
-    ('ÓŹ', 'Ó­'),
-    ('֊', '֊'),
-    ('\u{653}', '\u{655}'),
-    ('Úž', 'Úč'),
-    ('Úż', 'Úż'),
-    ('ۏ', 'ۏ'),
-    ('Ûș', 'ÛŸ'),
-    ('܀', '܍'),
-    ('\u{70f}', 'ÜŹ'),
-    ('\u{730}', '\u{74a}'),
-    ('Ț€', '\u{7b0}'),
-    ('ං', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Č', 'à·Ž'),
-    ('àœȘ', 'àœȘ'),
-    ('\u{f96}', '\u{f96}'),
-    ('\u{fae}', '\u{fb0}'),
-    ('\u{fb8}', '\u{fb8}'),
-    ('\u{fba}', '\u{fbc}'),
-    ('àŸŸ', 'àżŒ'),
-    ('àż', 'àż'),
-    ('က', 'အ'),
-    ('ဣ', 'ဧ'),
-    ('ဩ', 'á€Ș'),
-    ('ာ', '\u{1032}'),
-    ('\u{1036}', '\u{1039}'),
-    ('၀', '\u{1059}'),
-    ('ሀ', 'ሆ'),
-    ('ለ', 'ቆ'),
-    ('ቈ', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኆ'),
-    ('ኈ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኼ'),
-    ('ኰ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዎ'),
-    ('ዐ', 'ዖ'),
-    ('ዘ', 'ዼ'),
-    ('ደ', 'ጎ'),
-    ('ጐ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ጞ'),
-    ('ጠ', 'ፆ'),
-    ('ፈ', 'ፚ'),
-    ('፡', 'ፌ'),
-    ('Ꭰ', 'Ꮾ'),
-    ('ᐁ', 'ᙶ'),
-    ('\u{1680}', '᚜'),
-    ('ᚠ', 'ᛰ'),
-    ('ក', 'ៜ'),
-    ('០', '៩'),
-    ('᠀', '\u{180e}'),
-    ('᠐', '᠙'),
-    ('ᠠ', '᥷'),
-    ('᱀', '\u{18a9}'),
-    ('\u{202f}', '\u{202f}'),
-    ('⁈', '⁍'),
-    ('₭', '₯'),
-    ('\u{20e2}', '\u{20e3}'),
-    ('â„č', 'â„ș'),
-    ('Ↄ', 'Ↄ'),
-    ('⇫', '⇳'),
-    ('⌁', '⌁'),
-    ('⍻', '⍻'),
-    ('⍜', '⎚'),
-    ('␄', '␊'),
-    ('◰', '◷'),
-    ('☙', '☙'),
-    ('♰', '♱'),
-    ('⠀', '⣿'),
-    ('âș€', 'âș™'),
-    ('âș›', '⻳'),
-    ('⌀', '⿕'),
-    ('âż°', 'âż»'),
-    ('〾', 'ă€ș'),
-    ('ă€Ÿ', 'ă€Ÿ'),
-    ('ㆠ', 'ㆷ'),
-    ('㐀', 'ä¶”'),
-    ('ꀀ', 'ꒌ'),
-    ('꒐', 'ê’Ą'),
-    ('ê’€', 'ê’ł'),
-    ('ê’”', '꓀'),
-    ('꓂', '꓄'),
-    ('꓆', '꓆'),
-    ('ïŹ', 'ïŹ'),
-    ('\u{fff9}', '\u{fffb}'),
-];
-
-pub const V3_1: &'static [(char, char)] = &[
-    ('ÏŽ', 'Ï”'),
-    ('\u{fdd0}', '\u{fdef}'),
-    ('𐌀', '𐌞'),
-    ('𐌠', '𐌣'),
-    ('𐌰', '𐍊'),
-    ('𐐀', '𐐄'),
-    ('𐐹', '𐑍'),
-    ('𝀀', 'đƒ”'),
-    ('𝄀', '𝄩'),
-    ('đ„Ș', '𝇝'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓀'),
-    ('𝓂', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', '𝚣'),
-    ('𝚹', '𝟉'),
-    ('𝟎', '𝟿'),
-    ('𠀀', 'đȘ›–'),
-    ('丽', '𯹝'),
-    ('\u{e0001}', '\u{e0001}'),
-    ('\u{e0020}', '\u{e007f}'),
-];
-
-pub const V3_2: &'static [(char, char)] = &[
-    ('È ', 'È '),
-    ('\u{34f}', '\u{34f}'),
-    ('\u{363}', '\u{36f}'),
-    ('Ϙ', 'ϙ'),
-    ('϶', '϶'),
-    ('Ҋ', 'ҋ'),
-    ('Ӆ', 'ӆ'),
-    ('Ӊ', 'ӊ'),
-    ('Ӎ', 'ӎ'),
-    ('Ԁ', 'ԏ'),
-    ('Ùź', 'ÙŻ'),
-    ('Ț±', 'Ț±'),
-    ('ჷ', 'პ'),
-    ('ᜀ', 'ᜌ'),
-    ('ᜎ', '\u{1714}'),
-    ('ᜠ', '᜶'),
-    ('ᝀ', '\u{1753}'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('\u{1772}', '\u{1773}'),
-    ('⁇', '⁇'),
-    ('⁎', '⁒'),
-    ('⁗', '⁗'),
-    ('\u{205f}', '\u{2063}'),
-    ('ⁱ', 'ⁱ'),
-    ('₰', '₱'),
-    ('\u{20e4}', '\u{20ea}'),
-    ('ℜ', '⅋'),
-    ('⇮', '⇿'),
-    ('â‹Č', '⋿'),
-    ('⍌', '⍌'),
-    ('⎛', '⏎'),
-    ('⓫', 'ⓟ'),
-    ('▖', '▟'),
-    ('◾', '◿'),
-    ('☖', '☗'),
-    ('â™Č', '♜'),
-    ('⚀', '⚉'),
-    ('❚', '❔'),
-    ('⟐', '⟫'),
-    ('⟰', '⟿'),
-    (' ', '⫿'),
-    ('〻', 'ă€œ'),
-    ('ゕ', 'ゖ'),
-    ('ゟ', '゠'),
-    ('ヿ', 'ヿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㉑', '㉟'),
-    ('㊱', '㊿'),
-    ('ê’ą', 'ê’Ł'),
-    ('ê’Ž', 'ê’Ž'),
-    ('꓁', '꓁'),
-    ('꓅', '꓅'),
-    ('ïš°', 'ï©Ș'),
-    ('﷌', '﷌'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('ïč…', 'ïč†'),
-    ('ïčł', 'ïčł'),
-    ('', ''),
-];
-
-pub const V4_0: &'static [(char, char)] = &[
-    ('ÈĄ', 'ÈĄ'),
-    ('Ȏ', 'ȶ'),
-    ('Êź', 'ÊŻ'),
-    ('ËŻ', 'Ëż'),
-    ('\u{350}', '\u{357}'),
-    ('\u{35d}', '\u{35f}'),
-    ('Ï·', 'Ï»'),
-    ('\u{600}', '\u{603}'),
-    ('ۍ', '\u{615}'),
-    ('\u{656}', '\u{658}'),
-    ('Ûź', 'ÛŻ'),
-    ('Ûż', 'Ûż'),
-    ('Ü­', 'ÜŻ'),
-    ('ʍ', 'ʏ'),
-    ('à€„', 'à€„'),
-    ('àŠœ', 'àŠœ'),
-    ('\u{a01}', '\u{a01}'),
-    ('àšƒ', 'àšƒ'),
-    ('àȘŒ', 'àȘŒ'),
-    ('à«Ą', '\u{ae3}'),
-    ('૱', '૱'),
-    ('àŹ”', 'àŹ”'),
-    ('à­±', 'à­±'),
-    ('àŻł', 'àŻș'),
-    ('\u{cbc}', 'àČœ'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('៰', 'áŸč'),
-    ('က', 'လ'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('á„€', 'á„€'),
-    ('á„„', 'á„­'),
-    ('ᄰ', 'ᄎ'),
-    ('á§ ', 'á§ż'),
-    ('ᮀ', 'ᔫ'),
-    ('⁓', '⁔'),
-    ('℻', '℻'),
-    ('⏏', '⏐'),
-    ('⓿', '⓿'),
-    ('☔', '☕'),
-    ('⚊', '⚑'),
-    ('⚠', '⚡'),
-    ('⬀', '⬍'),
-    ('㈝', '㈞'),
-    ('㉐', '㉐'),
-    ('ă‰Œ', 'ă‰œ'),
-    ('㋌', '㋏'),
-    ('ă·', 'ăș'),
-    ('㏞', '㏟'),
-    ('㏿', '㏿'),
-    ('䷀', 'ä·ż'),
-    ('﷜', '﷜'),
-    ('ïč‡', 'ïčˆ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐄀', '𐄂'),
-    ('𐄇', '𐄳'),
-    ('𐄷', '𐄿'),
-    ('𐎀', '𐎝'),
-    ('𐎟', '𐎟'),
-    ('𐐊', '𐐧'),
-    ('𐑎', '𐒝'),
-    ('𐒠', '𐒩'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐠿'),
-    ('𝌀', '𝍖'),
-    ('𝓁', '𝓁'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const V4_1: &'static [(char, char)] = &[
-    ('ȷ', 'Ɂ'),
-    ('\u{358}', '\u{35c}'),
-    ('ÏŒ', 'Ïż'),
-    ('Ó¶', 'Ó·'),
-    ('\u{5a2}', '\u{5a2}'),
-    ('\u{5c5}', '\u{5c7}'),
-    ('ۋ', 'ۋ'),
-    ('۞', '۞'),
-    ('\u{659}', '\u{65e}'),
-    ('ʐ', 'ʭ'),
-    ('à„œ', 'à„œ'),
-    ('ৎ', 'ৎ'),
-    ('àź¶', 'àź¶'),
-    ('àŻŠ', 'àŻŠ'),
-    ('àż', 'àż‘'),
-    ('áƒč', 'áƒș'),
-    ('჌', '჌'),
-    ('ሇ', 'ሇ'),
-    ('ቇ', 'ቇ'),
-    ('ኇ', 'ኇ'),
-    ('ኯ', 'ኯ'),
-    ('ዏ', 'ዏ'),
-    ('ዯ', 'ዯ'),
-    ('ጏ', 'ጏ'),
-    ('ጟ', 'ጟ'),
-    ('ፇ', 'ፇ'),
-    ('\u{135f}', '፠'),
-    ('ᎀ', '᎙'),
-    ('ᩀ', 'ኩ'),
-    ('ᩰ', 'ᧉ'),
-    ('᧐', '᧙'),
-    ('᧞', '᧟'),
-    ('Ṁ', '\u{1a1b}'),
-    ('Ṟ', 'ṟ'),
-    ('ᔏ', '\u{1dc3}'),
-    ('⁕', '⁖'),
-    ('⁘', '⁞'),
-    ('ₐ', 'ₔ'),
-    ('â‚Č', 'â‚”'),
-    ('\u{20eb}', '\u{20eb}'),
-    ('ℌ', 'ℌ'),
-    ('⅌', '⅌'),
-    ('⏑', '⏛'),
-    ('☘', '☘'),
-    ('♟', '♿'),
-    ('⚒', '⚜'),
-    ('⚱', '⚱'),
-    ('⟀', '⟆'),
-    ('⬎', '⬓'),
-    ('Ⰰ', 'ⰼ'),
-    ('ⰰ', 'ⱞ'),
-    ('âȀ', 'âłȘ'),
-    ('âłč', '⎄'),
-    ('⎰', '┄'),
-    ('┯', '┯'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('⾀', '⾗'),
-    ('⾜', '⾝'),
-    ('㇀', '㇏'),
-    ('ă‰Ÿ', 'ă‰Ÿ'),
-    ('韊', '韻'),
-    ('꜀', '꜖'),
-    ('ꠀ', '꠫'),
-    ('並', '龎'),
-    ('', 'ïž™'),
-    ('𐅀', '𐆊'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏕'),
-    ('𐹀', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐹳'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '𐩇'),
-    ('𐩐', '𐩘'),
-    ('𝈀', '𝉅'),
-    ('đš€', 'đš„'),
-];
-
-pub const V5_0: &'static [(char, char)] = &[
-    ('ɂ', 'ɏ'),
-    ('ͻ', '͜'),
-    ('ӏ', 'ӏ'),
-    ('Óș', 'Óż'),
-    ('Ԑ', 'ԓ'),
-    ('\u{5ba}', '\u{5ba}'),
-    ('߀', 'ßș'),
-    ('à„»', 'à„Œ'),
-    ('à„Ÿ', 'à„ż'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('àł±', 'àłČ'),
-    ('\u{1b00}', 'ᭋ'),
-    ('᭐', 'ᭌ'),
-    ('\u{1dc4}', '\u{1dca}'),
-    ('\u{1dfe}', '\u{1dff}'),
-    ('\u{20ec}', '\u{20ef}'),
-    ('⅍', 'ⅎ'),
-    ('ↄ', 'ↄ'),
-    ('⏜', '⏧'),
-    ('âšČ', 'âšČ'),
-    ('⟇', '⟊'),
-    ('⬔', '⬚'),
-    ('⏠', '⏣'),
-    ('Ⱡ', 'ⱏ'),
-    ('ⱎ', 'ⱷ'),
-    ('ꜗ', 'ꜚ'),
-    ('꜠', 'êœĄ'),
-    ('êĄ€', 'êĄ·'),
-    ('𐀀', '𐀙'),
-    ('đ€Ÿ', 'đ€Ÿ'),
-    ('𒀀', '𒍼'),
-    ('𒐀', '𒑱'),
-    ('𒑰', '𒑳'),
-    ('𝍠', 'đ±'),
-    ('𝟊', '𝟋'),
-];
-
-pub const V5_1: &'static [(char, char)] = &[
-    ('Ͱ', 'ͳ'),
-    ('Ͷ', 'ͷ'),
-    ('Ϗ', 'Ϗ'),
-    ('\u{487}', '\u{487}'),
-    ('Ԕ', 'ԣ'),
-    ('ۆ', 'ۊ'),
-    ('\u{616}', '\u{61a}'),
-    ('Ű»', 'Űż'),
-    ('Ęź', 'Ęż'),
-    ('à„±', 'à„Č'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{b44}', '\u{b44}'),
-    ('\u{b62}', '\u{b63}'),
-    ('àŻ', 'àŻ'),
-    ('జ', 'జ'),
-    ('ౘ', 'ౙ'),
-    ('\u{c62}', '\u{c63}'),
-    ('౞', 'à±ż'),
-    ('àŽœ', 'àŽœ'),
-    ('\u{d44}', '\u{d44}'),
-    ('\u{d62}', '\u{d63}'),
-    ('à”°', 'à””'),
-    ('à”č', 'à”ż'),
-    ('àœ«', 'àœŹ'),
-    ('àżŽ', 'àżŽ'),
-    ('àż’', 'àż”'),
-    ('ေ', 'ေ'),
-    ('္', '္'),
-    ('ါ', 'ါ'),
-    ('\u{1033}', '\u{1035}'),
-    ('\u{103a}', 'ဿ'),
-    ('ၚ', '႙'),
-    ('႞', '႟'),
-    ('áąȘ', 'áąȘ'),
-    ('\u{1b80}', '\u{1baa}'),
-    ('áźź', 'áźč'),
-    ('ᰀ', '\u{1c37}'),
-    ('᰻', '᱉'),
-    ('ᱍ', '᱿'),
-    ('\u{1dcb}', '\u{1de6}'),
-    ('áșœ', 'áșŸ'),
-    ('á»ș', 'ỿ'),
-    ('\u{2064}', '\u{2064}'),
-    ('\u{20f0}', '\u{20f0}'),
-    ('⅏', '⅏'),
-    ('ↅ', 'ↈ'),
-    ('⚝', '⚝'),
-    ('⚳', '⚌'),
-    ('⛀', '⛃'),
-    ('⟌', '⟌'),
-    ('⟬', '⟯'),
-    ('⬛', '⬟'),
-    ('⏀', '⭌'),
-    ('⭐', '⭔'),
-    ('Ɑ', 'Ɐ'),
-    ('ⱱ', 'ⱳ'),
-    ('ⱞ', 'ⱜ'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('⾘', '⾛'),
-    ('⾞', '⾰'),
-    ('ㄭ', 'ㄭ'),
-    ('㇐', '㇣'),
-    ('韌', '鿃'),
-    ('ꔀ', 'ꘫ'),
-    ('Ꙁ', 'ꙟ'),
-    ('ê™ą', 'ê™ł'),
-    ('\u{a67c}', 'ꚗ'),
-    ('ꜛ', 'ꜟ'),
-    ('êœą', 'ꞌ'),
-    ('ꟻ', 'êŸż'),
-    ('êą€', '\u{a8c4}'),
-    ('êŁŽ', 'êŁ™'),
-    ('ꀀ', '\u{a953}'),
-    ('ꄟ', 'ꄟ'),
-    ('Ꚁ', '\u{aa36}'),
-    ('ꩀ', 'ꩍ'),
-    ('꩐', '꩙'),
-    ('꩜', '꩟'),
-    ('\u{fe24}', '\u{fe26}'),
-    ('𐆐', '𐆛'),
-    ('𐇐', '\u{101fd}'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐀠', 'đ€č'),
-    ('𐀿', '𐀿'),
-    ('đ„©', 'đ„©'),
-    ('🀀', 'đŸ€«'),
-    ('🀰', '🂓'),
-];
-
-pub const V5_2: &'static [(char, char)] = &[
-    ('Ô€', 'Ô„'),
-    ('ࠀ', '\u{82d}'),
-    ('à °', 'à Ÿ'),
-    ('\u{900}', '\u{900}'),
-    ('à„Ž', 'à„Ž'),
-    ('\u{955}', '\u{955}'),
-    ('à„č', 'à„ș'),
-    ('à§»', 'à§»'),
-    ('àż•', 'àż˜'),
-    ('ႚ', '\u{109d}'),
-    ('ᅚ', 'ᅞ'),
-    ('ᆣ', 'ᆧ'),
-    ('á‡ș', 'ᇿ'),
-    ('᐀', '᐀'),
-    ('ᙷ', 'ᙿ'),
-    ('Ṱ', 'ᣔ'),
-    ('áŠȘ', 'ካ'),
-    ('᧚', '᧚'),
-    ('áš ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('áȘ ', 'áȘ­'),
-    ('\u{1cd0}', 'áłČ'),
-    ('\u{1dfd}', '\u{1dfd}'),
-    ('₶', '₾'),
-    ('⅐', '⅒'),
-    ('↉', '↉'),
-    ('⏚', '⏚'),
-    ('⚞', '⚟'),
-    ('âšœ', '⚿'),
-    ('⛄', '⛍'),
-    ('⛏', '⛡'),
-    ('⛣', '⛣'),
-    ('⛹', '⛿'),
-    ('❗', '❗'),
-    ('⭕', '⭙'),
-    ('â±°', 'â±°'),
-    ('ⱟ', 'Ɀ'),
-    ('âł«', '\u{2cf1}'),
-    ('âž±', 'âž±'),
-    ('㉄', '㉏'),
-    ('鿄', '鿋'),
-    ('ꓐ', 'ê“ż'),
-    ('ꚠ', '꛷'),
-    ('ê °', 'ê č'),
-    ('\u{a8e0}', 'êŁ»'),
-    ('ꄠ', 'ꄌ'),
-    ('\u{a980}', '꧍'),
-    ('ꧏ', '꧙'),
-    ('꧞', '꧟'),
-    ('ê© ', 'ê©»'),
-    ('êȘ€', 'ꫂ'),
-    ('ꫛ', '꫟'),
-    ('êŻ€', '\u{abed}'),
-    ('êŻ°', 'êŻč'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('ï©«', 'ï©­'),
-    ('𐡀', '𐡕'),
-    ('𐡗', '𐡟'),
-    ('𐀚', '𐀛'),
-    ('𐩠', '𐩿'),
-    ('𐬀', '𐏔'),
-    ('đŹč', '𐭕'),
-    ('𐭘', 'đ­Č'),
-    ('𐭞', '𐭿'),
-    ('𐰀', '𐱈'),
-    ('đč ', 'đčŸ'),
-    ('\u{11080}', '𑃁'),
-    ('𓀀', '𓐼'),
-    ('🄀', '🄊'),
-    ('🄐', '🄼'),
-    ('đŸ„±', 'đŸ„±'),
-    ('đŸ„œ', 'đŸ„œ'),
-    ('🄿', '🄿'),
-    ('🅂', '🅂'),
-    ('🅆', '🅆'),
-    ('🅊', '🅎'),
-    ('🅗', '🅗'),
-    ('🅟', '🅟'),
-    ('đŸ…č', 'đŸ…č'),
-    ('đŸ…»', 'đŸ…Œ'),
-    ('🅿', '🅿'),
-    ('🆊', '🆍'),
-    ('🆐', '🆐'),
-    ('🈀', '🈀'),
-    ('🈐', 'đŸˆ±'),
-    ('🉀', '🉈'),
-    ('đȘœ€', 'đ«œŽ'),
-];
-
-pub const V6_0: &'static [(char, char)] = &[
-    ('ÔŠ', 'Ô§'),
-    ('Ű ', 'Ű '),
-    ('\u{65f}', '\u{65f}'),
-    ('àĄ€', '\u{85b}'),
-    ('àĄž', 'àĄž'),
-    ('\u{93a}', 'à€»'),
-    ('à„', 'à„'),
-    ('\u{956}', '\u{957}'),
-    ('à„ł', 'à„·'),
-    ('à­Č', 'à­·'),
-    ('àŽ©', 'àŽ©'),
-    ('àŽș', 'àŽș'),
-    ('à”Ž', 'à”Ž'),
-    ('àŸŒ', '\u{f8f}'),
-    ('àż™', 'àżš'),
-    ('\u{135d}', '\u{135e}'),
-    ('ᯀ', '\u{1bf3}'),
-    ('áŻŒ', '᯿'),
-    ('\u{1dfc}', '\u{1dfc}'),
-    ('ₕ', 'ₜ'),
-    ('â‚č', 'â‚č'),
-    ('⏩', '⏳'),
-    ('⛎', '⛎'),
-    ('⛱', '⛱'),
-    ('⛀', '⛧'),
-    ('✅', '✅'),
-    ('✊', '✋'),
-    ('✹', '✹'),
-    ('❌', '❌'),
-    ('❎', '❎'),
-    ('❓', '❕'),
-    ('❟', '❠'),
-    ('➕', '➗'),
-    ('➰', '➰'),
-    ('➿', '➿'),
-    ('⟎', '⟏'),
-    ('â”°', 'â”°'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('ㆾ', 'ă†ș'),
-    ('Ꙡ', 'ê™Ą'),
-    ('Ɥ', 'ꞎ'),
-    ('Ꞑ', 'ꞑ'),
-    ('Ꞡ', 'ꞩ'),
-    ('êŸș', 'êŸș'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('ïźČ', 'ïŻ'),
-    ('𑀀', '𑁍'),
-    ('𑁒', '𑁯'),
-    ('𖠀', '𖹾'),
-    ('𛀀', '𛀁'),
-    ('🂠', '🂼'),
-    ('đŸ‚±', 'đŸ‚Ÿ'),
-    ('🃁', '🃏'),
-    ('🃑', '🃟'),
-    ('🄰', '🄰'),
-    ('đŸ„Č', 'đŸ„Œ'),
-    ('đŸ„Ÿ', 'đŸ„Ÿ'),
-    ('🅀', '🅁'),
-    ('🅃', '🅅'),
-    ('🅇', '🅉'),
-    ('🅏', '🅖'),
-    ('🅘', '🅞'),
-    ('🅠', 'đŸ…©'),
-    ('🅰', '🅾'),
-    ('đŸ…ș', 'đŸ…ș'),
-    ('đŸ…œ', 'đŸ…Ÿ'),
-    ('🆀', '🆉'),
-    ('🆎', '🆏'),
-    ('🆑', '🆚'),
-    ('🇩', '🇿'),
-    ('🈁', '🈂'),
-    ('đŸˆČ', 'đŸˆș'),
-    ('🉐', '🉑'),
-    ('🌀', '🌠'),
-    ('🌰', 'đŸŒ”'),
-    ('đŸŒ·', 'đŸŒ'),
-    ('🎀', '🎓'),
-    ('🎠', '🏄'),
-    ('🏆', '🏊'),
-    ('🏠', '🏰'),
-    ('🐀', 'đŸŸ'),
-    ('👀', '👀'),
-    ('👂', 'đŸ“·'),
-    ('đŸ“č', 'đŸ“Œ'),
-    ('🔀', 'đŸ”œ'),
-    ('🕐', '🕧'),
-    ('đŸ—»', '🗿'),
-    ('😁', '😐'),
-    ('😒', '😔'),
-    ('😖', '😖'),
-    ('😘', '😘'),
-    ('😚', '😚'),
-    ('😜', '😞'),
-    ('😠', 'đŸ˜„'),
-    ('😹', 'đŸ˜«'),
-    ('😭', '😭'),
-    ('😰', '😳'),
-    ('đŸ˜”', '🙀'),
-    ('🙅', '🙏'),
-    ('🚀', '🛅'),
-    ('🜀', '🝳'),
-    ('đ«€', 'đ« '),
-];
-
-pub const V6_1: &'static [(char, char)] = &[
-    ('֏', '֏'),
-    ('\u{604}', '\u{604}'),
-    ('àą ', 'àą '),
-    ('àąą', 'àąŹ'),
-    ('\u{8e4}', '\u{8fe}'),
-    ('à«°', 'à«°'),
-    ('ໞ', 'ໟ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ნ', 'ჿ'),
-    ('\u{1bab}', '\u{1bad}'),
-    ('áźș', 'áźż'),
-    ('᳀', '᳇'),
-    ('áłł', 'áł¶'),
-    ('⟋', '⟋'),
-    ('⟍', '⟍'),
-    ('âłČ', 'âłł'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('┊', '┧'),
-    ('âžČ', 'âž»'),
-    ('鿌', '鿌'),
-    ('\u{a674}', '\u{a67b}'),
-    ('\u{a69f}', '\u{a69f}'),
-    ('Ꞓ', 'ꞓ'),
-    ('êžȘ', 'êžȘ'),
-    ('꟞', 'êŸč'),
-    ('ê« ', '\u{aaf6}'),
-    ('ïšź', 'ïšŻ'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𑃐', '𑃹'),
-    ('𑃰', 'đ‘ƒč'),
-    ('\u{11100}', '\u{11134}'),
-    ('đ‘„¶', '𑅃'),
-    ('\u{11180}', '𑇈'),
-    ('𑇐', '𑇙'),
-    ('𑚀', '\u{116b7}'),
-    ('𑛀', '𑛉'),
-    ('đ–Œ€', 'đ–œ„'),
-    ('đ–œ', 'đ–œŸ'),
-    ('\u{16f8f}', 'đ–ŸŸ'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('đž»°', 'đž»±'),
-    ('đŸ…Ș', 'đŸ…«'),
-    ('🕀', '🕃'),
-    ('😀', '😀'),
-    ('😑', '😑'),
-    ('😕', '😕'),
-    ('😗', '😗'),
-    ('😙', '😙'),
-    ('😛', '😛'),
-    ('😟', '😟'),
-    ('😩', '😧'),
-    ('😬', '😬'),
-    ('😼', '😯'),
-    ('😮', '😮'),
-];
-
-pub const V6_2: &'static [(char, char)] = &[('â‚ș', 'â‚ș')];
-
-pub const V6_3: &'static [(char, char)] =
-    &[('\u{61c}', '\u{61c}'), ('\u{2066}', '\u{2069}')];
-
-pub const V7_0: &'static [(char, char)] = &[
-    ('Íż', 'Íż'),
-    ('Ôš', 'ÔŻ'),
-    ('֍', '֎'),
-    ('\u{605}', '\u{605}'),
-    ('àąĄ', 'àąĄ'),
-    ('àą­', 'àąČ'),
-    ('\u{8ff}', '\u{8ff}'),
-    ('à„ž', 'à„ž'),
-    ('àŠ€', 'àŠ€'),
-    ('\u{c00}', '\u{c00}'),
-    ('à°Ž', 'à°Ž'),
-    ('\u{c81}', '\u{c81}'),
-    ('\u{d01}', '\u{d01}'),
-    ('à·Š', 'à·Ż'),
-    ('ᛱ', '᛾'),
-    ('ဝ', 'သ'),
-    ('\u{1ab0}', '\u{1abe}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{1de7}', '\u{1df5}'),
-    ('₻', 'ₜ'),
-    ('⏎', 'âș'),
-    ('✀', '✀'),
-    ('⭍', '⭏'),
-    ('⭚', '⭳'),
-    ('â­¶', '⼕'),
-    ('⟘', 'âźč'),
-    ('âźœ', '⯈'),
-    ('⯊', '⯑'),
-    ('➌', 'âč‚'),
-    ('Ꚙ', 'ꚝ'),
-    ('ꞔ', 'ꞟ'),
-    ('Ɜ', 'Ɬ'),
-    ('Ʞ', 'Ʇ'),
-    ('ꟷ', 'ꟷ'),
-    ('ê§ ', 'ê§Ÿ'),
-    ('\u{aa7c}', 'ê©ż'),
-    ('êŹ°', 'ꭟ'),
-    ('ê­€', 'ê­„'),
-    ('\u{fe27}', '\u{fe2d}'),
-    ('𐆋', '𐆌'),
-    ('𐆠', '𐆠'),
-    ('\u{102e0}', '𐋻'),
-    ('𐌟', '𐌟'),
-    ('𐍐', '\u{1037a}'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕯', '𐕯'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐡠', '𐱞'),
-    ('𐹧', '𐹯'),
-    ('đȘ€', 'đȘŸ'),
-    ('𐫀', '\u{10ae6}'),
-    ('𐫫', '𐫶'),
-    ('𐼀', '𐼑'),
-    ('𐼙', '𐼜'),
-    ('𐟩', '𐟯'),
-    ('\u{1107f}', '\u{1107f}'),
-    ('𑅐', 'đ‘…¶'),
-    ('𑇍', '𑇍'),
-    ('𑇚', '𑇚'),
-    ('𑇡', '𑇮'),
-    ('𑈀', '𑈑'),
-    ('𑈓', 'đ‘ˆœ'),
-    ('𑊰', '\u{112ea}'),
-    ('𑋰', 'đ‘‹č'),
-    ('\u{11301}', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('\u{1133c}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍝', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('𑒀', '𑓇'),
-    ('𑓐', '𑓙'),
-    ('𑖀', '\u{115b5}'),
-    ('𑖾', '𑗉'),
-    ('𑘀', '𑙄'),
-    ('𑙐', '𑙙'),
-    ('𑱠', 'đ‘ŁČ'),
-    ('𑣿', '𑣿'),
-    ('đ‘«€', 'đ‘«ž'),
-    ('𒍯', '𒎘'),
-    ('𒑣', '𒑼'),
-    ('𒑮', '𒑮'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–©ź', 'đ–©Ż'),
-    ('𖫐', 'đ–«­'),
-    ('\u{16af0}', 'đ–«”'),
-    ('𖬀', '𖭅'),
-    ('𖭐', '𖭙'),
-    ('𖭛', '𖭡'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('đ›Čœ', '\u{1bca3}'),
-    ('𞠀', '𞣄'),
-    ('𞣇', '\u{1e8d6}'),
-    ('🂿', '🂿'),
-    ('🃠', 'đŸƒ”'),
-    ('🄋', '🄌'),
-    ('🌡', '🌬'),
-    ('đŸŒ¶', 'đŸŒ¶'),
-    ('đŸœ', 'đŸœ'),
-    ('🎔', '🎟'),
-    ('🏅', '🏅'),
-    ('🏋', '🏎'),
-    ('🏔', '🏟'),
-    ('đŸ±', 'đŸ·'),
-    ('🐿', '🐿'),
-    ('👁', '👁'),
-    ('📾', '📾'),
-    ('đŸ“œ', 'đŸ“Ÿ'),
-    ('đŸ”Ÿ', '🔿'),
-    ('🕄', '🕊'),
-    ('🕹', 'đŸ•č'),
-    ('đŸ•»', '🖣'),
-    ('đŸ–„', 'đŸ—ș'),
-    ('🙁', '🙂'),
-    ('🙐', '🙿'),
-    ('🛆', '🛏'),
-    ('🛠', '🛬'),
-    ('🛰', '🛳'),
-    ('🞀', '🟔'),
-    ('🠀', '🠋'),
-    ('🠐', '🡇'),
-    ('🡐', '🡙'),
-    ('🡠', '🱇'),
-    ('🱐', '🱭'),
-];
-
-pub const V8_0: &'static [(char, char)] = &[
-    ('àął', 'àąŽ'),
-    ('\u{8e3}', '\u{8e3}'),
-    ('à«č', 'à«č'),
-    ('ౚ', 'ౚ'),
-    ('à”Ÿ', 'à”Ÿ'),
-    ('Ꮤ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('₟', '₟'),
-    ('↊', '↋'),
-    ('⯏', '⯯'),
-    ('鿍', '鿕'),
-    ('\u{a69e}', '\u{a69e}'),
-    ('ꞏ', 'ꞏ'),
-    ('êžČ', 'ꞷ'),
-    ('êŁŒ', 'êŁœ'),
-    ('ê­ ', 'ê­Ł'),
-    ('ê­°', 'êźż'),
-    ('\u{fe2e}', '\u{fe2f}'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐣻', '𐣿'),
-    ('đŠŒ', 'đŠœ'),
-    ('𐧀', '𐧏'),
-    ('𐧒', '𐧿'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('đłș', '𐳿'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('𑇛', '𑇟'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊩'),
-    ('\u{11300}', '\u{11300}'),
-    ('𑍐', '𑍐'),
-    ('𑗊', '\u{115dd}'),
-    ('𑜀', '𑜙'),
-    ('\u{1171d}', '\u{1172b}'),
-    ('𑜰', '𑜿'),
-    ('𒎙', '𒎙'),
-    ('𒒀', '𒕃'),
-    ('𔐀', '𔙆'),
-    ('𝇞', '𝇹'),
-    ('𝠀', 'đȘ‹'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('🌭', '🌯'),
-    ('đŸŸ', '🍿'),
-    ('🏏', '🏓'),
-    ('🏾', '🏿'),
-    ('📿', '📿'),
-    ('🕋', '🕏'),
-    ('🙃', '🙄'),
-    ('🛐', '🛐'),
-    ('đŸ€', 'đŸ€˜'),
-    ('🩀', '🩄'),
-    ('🧀', '🧀'),
-    ('đ«  ', 'đŹșĄ'),
-];
-
-pub const V9_0: &'static [(char, char)] = &[
-    ('àą¶', 'àąœ'),
-    ('\u{8d4}', '\u{8e2}'),
-    ('àȀ', 'àȀ'),
-    ('à”', 'à”'),
-    ('à””', 'à”–'),
-    ('à”˜', 'à”ž'),
-    ('à”¶', 'à”ž'),
-    ('áȀ', 'áȈ'),
-    ('\u{1dfb}', '\u{1dfb}'),
-    ('⏻', '⏟'),
-    ('âčƒ', 'âč„'),
-    ('êžź', 'êžź'),
-    ('\u{a8c5}', '\u{a8c5}'),
-    ('𐆍', '𐆎'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('𑐀', '𑑙'),
-    ('𑑛', '𑑛'),
-    ('𑑝', '𑑝'),
-    ('𑙠', '𑙬'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '\u{11c36}'),
-    ('\u{11c38}', '𑱅'),
-    ('𑱐', '𑱏'),
-    ('𑱰', 'đ‘ȏ'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('𖿠', '𖿠'),
-    ('𗀀', '𘟬'),
-    ('𘠀', 'đ˜«Č'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('𞀀', '\u{1e94a}'),
-    ('𞄐', 'đž„™'),
-    ('𞄞', 'đž„Ÿ'),
-    ('🆛', '🆬'),
-    ('đŸˆ»', 'đŸˆ»'),
-    ('đŸ•ș', 'đŸ•ș'),
-    ('đŸ–€', 'đŸ–€'),
-    ('🛑', '🛒'),
-    ('🛮', 'đŸ›¶'),
-    ('đŸ€™', 'đŸ€ž'),
-    ('đŸ€ ', 'đŸ€§'),
-    ('đŸ€°', 'đŸ€°'),
-    ('đŸ€ł', 'đŸ€Ÿ'),
-    ('đŸ„€', 'đŸ„‹'),
-    ('đŸ„', 'đŸ„ž'),
-    ('🩅', '🩑'),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/case_folding_simple.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/case_folding_simple.rs
deleted file mode 100644
index 07f6ff2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/case_folding_simple.rs
+++ /dev/null
@@ -1,2948 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate case-folding-simple ucd-16.0.0 --chars --all-pairs
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const CASE_FOLDING_SIMPLE: &'static [(char, &'static [char])] = &[
-    ('A', &['a']),
-    ('B', &['b']),
-    ('C', &['c']),
-    ('D', &['d']),
-    ('E', &['e']),
-    ('F', &['f']),
-    ('G', &['g']),
-    ('H', &['h']),
-    ('I', &['i']),
-    ('J', &['j']),
-    ('K', &['k', 'â„Ș']),
-    ('L', &['l']),
-    ('M', &['m']),
-    ('N', &['n']),
-    ('O', &['o']),
-    ('P', &['p']),
-    ('Q', &['q']),
-    ('R', &['r']),
-    ('S', &['s', 'Ćż']),
-    ('T', &['t']),
-    ('U', &['u']),
-    ('V', &['v']),
-    ('W', &['w']),
-    ('X', &['x']),
-    ('Y', &['y']),
-    ('Z', &['z']),
-    ('a', &['A']),
-    ('b', &['B']),
-    ('c', &['C']),
-    ('d', &['D']),
-    ('e', &['E']),
-    ('f', &['F']),
-    ('g', &['G']),
-    ('h', &['H']),
-    ('i', &['I']),
-    ('j', &['J']),
-    ('k', &['K', 'â„Ș']),
-    ('l', &['L']),
-    ('m', &['M']),
-    ('n', &['N']),
-    ('o', &['O']),
-    ('p', &['P']),
-    ('q', &['Q']),
-    ('r', &['R']),
-    ('s', &['S', 'Ćż']),
-    ('t', &['T']),
-    ('u', &['U']),
-    ('v', &['V']),
-    ('w', &['W']),
-    ('x', &['X']),
-    ('y', &['Y']),
-    ('z', &['Z']),
-    ('µ', &['Μ', 'μ']),
-    ('À', &['à']),
-    ('Á', &['á']),
-    ('Â', &['â']),
-    ('Ã', &['ã']),
-    ('Ä', &['ä']),
-    ('Å', &['å', 'Å']),
-    ('Æ', &['æ']),
-    ('Ç', &['ç']),
-    ('È', &['è']),
-    ('É', &['é']),
-    ('Ê', &['ê']),
-    ('Ë', &['ë']),
-    ('Ì', &['ì']),
-    ('Í', &['í']),
-    ('Î', &['î']),
-    ('Ï', &['ï']),
-    ('Ð', &['ð']),
-    ('Ñ', &['ñ']),
-    ('Ò', &['ò']),
-    ('Ó', &['ó']),
-    ('Ô', &['ô']),
-    ('Õ', &['õ']),
-    ('Ö', &['ö']),
-    ('Ø', &['ø']),
-    ('Ù', &['ù']),
-    ('Ú', &['ú']),
-    ('Û', &['û']),
-    ('Ü', &['ü']),
-    ('Ý', &['ý']),
-    ('Þ', &['þ']),
-    ('ß', &['áșž']),
-    ('à', &['À']),
-    ('á', &['Á']),
-    ('â', &['Â']),
-    ('ã', &['Ã']),
-    ('ä', &['Ä']),
-    ('å', &['Å', 'Å']),
-    ('æ', &['Æ']),
-    ('ç', &['Ç']),
-    ('è', &['È']),
-    ('é', &['É']),
-    ('ê', &['Ê']),
-    ('ë', &['Ë']),
-    ('ì', &['Ì']),
-    ('í', &['Í']),
-    ('î', &['Î']),
-    ('ï', &['Ï']),
-    ('ð', &['Ð']),
-    ('ñ', &['Ñ']),
-    ('ò', &['Ò']),
-    ('ó', &['Ó']),
-    ('ô', &['Ô']),
-    ('õ', &['Õ']),
-    ('ö', &['Ö']),
-    ('ø', &['Ø']),
-    ('ù', &['Ù']),
-    ('ú', &['Ú']),
-    ('û', &['Û']),
-    ('ü', &['Ü']),
-    ('ý', &['Ý']),
-    ('þ', &['Þ']),
-    ('ÿ', &['Ÿ']),
-    ('Ā', &['ā']),
-    ('ā', &['Ā']),
-    ('Ă', &['ă']),
-    ('ă', &['Ă']),
-    ('Ą', &['ą']),
-    ('ą', &['Ą']),
-    ('Ć', &['ć']),
-    ('ć', &['Ć']),
-    ('Ĉ', &['ĉ']),
-    ('ĉ', &['Ĉ']),
-    ('Ċ', &['ċ']),
-    ('ċ', &['Ċ']),
-    ('Č', &['č']),
-    ('č', &['Č']),
-    ('Ď', &['ď']),
-    ('ď', &['Ď']),
-    ('Đ', &['đ']),
-    ('đ', &['Đ']),
-    ('Ē', &['ē']),
-    ('ē', &['Ē']),
-    ('Ĕ', &['ĕ']),
-    ('ĕ', &['Ĕ']),
-    ('Ė', &['ė']),
-    ('ė', &['Ė']),
-    ('Ę', &['ę']),
-    ('ę', &['Ę']),
-    ('Ě', &['ě']),
-    ('ě', &['Ě']),
-    ('Ĝ', &['ĝ']),
-    ('ĝ', &['Ĝ']),
-    ('Ğ', &['ğ']),
-    ('ğ', &['Ğ']),
-    ('Ä ', &['ÄĄ']),
-    ('ÄĄ', &['Ä ']),
-    ('Äą', &['ÄŁ']),
-    ('ÄŁ', &['Äą']),
-    ('Ä€', &['Ä„']),
-    ('Ä„', &['Ä€']),
-    ('Ċ', &['ħ']),
-    ('ħ', &['Ċ']),
-    ('Äš', &['Ä©']),
-    ('Ä©', &['Äš']),
-    ('ÄȘ', &['Ä«']),
-    ('Ä«', &['ÄȘ']),
-    ('ÄŹ', &['Ä­']),
-    ('Ä­', &['ÄŹ']),
-    ('Äź', &['ÄŻ']),
-    ('ÄŻ', &['Äź']),
-    ('ÄČ', &['Äł']),
-    ('Äł', &['ÄČ']),
-    ('ÄŽ', &['Ä”']),
-    ('Ä”', &['ÄŽ']),
-    ('Ķ', &['ķ']),
-    ('ķ', &['Ķ']),
-    ('Äč', &['Äș']),
-    ('Äș', &['Äč']),
-    ('Ļ', &['Č']),
-    ('Č', &['Ļ']),
-    ('Ĝ', &['ğ']),
-    ('ğ', &['Ĝ']),
-    ('Ŀ', &['ƀ']),
-    ('ƀ', &['Ŀ']),
-    ('Ɓ', &['Ƃ']),
-    ('Ƃ', &['Ɓ']),
-    ('ƃ', &['Ƅ']),
-    ('Ƅ', &['ƃ']),
-    ('ƅ', &['Ɔ']),
-    ('Ɔ', &['ƅ']),
-    ('Ƈ', &['ƈ']),
-    ('ƈ', &['Ƈ']),
-    ('Ɗ', &['Ƌ']),
-    ('Ƌ', &['Ɗ']),
-    ('ƌ', &['ƍ']),
-    ('ƍ', &['ƌ']),
-    ('Ǝ', &['Ə']),
-    ('Ə', &['Ǝ']),
-    ('Ɛ', &['Ƒ']),
-    ('Ƒ', &['Ɛ']),
-    ('Œ', &['œ']),
-    ('œ', &['Œ']),
-    ('Ɣ', &['ƕ']),
-    ('ƕ', &['Ɣ']),
-    ('Ɩ', &['Ɨ']),
-    ('Ɨ', &['Ɩ']),
-    ('Ƙ', &['ƙ']),
-    ('ƙ', &['Ƙ']),
-    ('ƚ', &['ƛ']),
-    ('ƛ', &['ƚ']),
-    ('Ɯ', &['Ɲ']),
-    ('Ɲ', &['Ɯ']),
-    ('ƞ', &['Ɵ']),
-    ('Ɵ', &['ƞ']),
-    ('Š', &['š']),
-    ('š', &['Š']),
-    ('Ćą', &['ĆŁ']),
-    ('ĆŁ', &['Ćą']),
-    ('Ć€', &['Ć„']),
-    ('Ć„', &['Ć€']),
-    ('Ɗ', &['Ƨ']),
-    ('Ƨ', &['Ɗ']),
-    ('Ćš', &['Ć©']),
-    ('Ć©', &['Ćš']),
-    ('ĆȘ', &['Ć«']),
-    ('Ć«', &['ĆȘ']),
-    ('ĆŹ', &['Ć­']),
-    ('Ć­', &['ĆŹ']),
-    ('Ćź', &['ĆŻ']),
-    ('ĆŻ', &['Ćź']),
-    ('ư', &['Ʊ']),
-    ('Ʊ', &['ư']),
-    ('ĆČ', &['Ćł']),
-    ('Ćł', &['ĆČ']),
-    ('ĆŽ', &['Ć”']),
-    ('Ć”', &['ĆŽ']),
-    ('ƶ', &['Ʒ']),
-    ('Ʒ', &['ƶ']),
-    ('Ÿ', &['ÿ']),
-    ('Ćč', &['Ćș']),
-    ('Ćș', &['Ćč']),
-    ('Ć»', &['ĆŒ']),
-    ('ĆŒ', &['Ć»']),
-    ('Ćœ', &['ĆŸ']),
-    ('ĆŸ', &['Ćœ']),
-    ('Ćż', &['S', 's']),
-    ('ƀ', &['Ƀ']),
-    ('Ɓ', &['ɓ']),
-    ('Ƃ', &['ƃ']),
-    ('ƃ', &['Ƃ']),
-    ('Ƅ', &['ƅ']),
-    ('ƅ', &['Ƅ']),
-    ('Ɔ', &['ɔ']),
-    ('Ƈ', &['ƈ']),
-    ('ƈ', &['Ƈ']),
-    ('Ɖ', &['ɖ']),
-    ('Ɗ', &['ɗ']),
-    ('Ƌ', &['ƌ']),
-    ('ƌ', &['Ƌ']),
-    ('Ǝ', &['ǝ']),
-    ('Ə', &['ə']),
-    ('Ɛ', &['ɛ']),
-    ('Ƒ', &['ƒ']),
-    ('ƒ', &['Ƒ']),
-    ('Ɠ', &['ɠ']),
-    ('Ɣ', &['ÉŁ']),
-    ('ƕ', &['Ƕ']),
-    ('Ɩ', &['ɩ']),
-    ('Ɨ', &['Éš']),
-    ('Ƙ', &['ƙ']),
-    ('ƙ', &['Ƙ']),
-    ('ƚ', &['Èœ']),
-    ('ƛ', &['Ƛ']),
-    ('Ɯ', &['ÉŻ']),
-    ('Ɲ', &['ÉČ']),
-    ('ƞ', &['Ƞ']),
-    ('Ɵ', &['É”']),
-    ('Æ ', &['ÆĄ']),
-    ('ÆĄ', &['Æ ']),
-    ('Æą', &['ÆŁ']),
-    ('ÆŁ', &['Æą']),
-    ('Æ€', &['Æ„']),
-    ('Æ„', &['Æ€']),
-    ('ÆŠ', &['ʀ']),
-    ('Ƨ', &['ƚ']),
-    ('ƚ', &['Ƨ']),
-    ('Ʃ', &['ʃ']),
-    ('ÆŹ', &['Æ­']),
-    ('Æ­', &['ÆŹ']),
-    ('Æź', &['ʈ']),
-    ('ÆŻ', &['ư']),
-    ('ư', &['ÆŻ']),
-    ('Ʊ', &['ʊ']),
-    ('ÆČ', &['ʋ']),
-    ('Æł', &['ÆŽ']),
-    ('ÆŽ', &['Æł']),
-    ('Ɣ', &['ƶ']),
-    ('ƶ', &['Ɣ']),
-    ('Ʒ', &['ʒ']),
-    ('Æž', &['Æč']),
-    ('Æč', &['Æž']),
-    ('ƌ', &['Ɯ']),
-    ('Ɯ', &['ƌ']),
-    ('Æż', &['Ç·']),
-    ('DŽ', &['Dž', 'dž']),
-    ('Dž', &['DŽ', 'dž']),
-    ('dž', &['DŽ', 'Dž']),
-    ('LJ', &['Lj', 'lj']),
-    ('Lj', &['LJ', 'lj']),
-    ('lj', &['LJ', 'Lj']),
-    ('NJ', &['Nj', 'nj']),
-    ('Nj', &['NJ', 'nj']),
-    ('nj', &['NJ', 'Nj']),
-    ('Ǎ', &['ǎ']),
-    ('ǎ', &['Ǎ']),
-    ('Ǐ', &['ǐ']),
-    ('ǐ', &['Ǐ']),
-    ('Ǒ', &['ǒ']),
-    ('ǒ', &['Ǒ']),
-    ('Ǔ', &['ǔ']),
-    ('ǔ', &['Ǔ']),
-    ('Ǖ', &['ǖ']),
-    ('ǖ', &['Ǖ']),
-    ('Ǘ', &['ǘ']),
-    ('ǘ', &['Ǘ']),
-    ('Ǚ', &['ǚ']),
-    ('ǚ', &['Ǚ']),
-    ('Ǜ', &['ǜ']),
-    ('ǜ', &['Ǜ']),
-    ('ǝ', &['Ǝ']),
-    ('Ǟ', &['ǟ']),
-    ('ǟ', &['Ǟ']),
-    ('Ç ', &['ÇĄ']),
-    ('ÇĄ', &['Ç ']),
-    ('Çą', &['ÇŁ']),
-    ('ÇŁ', &['Çą']),
-    ('Ç€', &['Ç„']),
-    ('Ç„', &['Ç€']),
-    ('NJ', &['ǧ']),
-    ('ǧ', &['NJ']),
-    ('Çš', &['Ç©']),
-    ('Ç©', &['Çš']),
-    ('ÇȘ', &['Ç«']),
-    ('Ç«', &['ÇȘ']),
-    ('ÇŹ', &['Ç­']),
-    ('Ç­', &['ÇŹ']),
-    ('Çź', &['ÇŻ']),
-    ('ÇŻ', &['Çź']),
-    ('DZ', &['ÇČ', 'Çł']),
-    ('ÇČ', &['DZ', 'Çł']),
-    ('Çł', &['DZ', 'ÇČ']),
-    ('ÇŽ', &['Ç”']),
-    ('Ç”', &['ÇŽ']),
-    ('Ƕ', &['ƕ']),
-    ('Ç·', &['Æż']),
-    ('Çž', &['Çč']),
-    ('Çč', &['Çž']),
-    ('Çș', &['Ç»']),
-    ('Ç»', &['Çș']),
-    ('nj', &['ǜ']),
-    ('ǜ', &['nj']),
-    ('ÇŸ', &['Çż']),
-    ('Çż', &['ÇŸ']),
-    ('Ȁ', &['ȁ']),
-    ('ȁ', &['Ȁ']),
-    ('Ȃ', &['ȃ']),
-    ('ȃ', &['Ȃ']),
-    ('Ȅ', &['ȅ']),
-    ('ȅ', &['Ȅ']),
-    ('Ȇ', &['ȇ']),
-    ('ȇ', &['Ȇ']),
-    ('Ȉ', &['ȉ']),
-    ('ȉ', &['Ȉ']),
-    ('Ȋ', &['ȋ']),
-    ('ȋ', &['Ȋ']),
-    ('Ȍ', &['ȍ']),
-    ('ȍ', &['Ȍ']),
-    ('Ȏ', &['ȏ']),
-    ('ȏ', &['Ȏ']),
-    ('Ȑ', &['ȑ']),
-    ('ȑ', &['Ȑ']),
-    ('Ȓ', &['ȓ']),
-    ('ȓ', &['Ȓ']),
-    ('Ȕ', &['ȕ']),
-    ('ȕ', &['Ȕ']),
-    ('Ȗ', &['ȗ']),
-    ('ȗ', &['Ȗ']),
-    ('Ș', &['ș']),
-    ('ș', &['Ș']),
-    ('Ț', &['ț']),
-    ('ț', &['Ț']),
-    ('Ȝ', &['ȝ']),
-    ('ȝ', &['Ȝ']),
-    ('Ȟ', &['ȟ']),
-    ('ȟ', &['Ȟ']),
-    ('Ƞ', &['ƞ']),
-    ('Èą', &['ÈŁ']),
-    ('ÈŁ', &['Èą']),
-    ('È€', &['È„']),
-    ('È„', &['È€']),
-    ('Ȋ', &['ȧ']),
-    ('ȧ', &['Ȋ']),
-    ('Èš', &['È©']),
-    ('È©', &['Èš']),
-    ('ÈȘ', &['È«']),
-    ('È«', &['ÈȘ']),
-    ('ÈŹ', &['È­']),
-    ('È­', &['ÈŹ']),
-    ('Èź', &['ÈŻ']),
-    ('ÈŻ', &['Èź']),
-    ('Ȱ', &['ȱ']),
-    ('ȱ', &['Ȱ']),
-    ('ÈČ', &['Èł']),
-    ('Èł', &['ÈČ']),
-    ('Èș', &['ⱄ']),
-    ('Ȼ', &['Ȍ']),
-    ('Ȍ', &['Ȼ']),
-    ('Èœ', &['ƚ']),
-    ('ȟ', &['ⱊ']),
-    ('Èż', &['ⱟ']),
-    ('ɀ', &['Ɀ']),
-    ('Ɂ', &['ɂ']),
-    ('ɂ', &['Ɂ']),
-    ('Ƀ', &['ƀ']),
-    ('Ʉ', &['ʉ']),
-    ('Ʌ', &['ʌ']),
-    ('Ɇ', &['ɇ']),
-    ('ɇ', &['Ɇ']),
-    ('Ɉ', &['ɉ']),
-    ('ɉ', &['Ɉ']),
-    ('Ɋ', &['ɋ']),
-    ('ɋ', &['Ɋ']),
-    ('Ɍ', &['ɍ']),
-    ('ɍ', &['Ɍ']),
-    ('Ɏ', &['ɏ']),
-    ('ɏ', &['Ɏ']),
-    ('ɐ', &['Ɐ']),
-    ('ɑ', &['Ɑ']),
-    ('ɒ', &['Ɒ']),
-    ('ɓ', &['Ɓ']),
-    ('ɔ', &['Ɔ']),
-    ('ɖ', &['Ɖ']),
-    ('ɗ', &['Ɗ']),
-    ('ə', &['Ə']),
-    ('ɛ', &['Ɛ']),
-    ('ɜ', &['Ɜ']),
-    ('ɠ', &['Ɠ']),
-    ('ÉĄ', &['êžŹ']),
-    ('ÉŁ', &['Ɣ']),
-    ('É€', &['Ɤ']),
-    ('Ʉ', &['Ɥ']),
-    ('ÉŠ', &['êžȘ']),
-    ('Éš', &['Ɨ']),
-    ('ɩ', &['Ɩ']),
-    ('ÉȘ', &['êžź']),
-    ('ɫ', &['ⱹ']),
-    ('ÉŹ', &['Ɬ']),
-    ('ÉŻ', &['Ɯ']),
-    ('ɱ', &['ⱟ']),
-    ('ÉČ', &['Ɲ']),
-    ('É”', &['Ɵ']),
-    ('ɜ', &['ⱀ']),
-    ('ʀ', &['ÆŠ']),
-    ('ʂ', &['Ʂ']),
-    ('ʃ', &['Ʃ']),
-    ('ʇ', &['Ʇ']),
-    ('ʈ', &['Æź']),
-    ('ʉ', &['Ʉ']),
-    ('ʊ', &['Ʊ']),
-    ('ʋ', &['ÆČ']),
-    ('ʌ', &['Ʌ']),
-    ('ʒ', &['Ʒ']),
-    ('ʝ', &['êžČ']),
-    ('ʞ', &['Ʞ']),
-    ('\u{345}', &['Ι', 'ι', '៟']),
-    ('Ͱ', &['ͱ']),
-    ('ͱ', &['Ͱ']),
-    ('ÍČ', &['Íł']),
-    ('Íł', &['ÍČ']),
-    ('Ͷ', &['ͷ']),
-    ('ͷ', &['Ͷ']),
-    ('ͻ', &['Ϝ']),
-    ('͌', &['ϟ']),
-    ('Íœ', &['Ïż']),
-    ('Íż', &['Ïł']),
-    ('Ά', &['ά']),
-    ('Έ', &['έ']),
-    ('Ή', &['μ']),
-    ('Ί', &['ί']),
-    ('Ό', &['ό']),
-    ('Ύ', &['ύ']),
-    ('Ώ', &['ώ']),
-    ('ΐ', &['ΐ']),
-    ('Α', &['α']),
-    ('Β', &['β', 'ϐ']),
-    ('Γ', &['γ']),
-    ('Δ', &['δ']),
-    ('Ε', &['ε', 'Ï”']),
-    ('Ζ', &['ζ']),
-    ('Η', &['η']),
-    ('Θ', &['θ', 'ϑ', 'ÏŽ']),
-    ('Ι', &['\u{345}', 'ι', '៟']),
-    ('Κ', &['κ', 'ϰ']),
-    ('Λ', &['λ']),
-    ('Μ', &['µ', 'μ']),
-    ('Ν', &['ν']),
-    ('Ξ', &['ξ']),
-    ('Ο', &['ο']),
-    ('Π', &['π', 'ϖ']),
-    ('Ρ', &['ρ', 'ϱ']),
-    ('Σ', &['ς', 'σ']),
-    ('Τ', &['τ']),
-    ('Υ', &['υ']),
-    ('Φ', &['φ', 'ϕ']),
-    ('Χ', &['χ']),
-    ('Ψ', &['ψ']),
-    ('Ω', &['ω', '℩']),
-    ('ÎȘ', &['ϊ']),
-    ('Ϋ', &['ϋ']),
-    ('ά', &['Ά']),
-    ('έ', &['Έ']),
-    ('μ', &['Ή']),
-    ('ί', &['Ί']),
-    ('ΰ', &['ΰ']),
-    ('α', &['Α']),
-    ('β', &['Β', 'ϐ']),
-    ('γ', &['Γ']),
-    ('δ', &['Δ']),
-    ('ε', &['Ε', 'Ï”']),
-    ('ζ', &['Ζ']),
-    ('η', &['Η']),
-    ('θ', &['Θ', 'ϑ', 'ÏŽ']),
-    ('ι', &['\u{345}', 'Ι', '៟']),
-    ('κ', &['Κ', 'ϰ']),
-    ('λ', &['Λ']),
-    ('μ', &['µ', 'Μ']),
-    ('ν', &['Ν']),
-    ('ξ', &['Ξ']),
-    ('ο', &['Ο']),
-    ('π', &['Π', 'ϖ']),
-    ('ρ', &['Ρ', 'ϱ']),
-    ('ς', &['Σ', 'σ']),
-    ('σ', &['Σ', 'ς']),
-    ('τ', &['Τ']),
-    ('υ', &['Υ']),
-    ('φ', &['Φ', 'ϕ']),
-    ('χ', &['Χ']),
-    ('ψ', &['Ψ']),
-    ('ω', &['Ω', '℩']),
-    ('ϊ', &['ÎȘ']),
-    ('ϋ', &['Ϋ']),
-    ('ό', &['Ό']),
-    ('ύ', &['Ύ']),
-    ('ώ', &['Ώ']),
-    ('Ϗ', &['ϗ']),
-    ('ϐ', &['Β', 'β']),
-    ('ϑ', &['Θ', 'θ', 'ÏŽ']),
-    ('ϕ', &['Φ', 'φ']),
-    ('ϖ', &['Π', 'π']),
-    ('ϗ', &['Ϗ']),
-    ('Ϙ', &['ϙ']),
-    ('ϙ', &['Ϙ']),
-    ('Ϛ', &['ϛ']),
-    ('ϛ', &['Ϛ']),
-    ('Ϝ', &['ϝ']),
-    ('ϝ', &['Ϝ']),
-    ('Ϟ', &['ϟ']),
-    ('ϟ', &['Ϟ']),
-    ('Ï ', &['ÏĄ']),
-    ('ÏĄ', &['Ï ']),
-    ('Ïą', &['ÏŁ']),
-    ('ÏŁ', &['Ïą']),
-    ('Ï€', &['Ï„']),
-    ('Ï„', &['Ï€']),
-    ('ϊ', &['ϧ']),
-    ('ϧ', &['ϊ']),
-    ('Ïš', &['Ï©']),
-    ('Ï©', &['Ïš']),
-    ('ÏȘ', &['Ï«']),
-    ('Ï«', &['ÏȘ']),
-    ('ÏŹ', &['Ï­']),
-    ('Ï­', &['ÏŹ']),
-    ('Ïź', &['ÏŻ']),
-    ('ÏŻ', &['Ïź']),
-    ('ϰ', &['Κ', 'κ']),
-    ('ϱ', &['Ρ', 'ρ']),
-    ('ÏČ', &['Ïč']),
-    ('Ïł', &['Íż']),
-    ('ÏŽ', &['Θ', 'θ', 'ϑ']),
-    ('Ï”', &['Ε', 'ε']),
-    ('Ï·', &['Ïž']),
-    ('Ïž', &['Ï·']),
-    ('Ïč', &['ÏČ']),
-    ('Ïș', &['Ï»']),
-    ('Ï»', &['Ïș']),
-    ('Ϝ', &['ͻ']),
-    ('ϟ', &['͌']),
-    ('Ïż', &['Íœ']),
-    ('Ѐ', &['ѐ']),
-    ('Ё', &['ё']),
-    ('Ђ', &['ђ']),
-    ('Ѓ', &['ѓ']),
-    ('Є', &['є']),
-    ('Ѕ', &['ѕ']),
-    ('І', &['і']),
-    ('Ї', &['ї']),
-    ('Ј', &['ј']),
-    ('Љ', &['љ']),
-    ('Њ', &['њ']),
-    ('Ћ', &['ћ']),
-    ('Ќ', &['ќ']),
-    ('Ѝ', &['ѝ']),
-    ('Ў', &['ў']),
-    ('Џ', &['џ']),
-    ('А', &['а']),
-    ('Б', &['б']),
-    ('В', &['ĐČ', 'áȀ']),
-    ('Г', &['г']),
-    ('Д', &['ĐŽ', 'áȁ']),
-    ('Е', &['Đ”']),
-    ('Ж', &['ж']),
-    ('З', &['Đ·']),
-    ('И', &['О']),
-    ('Й', &['Đč']),
-    ('К', &['Đș']),
-    ('Л', &['Đ»']),
-    ('М', &['ĐŒ']),
-    ('Н', &['Đœ']),
-    ('О', &['ĐŸ', 'áȂ']),
-    ('П', &['п']),
-    ('Р', &['р']),
-    ('ĐĄ', &['с', 'áȃ']),
-    ('Đą', &['т', 'áȄ', 'áȅ']),
-    ('У', &['у']),
-    ('Đ€', &['ф']),
-    ('Đ„', &['х']),
-    ('Щ', &['ц']),
-    ('Ч', &['ч']),
-    ('К', &['ш']),
-    ('Đ©', &['щ']),
-    ('ĐȘ', &['ъ', 'áȆ']),
-    ('Đ«', &['ы']),
-    ('Ь', &['ь']),
-    ('Э', &['э']),
-    ('м', &['ю']),
-    ('Я', &['я']),
-    ('а', &['А']),
-    ('б', &['Б']),
-    ('ĐČ', &['В', 'áȀ']),
-    ('г', &['Г']),
-    ('ĐŽ', &['Д', 'áȁ']),
-    ('Đ”', &['Е']),
-    ('ж', &['Ж']),
-    ('Đ·', &['З']),
-    ('О', &['И']),
-    ('Đč', &['Й']),
-    ('Đș', &['К']),
-    ('Đ»', &['Л']),
-    ('ĐŒ', &['М']),
-    ('Đœ', &['Н']),
-    ('ĐŸ', &['О', 'áȂ']),
-    ('п', &['П']),
-    ('р', &['Р']),
-    ('с', &['ĐĄ', 'áȃ']),
-    ('т', &['Đą', 'áȄ', 'áȅ']),
-    ('у', &['У']),
-    ('ф', &['Đ€']),
-    ('х', &['Đ„']),
-    ('ц', &['Щ']),
-    ('ч', &['Ч']),
-    ('ш', &['К']),
-    ('щ', &['Đ©']),
-    ('ъ', &['ĐȘ', 'áȆ']),
-    ('ы', &['Đ«']),
-    ('ь', &['Ь']),
-    ('э', &['Э']),
-    ('ю', &['м']),
-    ('я', &['Я']),
-    ('ѐ', &['Ѐ']),
-    ('ё', &['Ё']),
-    ('ђ', &['Ђ']),
-    ('ѓ', &['Ѓ']),
-    ('є', &['Є']),
-    ('ѕ', &['Ѕ']),
-    ('і', &['І']),
-    ('ї', &['Ї']),
-    ('ј', &['Ј']),
-    ('љ', &['Љ']),
-    ('њ', &['Њ']),
-    ('ћ', &['Ћ']),
-    ('ќ', &['Ќ']),
-    ('ѝ', &['Ѝ']),
-    ('ў', &['Ў']),
-    ('џ', &['Џ']),
-    ('Ń ', &['ŃĄ']),
-    ('ŃĄ', &['Ń ']),
-    ('Ńą', &['ŃŁ', 'áȇ']),
-    ('ŃŁ', &['Ńą', 'áȇ']),
-    ('Ń€', &['Ń„']),
-    ('Ń„', &['Ń€']),
-    ('ъ', &['ѧ']),
-    ('ѧ', &['ъ']),
-    ('Ńš', &['Ń©']),
-    ('Ń©', &['Ńš']),
-    ('ŃȘ', &['Ń«']),
-    ('Ń«', &['ŃȘ']),
-    ('ŃŹ', &['Ń­']),
-    ('Ń­', &['ŃŹ']),
-    ('Ńź', &['ŃŻ']),
-    ('ŃŻ', &['Ńź']),
-    ('Ѱ', &['ѱ']),
-    ('ѱ', &['Ѱ']),
-    ('ŃČ', &['Ńł']),
-    ('Ńł', &['ŃČ']),
-    ('ŃŽ', &['Ń”']),
-    ('Ń”', &['ŃŽ']),
-    ('Ѷ', &['ѷ']),
-    ('ѷ', &['Ѷ']),
-    ('Ńž', &['Ńč']),
-    ('Ńč', &['Ńž']),
-    ('Ńș', &['Ń»']),
-    ('Ń»', &['Ńș']),
-    ('ŃŒ', &['Ńœ']),
-    ('Ńœ', &['ŃŒ']),
-    ('ŃŸ', &['Ńż']),
-    ('Ńż', &['ŃŸ']),
-    ('Ҁ', &['ҁ']),
-    ('ҁ', &['Ҁ']),
-    ('Ҋ', &['ҋ']),
-    ('ҋ', &['Ҋ']),
-    ('Ҍ', &['ҍ']),
-    ('ҍ', &['Ҍ']),
-    ('Ҏ', &['ҏ']),
-    ('ҏ', &['Ҏ']),
-    ('Ґ', &['ґ']),
-    ('ґ', &['Ґ']),
-    ('Ғ', &['ғ']),
-    ('ғ', &['Ғ']),
-    ('Ҕ', &['ҕ']),
-    ('ҕ', &['Ҕ']),
-    ('Җ', &['җ']),
-    ('җ', &['Җ']),
-    ('Ҙ', &['ҙ']),
-    ('ҙ', &['Ҙ']),
-    ('Қ', &['қ']),
-    ('қ', &['Қ']),
-    ('Ҝ', &['ҝ']),
-    ('ҝ', &['Ҝ']),
-    ('Ҟ', &['ҟ']),
-    ('ҟ', &['Ҟ']),
-    ('Ò ', &['ÒĄ']),
-    ('ÒĄ', &['Ò ']),
-    ('Òą', &['ÒŁ']),
-    ('ÒŁ', &['Òą']),
-    ('Ò€', &['Ò„']),
-    ('Ò„', &['Ò€']),
-    ('ÒŠ', &['Ò§']),
-    ('Ò§', &['ÒŠ']),
-    ('Òš', &['Ò©']),
-    ('Ò©', &['Òš']),
-    ('ÒȘ', &['Ò«']),
-    ('Ò«', &['ÒȘ']),
-    ('ÒŹ', &['Ò­']),
-    ('Ò­', &['ÒŹ']),
-    ('Òź', &['ÒŻ']),
-    ('ÒŻ', &['Òź']),
-    ('Ò°', &['Ò±']),
-    ('Ò±', &['Ò°']),
-    ('ÒČ', &['Òł']),
-    ('Òł', &['ÒČ']),
-    ('ÒŽ', &['Ò”']),
-    ('Ò”', &['ÒŽ']),
-    ('Ò¶', &['Ò·']),
-    ('Ò·', &['Ò¶']),
-    ('Òž', &['Òč']),
-    ('Òč', &['Òž']),
-    ('Òș', &['Ò»']),
-    ('Ò»', &['Òș']),
-    ('Ҍ', &['Ҝ']),
-    ('Ҝ', &['Ҍ']),
-    ('ÒŸ', &['Òż']),
-    ('Òż', &['ÒŸ']),
-    ('Ӏ', &['ӏ']),
-    ('Ӂ', &['ӂ']),
-    ('ӂ', &['Ӂ']),
-    ('Ӄ', &['ӄ']),
-    ('ӄ', &['Ӄ']),
-    ('Ӆ', &['ӆ']),
-    ('ӆ', &['Ӆ']),
-    ('Ӈ', &['ӈ']),
-    ('ӈ', &['Ӈ']),
-    ('Ӊ', &['ӊ']),
-    ('ӊ', &['Ӊ']),
-    ('Ӌ', &['ӌ']),
-    ('ӌ', &['Ӌ']),
-    ('Ӎ', &['ӎ']),
-    ('ӎ', &['Ӎ']),
-    ('ӏ', &['Ӏ']),
-    ('Ӑ', &['ӑ']),
-    ('ӑ', &['Ӑ']),
-    ('Ӓ', &['ӓ']),
-    ('ӓ', &['Ӓ']),
-    ('Ӕ', &['ӕ']),
-    ('ӕ', &['Ӕ']),
-    ('Ӗ', &['ӗ']),
-    ('ӗ', &['Ӗ']),
-    ('Ә', &['ә']),
-    ('ә', &['Ә']),
-    ('Ӛ', &['ӛ']),
-    ('ӛ', &['Ӛ']),
-    ('Ӝ', &['ӝ']),
-    ('ӝ', &['Ӝ']),
-    ('Ӟ', &['ӟ']),
-    ('ӟ', &['Ӟ']),
-    ('Ó ', &['ÓĄ']),
-    ('ÓĄ', &['Ó ']),
-    ('Óą', &['ÓŁ']),
-    ('ÓŁ', &['Óą']),
-    ('Ó€', &['Ó„']),
-    ('Ó„', &['Ó€']),
-    ('ÓŠ', &['Ó§']),
-    ('Ó§', &['ÓŠ']),
-    ('Óš', &['Ó©']),
-    ('Ó©', &['Óš']),
-    ('ÓȘ', &['Ó«']),
-    ('Ó«', &['ÓȘ']),
-    ('ÓŹ', &['Ó­']),
-    ('Ó­', &['ÓŹ']),
-    ('Óź', &['ÓŻ']),
-    ('ÓŻ', &['Óź']),
-    ('Ó°', &['Ó±']),
-    ('Ó±', &['Ó°']),
-    ('ÓČ', &['Ół']),
-    ('Ół', &['ÓČ']),
-    ('ÓŽ', &['Ó”']),
-    ('Ó”', &['ÓŽ']),
-    ('Ó¶', &['Ó·']),
-    ('Ó·', &['Ó¶']),
-    ('Óž', &['Óč']),
-    ('Óč', &['Óž']),
-    ('Óș', &['Ó»']),
-    ('Ó»', &['Óș']),
-    ('ӌ', &['Ӝ']),
-    ('Ӝ', &['ӌ']),
-    ('ÓŸ', &['Óż']),
-    ('Óż', &['ÓŸ']),
-    ('Ԁ', &['ԁ']),
-    ('ԁ', &['Ԁ']),
-    ('Ԃ', &['ԃ']),
-    ('ԃ', &['Ԃ']),
-    ('Ԅ', &['ԅ']),
-    ('ԅ', &['Ԅ']),
-    ('Ԇ', &['ԇ']),
-    ('ԇ', &['Ԇ']),
-    ('Ԉ', &['ԉ']),
-    ('ԉ', &['Ԉ']),
-    ('Ԋ', &['ԋ']),
-    ('ԋ', &['Ԋ']),
-    ('Ԍ', &['ԍ']),
-    ('ԍ', &['Ԍ']),
-    ('Ԏ', &['ԏ']),
-    ('ԏ', &['Ԏ']),
-    ('Ԑ', &['ԑ']),
-    ('ԑ', &['Ԑ']),
-    ('Ԓ', &['ԓ']),
-    ('ԓ', &['Ԓ']),
-    ('Ԕ', &['ԕ']),
-    ('ԕ', &['Ԕ']),
-    ('Ԗ', &['ԗ']),
-    ('ԗ', &['Ԗ']),
-    ('Ԙ', &['ԙ']),
-    ('ԙ', &['Ԙ']),
-    ('Ԛ', &['ԛ']),
-    ('ԛ', &['Ԛ']),
-    ('Ԝ', &['ԝ']),
-    ('ԝ', &['Ԝ']),
-    ('Ԟ', &['ԟ']),
-    ('ԟ', &['Ԟ']),
-    ('Ô ', &['ÔĄ']),
-    ('ÔĄ', &['Ô ']),
-    ('Ôą', &['ÔŁ']),
-    ('ÔŁ', &['Ôą']),
-    ('Ô€', &['Ô„']),
-    ('Ô„', &['Ô€']),
-    ('ÔŠ', &['Ô§']),
-    ('Ô§', &['ÔŠ']),
-    ('Ôš', &['Ô©']),
-    ('Ô©', &['Ôš']),
-    ('ÔȘ', &['Ô«']),
-    ('Ô«', &['ÔȘ']),
-    ('ÔŹ', &['Ô­']),
-    ('Ô­', &['ÔŹ']),
-    ('Ôź', &['ÔŻ']),
-    ('ÔŻ', &['Ôź']),
-    ('Ô±', &['ŐĄ']),
-    ('ÔČ', &['Őą']),
-    ('Ôł', &['ŐŁ']),
-    ('ÔŽ', &['Ő€']),
-    ('Ô”', &['Ő„']),
-    ('Ô¶', &['ŐŠ']),
-    ('Ô·', &['Ő§']),
-    ('Ôž', &['Őš']),
-    ('Ôč', &['Ő©']),
-    ('Ôș', &['ŐȘ']),
-    ('Ô»', &['Ő«']),
-    ('ÔŒ', &['ŐŹ']),
-    ('Ôœ', &['Ő­']),
-    ('ÔŸ', &['Őź']),
-    ('Ôż', &['ŐŻ']),
-    ('Հ', &['հ']),
-    ('Ձ', &['ձ']),
-    ('Ղ', &['ŐČ']),
-    ('Ճ', &['ճ']),
-    ('Մ', &['ծ']),
-    ('Յ', &['Ő”']),
-    ('Ն', &['Ő¶']),
-    ('Շ', &['Ő·']),
-    ('Ո', &['՞']),
-    ('Չ', &['Őč']),
-    ('Պ', &['Őș']),
-    ('Ջ', &['Ő»']),
-    ('Ռ', &['ŐŒ']),
-    ('Ս', &['Őœ']),
-    ('Վ', &['ŐŸ']),
-    ('Տ', &['տ']),
-    ('Ր', &['ր']),
-    ('Ց', &['ց']),
-    ('Ւ', &['ւ']),
-    ('Փ', &['փ']),
-    ('Ք', &['ք']),
-    ('Օ', &['օ']),
-    ('Ֆ', &['ֆ']),
-    ('ŐĄ', &['Ô±']),
-    ('Őą', &['ÔČ']),
-    ('ŐŁ', &['Ôł']),
-    ('Ő€', &['ÔŽ']),
-    ('Ő„', &['Ô”']),
-    ('ŐŠ', &['Ô¶']),
-    ('Ő§', &['Ô·']),
-    ('Őš', &['Ôž']),
-    ('Ő©', &['Ôč']),
-    ('ŐȘ', &['Ôș']),
-    ('Ő«', &['Ô»']),
-    ('ŐŹ', &['ÔŒ']),
-    ('Ő­', &['Ôœ']),
-    ('Őź', &['ÔŸ']),
-    ('ŐŻ', &['Ôż']),
-    ('հ', &['Հ']),
-    ('ձ', &['Ձ']),
-    ('ŐČ', &['Ղ']),
-    ('ճ', &['Ճ']),
-    ('ծ', &['Մ']),
-    ('Ő”', &['Յ']),
-    ('Ő¶', &['Ն']),
-    ('Ő·', &['Շ']),
-    ('՞', &['Ո']),
-    ('Őč', &['Չ']),
-    ('Őș', &['Պ']),
-    ('Ő»', &['Ջ']),
-    ('ŐŒ', &['Ռ']),
-    ('Őœ', &['Ս']),
-    ('ŐŸ', &['Վ']),
-    ('տ', &['Տ']),
-    ('ր', &['Ր']),
-    ('ց', &['Ց']),
-    ('ւ', &['Ւ']),
-    ('փ', &['Փ']),
-    ('ք', &['Ք']),
-    ('օ', &['Օ']),
-    ('ֆ', &['Ֆ']),
-    ('Ⴀ', &['⮀']),
-    ('Ⴁ', &['⮁']),
-    ('Ⴑ', &['⮂']),
-    ('Ⴃ', &['⮃']),
-    ('á‚€', &['⮄']),
-    ('á‚„', &['⮅']),
-    ('Ⴉ', &['⮆']),
-    ('Ⴇ', &['⮇']),
-    ('Ⴙ', &['⮈']),
-    ('Ⴉ', &['⮉']),
-    ('á‚Ș', &['⮊']),
-    ('Ⴋ', &['⮋']),
-    ('Ⴌ', &['⮌']),
-    ('Ⴍ', &['⮍']),
-    ('Ⴜ', &['⮎']),
-    ('Ⴏ', &['⮏']),
-    ('Ⴐ', &['⮐']),
-    ('Ⴑ', &['⮑']),
-    ('á‚Č', &['⮒']),
-    ('Ⴓ', &['⮓']),
-    ('Ⴎ', &['⮔']),
-    ('á‚”', &['⮕']),
-    ('Ⴖ', &['⮖']),
-    ('Ⴗ', &['⮗']),
-    ('Ⴞ', &['⮘']),
-    ('á‚č', &['⮙']),
-    ('á‚ș', &['⮚']),
-    ('Ⴛ', &['⮛']),
-    ('ႌ', &['⮜']),
-    ('ႜ', &['⎝']),
-    ('႟', &['⮞']),
-    ('Ⴟ', &['⮟']),
-    ('Ⴠ', &['⮠']),
-    ('Ⴡ', &['⎥']),
-    ('Ⴢ', &['⮱']),
-    ('Ⴣ', &['⎣']),
-    ('Ⴤ', &['⎀']),
-    ('Ⴥ', &['⎄']),
-    ('Ⴧ', &['⮧']),
-    ('Ⴭ', &['⮭']),
-    ('ა', &['áȐ']),
-    ('ბ', &['áȑ']),
-    ('გ', &['áȒ']),
-    ('დ', &['áȓ']),
-    ('ე', &['áȔ']),
-    ('ვ', &['áȕ']),
-    ('ზ', &['áȖ']),
-    ('თ', &['áȗ']),
-    ('ი', &['áȘ']),
-    ('კ', &['áș']),
-    ('ლ', &['áȚ']),
-    ('მ', &['áț']),
-    ('ნ', &['áȜ']),
-    ('ო', &['áȝ']),
-    ('პ', &['áȞ']),
-    ('ჟ', &['áȟ']),
-    ('რ', &['áČ ']),
-    ('ქ', &['áČĄ']),
-    ('ჹ', &['áČą']),
-    ('უ', &['áČŁ']),
-    ('Ⴠ', &['áČ€']),
-    ('Ⴤ', &['áČ„']),
-    ('჊', &['áČŠ']),
-    ('ყ', &['áȧ']),
-    ('ლ', &['áČš']),
-    ('ჩ', &['áČ©']),
-    ('áƒȘ', &['áČȘ']),
-    ('ძ', &['áČ«']),
-    ('჏', &['áČŹ']),
-    ('ჭ', &['áČ­']),
-    ('ჟ', &['áČź']),
-    ('ჯ', &['áČŻ']),
-    ('ჰ', &['áȰ']),
-    ('ჱ', &['áȱ']),
-    ('áƒČ', &['áČČ']),
-    ('ჳ', &['áČł']),
-    ('჎', &['áČŽ']),
-    ('ე', &['áČ”']),
-    ('ჶ', &['áȶ']),
-    ('ჷ', &['áČ·']),
-    ('პ', &['áČž']),
-    ('áƒč', &['áČč']),
-    ('áƒș', &['áČș']),
-    ('ნ', &['áČœ']),
-    ('ჟ', &['áČŸ']),
-    ('ჿ', &['áČż']),
-    ('Ꭰ', &['ꭰ']),
-    ('Ꭱ', &['ê­±']),
-    ('Ꮁ', &['ê­Č']),
-    ('Ꭳ', &['ê­ł']),
-    ('ᎀ', &['ê­Ž']),
-    ('ᎄ', &['ꭔ']),
-    ('ᎊ', &['ꭶ']),
-    ('Ꭷ', &['ꭷ']),
-    ('᎚', &['ê­ž']),
-    ('Ꭹ', &['ê­č']),
-    ('áŽȘ', &['ê­ș']),
-    ('Ꭻ', &['ꭻ']),
-    ('Ꭼ', &['ê­Œ']),
-    ('Ꭽ', &['ꭜ']),
-    ('Ꮌ', &['ê­Ÿ']),
-    ('Ꭿ', &['ê­ż']),
-    ('Ꮀ', &['êź€']),
-    ('Ꮁ', &['êź']),
-    ('áŽČ', &['êź‚']),
-    ('Ꮃ', &['êźƒ']),
-    ('Ꭾ', &['êź„']),
-    ('᎔', &['êź…']),
-    ('Ꮆ', &['êź†']),
-    ('Ꮇ', &['êź‡']),
-    ('Ꮎ', &['êźˆ']),
-    ('áŽč', &['êź‰']),
-    ('áŽș', &['êźŠ']),
-    ('Ꮋ', &['êź‹']),
-    ('ᎌ', &['êźŒ']),
-    ('᎜', &['êź']),
-    ('᎟', &['êźŽ']),
-    ('Ꮏ', &['êź']),
-    ('Ꮐ', &['êź']),
-    ('Ꮑ', &['êź‘']),
-    ('Ꮒ', &['êź’']),
-    ('Ꮓ', &['êź“']),
-    ('Ꮔ', &['êź”']),
-    ('Ꮕ', &['êź•']),
-    ('Ꮖ', &['êź–']),
-    ('Ꮗ', &['êź—']),
-    ('Ꮘ', &['êź˜']),
-    ('Ꮙ', &['êź™']),
-    ('Ꮚ', &['êźš']),
-    ('Ꮛ', &['êź›']),
-    ('Ꮜ', &['êźœ']),
-    ('Ꮝ', &['êź']),
-    ('Ꮞ', &['êźž']),
-    ('Ꮟ', &['êźŸ']),
-    ('Ꮠ', &['êź ']),
-    ('Ꮡ', &['êźĄ']),
-    ('Ꮢ', &['êźą']),
-    ('Ꮣ', &['êźŁ']),
-    ('Ꮤ', &['êź€']),
-    ('Ꮥ', &['êź„']),
-    ('Ꮦ', &['êźŠ']),
-    ('Ꮧ', &['êź§']),
-    ('Ꮨ', &['êźš']),
-    ('Ꮩ', &['êź©']),
-    ('Ꮪ', &['êźȘ']),
-    ('Ꮫ', &['êź«']),
-    ('Ꮬ', &['êźŹ']),
-    ('Ꮭ', &['êź­']),
-    ('Ꮮ', &['êźź']),
-    ('Ꮯ', &['êźŻ']),
-    ('Ꮰ', &['êź°']),
-    ('Ꮱ', &['êź±']),
-    ('Ᏹ', &['êźČ']),
-    ('Ꮳ', &['êźł']),
-    ('Ꮐ', &['êźŽ']),
-    ('Ꮔ', &['êź”']),
-    ('Ꮚ', &['êź¶']),
-    ('Ꮷ', &['êź·']),
-    ('Ꮪ', &['êźž']),
-    ('Ꮹ', &['êźč']),
-    ('áȘ', &['êźș']),
-    ('Ꮻ', &['êź»']),
-    ('Ꮼ', &['êźŒ']),
-    ('Ꮽ', &['êźœ']),
-    ('ᏼ', &['êźŸ']),
-    ('Ꮿ', &['êźż']),
-    ('Ᏸ', &['Ꮮ']),
-    ('Ᏹ', &['áč']),
-    ('áČ', &['áș']),
-    ('Ᏻ', &['ᏻ']),
-    ('Ꮞ', &['Ꮜ']),
-    ('Ꮤ', &['Ꮬ']),
-    ('Ꮮ', &['Ᏸ']),
-    ('áč', &['Ᏹ']),
-    ('áș', &['áČ']),
-    ('ᏻ', &['Ᏻ']),
-    ('Ꮜ', &['Ꮞ']),
-    ('Ꮬ', &['Ꮤ']),
-    ('áȀ', &['В', 'ĐČ']),
-    ('áȁ', &['Д', 'ĐŽ']),
-    ('áȂ', &['О', 'ĐŸ']),
-    ('áȃ', &['ĐĄ', 'с']),
-    ('áȄ', &['Đą', 'т', 'áȅ']),
-    ('áȅ', &['Đą', 'т', 'áȄ']),
-    ('áȆ', &['ĐȘ', 'ъ']),
-    ('áȇ', &['Ńą', 'ŃŁ']),
-    ('áȈ', &['Ꙋ', 'ꙋ']),
-    ('áȉ', &['áȊ']),
-    ('áȊ', &['áȉ']),
-    ('áȐ', &['ა']),
-    ('áȑ', &['ბ']),
-    ('áȒ', &['გ']),
-    ('áȓ', &['დ']),
-    ('áȔ', &['ე']),
-    ('áȕ', &['ვ']),
-    ('áȖ', &['ზ']),
-    ('áȗ', &['თ']),
-    ('áȘ', &['ი']),
-    ('áș', &['კ']),
-    ('áȚ', &['ლ']),
-    ('áț', &['მ']),
-    ('áȜ', &['ნ']),
-    ('áȝ', &['ო']),
-    ('áȞ', &['პ']),
-    ('áȟ', &['ჟ']),
-    ('áČ ', &['რ']),
-    ('áČĄ', &['ქ']),
-    ('áČą', &['ჹ']),
-    ('áČŁ', &['უ']),
-    ('áČ€', &['Ⴠ']),
-    ('áČ„', &['Ⴤ']),
-    ('áČŠ', &['჊']),
-    ('áȧ', &['ყ']),
-    ('áČš', &['ლ']),
-    ('áČ©', &['ჩ']),
-    ('áČȘ', &['áƒȘ']),
-    ('áČ«', &['ძ']),
-    ('áČŹ', &['჏']),
-    ('áČ­', &['ჭ']),
-    ('áČź', &['ჟ']),
-    ('áČŻ', &['ჯ']),
-    ('áȰ', &['ჰ']),
-    ('áȱ', &['ჱ']),
-    ('áČČ', &['áƒČ']),
-    ('áČł', &['ჳ']),
-    ('áČŽ', &['჎']),
-    ('áČ”', &['ე']),
-    ('áȶ', &['ჶ']),
-    ('áČ·', &['ჷ']),
-    ('áČž', &['პ']),
-    ('áČč', &['áƒč']),
-    ('áČș', &['áƒș']),
-    ('áČœ', &['ნ']),
-    ('áČŸ', &['ჟ']),
-    ('áČż', &['ჿ']),
-    ('á”č', &['Ꝝ']),
-    ('ᔜ', &['Ᵽ']),
-    ('ᶎ', &['Ᶎ']),
-    ('ᾀ', &['ᾁ']),
-    ('ᾁ', &['ᾀ']),
-    ('ᾂ', &['ᾃ']),
-    ('ᾃ', &['ᾂ']),
-    ('ᾄ', &['ᾅ']),
-    ('ᾅ', &['ᾄ']),
-    ('ᾆ', &['ᾇ']),
-    ('ᾇ', &['ᾆ']),
-    ('ᾈ', &['ᾉ']),
-    ('ᾉ', &['ᾈ']),
-    ('ᾊ', &['ᾋ']),
-    ('ᾋ', &['ᾊ']),
-    ('ᾌ', &['ᾍ']),
-    ('ᾍ', &['ᾌ']),
-    ('ᾎ', &['ᾏ']),
-    ('ᾏ', &['ᾎ']),
-    ('ᾐ', &['ᾑ']),
-    ('ᾑ', &['ᾐ']),
-    ('ᾒ', &['ᾓ']),
-    ('ᾓ', &['ᾒ']),
-    ('ᾔ', &['ᾕ']),
-    ('ᾕ', &['ᾔ']),
-    ('ᾖ', &['ᾗ']),
-    ('ᾗ', &['ᾖ']),
-    ('ᾘ', &['ᾙ']),
-    ('ᾙ', &['ᾘ']),
-    ('ᾚ', &['ᾛ']),
-    ('ᾛ', &['ᾚ']),
-    ('ᾜ', &['ᾝ']),
-    ('ᾝ', &['ᾜ']),
-    ('ᾞ', &['ᾟ']),
-    ('ᾟ', &['ᾞ']),
-    ('ហ', &['ឥ']),
-    ('ឥ', &['ហ']),
-    ('ឹ', &['ឣ']),
-    ('ឣ', &['ឹ']),
-    ('ក', &['ង']),
-    ('ង', &['ក']),
-    ('ដ', &['ឧ']),
-    ('ឧ', &['ដ']),
-    ('ážš', &['áž©']),
-    ('áž©', &['ážš']),
-    ('ážȘ', &['áž«']),
-    ('áž«', &['ážȘ']),
-    ('ត', &['ឭ']),
-    ('ឭ', &['ត']),
-    ('ស', &['ឯ']),
-    ('ឯ', &['ស']),
-    ('áž°', &['áž±']),
-    ('áž±', &['áž°']),
-    ('ážČ', &['ážł']),
-    ('ážł', &['ážČ']),
-    ('ណ', &['ប']),
-    ('ប', &['ណ']),
-    ('áž¶', &['áž·']),
-    ('áž·', &['áž¶']),
-    ('ážž', &['ážč']),
-    ('ážč', &['ážž']),
-    ('ážș', &['áž»']),
-    ('áž»', &['ážș']),
-    ('ឌ', &['វ']),
-    ('វ', &['ឌ']),
-    ('ស', &['ážż']),
-    ('ážż', &['ស']),
-    ('áč€', &['áč']),
-    ('áč', &['áč€']),
-    ('áč‚', &['áčƒ']),
-    ('áčƒ', &['áč‚']),
-    ('áč„', &['áč…']),
-    ('áč…', &['áč„']),
-    ('áč†', &['áč‡']),
-    ('áč‡', &['áč†']),
-    ('áčˆ', &['áč‰']),
-    ('áč‰', &['áčˆ']),
-    ('áčŠ', &['áč‹']),
-    ('áč‹', &['áčŠ']),
-    ('áčŒ', &['áč']),
-    ('áč', &['áčŒ']),
-    ('áčŽ', &['áč']),
-    ('áč', &['áčŽ']),
-    ('áč', &['áč‘']),
-    ('áč‘', &['áč']),
-    ('áč’', &['áč“']),
-    ('áč“', &['áč’']),
-    ('áč”', &['áč•']),
-    ('áč•', &['áč”']),
-    ('áč–', &['áč—']),
-    ('áč—', &['áč–']),
-    ('áč˜', &['áč™']),
-    ('áč™', &['áč˜']),
-    ('áčš', &['áč›']),
-    ('áč›', &['áčš']),
-    ('áčœ', &['áč']),
-    ('áč', &['áčœ']),
-    ('áčž', &['áčŸ']),
-    ('áčŸ', &['áčž']),
-    ('áč ', &['áčĄ', 'áș›']),
-    ('áčĄ', &['áč ', 'áș›']),
-    ('áčą', &['áčŁ']),
-    ('áčŁ', &['áčą']),
-    ('áč€', &['áč„']),
-    ('áč„', &['áč€']),
-    ('áčŠ', &['áč§']),
-    ('áč§', &['áčŠ']),
-    ('áčš', &['áč©']),
-    ('áč©', &['áčš']),
-    ('áčȘ', &['áč«']),
-    ('áč«', &['áčȘ']),
-    ('áčŹ', &['áč­']),
-    ('áč­', &['áčŹ']),
-    ('áčź', &['áčŻ']),
-    ('áčŻ', &['áčź']),
-    ('áč°', &['áč±']),
-    ('áč±', &['áč°']),
-    ('áčČ', &['áčł']),
-    ('áčł', &['áčČ']),
-    ('áčŽ', &['áč”']),
-    ('áč”', &['áčŽ']),
-    ('áč¶', &['áč·']),
-    ('áč·', &['áč¶']),
-    ('áčž', &['áčč']),
-    ('áčč', &['áčž']),
-    ('áčș', &['áč»']),
-    ('áč»', &['áčș']),
-    ('áčŒ', &['áčœ']),
-    ('áčœ', &['áčŒ']),
-    ('áčŸ', &['áčż']),
-    ('áčż', &['áčŸ']),
-    ('áș€', &['áș']),
-    ('áș', &['áș€']),
-    ('áș‚', &['áșƒ']),
-    ('áșƒ', &['áș‚']),
-    ('áș„', &['áș…']),
-    ('áș…', &['áș„']),
-    ('áș†', &['áș‡']),
-    ('áș‡', &['áș†']),
-    ('áșˆ', &['áș‰']),
-    ('áș‰', &['áșˆ']),
-    ('áșŠ', &['áș‹']),
-    ('áș‹', &['áșŠ']),
-    ('áșŒ', &['áș']),
-    ('áș', &['áșŒ']),
-    ('áșŽ', &['áș']),
-    ('áș', &['áșŽ']),
-    ('áș', &['áș‘']),
-    ('áș‘', &['áș']),
-    ('áș’', &['áș“']),
-    ('áș“', &['áș’']),
-    ('áș”', &['áș•']),
-    ('áș•', &['áș”']),
-    ('áș›', &['áč ', 'áčĄ']),
-    ('áșž', &['ß']),
-    ('áș ', &['áșĄ']),
-    ('áșĄ', &['áș ']),
-    ('áșą', &['áșŁ']),
-    ('áșŁ', &['áșą']),
-    ('áș€', &['áș„']),
-    ('áș„', &['áș€']),
-    ('áșŠ', &['áș§']),
-    ('áș§', &['áșŠ']),
-    ('áșš', &['áș©']),
-    ('áș©', &['áșš']),
-    ('áșȘ', &['áș«']),
-    ('áș«', &['áșȘ']),
-    ('áșŹ', &['áș­']),
-    ('áș­', &['áșŹ']),
-    ('áșź', &['áșŻ']),
-    ('áșŻ', &['áșź']),
-    ('áș°', &['áș±']),
-    ('áș±', &['áș°']),
-    ('áșČ', &['áșł']),
-    ('áșł', &['áșČ']),
-    ('áșŽ', &['áș”']),
-    ('áș”', &['áșŽ']),
-    ('áș¶', &['áș·']),
-    ('áș·', &['áș¶']),
-    ('áșž', &['áșč']),
-    ('áșč', &['áșž']),
-    ('áșș', &['áș»']),
-    ('áș»', &['áșș']),
-    ('áșŒ', &['áșœ']),
-    ('áșœ', &['áșŒ']),
-    ('áșŸ', &['áșż']),
-    ('áșż', &['áșŸ']),
-    ('Ề', &['ề']),
-    ('ề', &['Ề']),
-    ('Ể', &['ể']),
-    ('ể', &['Ể']),
-    ('Ễ', &['ễ']),
-    ('ễ', &['Ễ']),
-    ('Ệ', &['ệ']),
-    ('ệ', &['Ệ']),
-    ('Ỉ', &['ỉ']),
-    ('ỉ', &['Ỉ']),
-    ('Ị', &['ị']),
-    ('ị', &['Ị']),
-    ('Ọ', &['ọ']),
-    ('ọ', &['Ọ']),
-    ('Ỏ', &['ỏ']),
-    ('ỏ', &['Ỏ']),
-    ('Ố', &['ố']),
-    ('ố', &['Ố']),
-    ('Ồ', &['ồ']),
-    ('ồ', &['Ồ']),
-    ('Ổ', &['ổ']),
-    ('ổ', &['Ổ']),
-    ('Ỗ', &['ỗ']),
-    ('ỗ', &['Ỗ']),
-    ('Ộ', &['ộ']),
-    ('ộ', &['Ộ']),
-    ('Ớ', &['ớ']),
-    ('ớ', &['Ớ']),
-    ('Ờ', &['ờ']),
-    ('ờ', &['Ờ']),
-    ('Ở', &['ở']),
-    ('ở', &['Ở']),
-    ('Ỡ', &['ụ']),
-    ('ụ', &['Ỡ']),
-    ('ỹ', &['ợ']),
-    ('ợ', &['ỹ']),
-    ('Ề', &['Ễ']),
-    ('Ễ', &['Ề']),
-    ('Ị', &['ủ']),
-    ('ủ', &['Ị']),
-    ('Ớ', &['ứ']),
-    ('ứ', &['Ớ']),
-    ('á»Ș', &['ừ']),
-    ('ừ', &['á»Ș']),
-    ('ỏ', &['ử']),
-    ('ử', &['ỏ']),
-    ('ở', &['ữ']),
-    ('ữ', &['ở']),
-    ('á»°', &['á»±']),
-    ('á»±', &['á»°']),
-    ('á»Č', &['ỳ']),
-    ('ỳ', &['á»Č']),
-    ('Ỏ', &['Ổ']),
-    ('Ổ', &['Ỏ']),
-    ('á»¶', &['á»·']),
-    ('á»·', &['á»¶']),
-    ('Ở', &['á»č']),
-    ('á»č', &['Ở']),
-    ('á»ș', &['á»»']),
-    ('á»»', &['á»ș']),
-    ('Ọ', &['Ờ']),
-    ('Ờ', &['Ọ']),
-    ('ở', &['ỿ']),
-    ('ỿ', &['ở']),
-    ('ጀ', &['ገ']),
-    ('ጁ', &['ጉ']),
-    ('ጂ', &['ጊ']),
-    ('ጃ', &['ጋ']),
-    ('ጄ', &['ጌ']),
-    ('ጅ', &['ግ']),
-    ('ጆ', &['ጎ']),
-    ('ጇ', &['ጏ']),
-    ('ገ', &['ጀ']),
-    ('ጉ', &['ጁ']),
-    ('ጊ', &['ጂ']),
-    ('ጋ', &['ጃ']),
-    ('ጌ', &['ጄ']),
-    ('ግ', &['ጅ']),
-    ('ጎ', &['ጆ']),
-    ('ጏ', &['ጇ']),
-    ('ጐ', &['ጘ']),
-    ('጑', &['ጙ']),
-    ('ጒ', &['ጚ']),
-    ('ጓ', &['ጛ']),
-    ('ጔ', &['ጜ']),
-    ('ጕ', &['ጝ']),
-    ('ጘ', &['ጐ']),
-    ('ጙ', &['጑']),
-    ('ጚ', &['ጒ']),
-    ('ጛ', &['ጓ']),
-    ('ጜ', &['ጔ']),
-    ('ጝ', &['ጕ']),
-    ('ጠ', &['ጚ']),
-    ('áŒĄ', &['ጩ']),
-    ('áŒą', &['áŒȘ']),
-    ('áŒŁ', &['ጫ']),
-    ('ጀ', &['áŒŹ']),
-    ('ጄ', &['ጭ']),
-    ('ጊ', &['áŒź']),
-    ('ጧ', &['áŒŻ']),
-    ('ጚ', &['ጠ']),
-    ('ጩ', &['áŒĄ']),
-    ('áŒȘ', &['áŒą']),
-    ('ጫ', &['áŒŁ']),
-    ('áŒŹ', &['ጀ']),
-    ('ጭ', &['ጄ']),
-    ('áŒź', &['ጊ']),
-    ('áŒŻ', &['ጧ']),
-    ('ጰ', &['ጞ']),
-    ('ጱ', &['áŒč']),
-    ('áŒČ', &['áŒș']),
-    ('áŒł', &['ጻ']),
-    ('ጎ', &['ጌ']),
-    ('ጔ', &['ጜ']),
-    ('ጶ', &['ጟ']),
-    ('ጷ', &['áŒż']),
-    ('ጞ', &['ጰ']),
-    ('áŒč', &['ጱ']),
-    ('áŒș', &['áŒČ']),
-    ('ጻ', &['áŒł']),
-    ('ጌ', &['ጎ']),
-    ('ጜ', &['ጔ']),
-    ('ጟ', &['ጶ']),
-    ('áŒż', &['ጷ']),
-    ('ᜀ', &['ᜈ']),
-    ('ᜁ', &['ᜉ']),
-    ('ᜂ', &['ᜊ']),
-    ('ᜃ', &['ᜋ']),
-    ('ᜄ', &['ᜌ']),
-    ('ᜅ', &['ᜍ']),
-    ('ᜈ', &['ᜀ']),
-    ('ᜉ', &['ᜁ']),
-    ('ᜊ', &['ᜂ']),
-    ('ᜋ', &['ᜃ']),
-    ('ᜌ', &['ᜄ']),
-    ('ᜍ', &['ᜅ']),
-    ('ᜑ', &['᜙']),
-    ('ᜓ', &['᜛']),
-    ('᜕', &['᜝']),
-    ('᜗', &['ᜟ']),
-    ('᜙', &['ᜑ']),
-    ('᜛', &['ᜓ']),
-    ('᜝', &['᜕']),
-    ('ᜟ', &['᜗']),
-    ('ᜠ', &['᜚']),
-    ('áœĄ', &['ᜩ']),
-    ('áœą', &['áœȘ']),
-    ('áœŁ', &['ᜫ']),
-    ('ᜀ', &['áœŹ']),
-    ('ᜄ', &['ᜭ']),
-    ('ᜊ', &['áœź']),
-    ('ᜧ', &['áœŻ']),
-    ('᜚', &['ᜠ']),
-    ('ᜩ', &['áœĄ']),
-    ('áœȘ', &['áœą']),
-    ('ᜫ', &['áœŁ']),
-    ('áœŹ', &['ᜀ']),
-    ('ᜭ', &['ᜄ']),
-    ('áœź', &['ᜊ']),
-    ('áœŻ', &['ᜧ']),
-    ('ᜰ', &['áŸș']),
-    ('ᜱ', &['៻']),
-    ('áœČ', &['Ὲ']),
-    ('áœł', &['Έ']),
-    ('ᜎ', &['Ὴ']),
-    ('᜔', &['Ή']),
-    ('᜶', &['Ὶ']),
-    ('᜷', &['Ί']),
-    ('᜞', &['áżž']),
-    ('áœč', &['áżč']),
-    ('áœș', &['áżȘ']),
-    ('᜻', &['áż«']),
-    ('ᜌ', &['áżș']),
-    ('᜜', &['áż»']),
-    ('ៀ', &['ៈ']),
-    ('េ', &['៉']),
-    ('ែ', &['៊']),
-    ('ៃ', &['់']),
-    ('ោ', &['៌']),
-    ('ៅ', &['៍']),
-    ('ំ', &['៎']),
-    ('ះ', &['៏']),
-    ('ៈ', &['ៀ']),
-    ('៉', &['េ']),
-    ('៊', &['ែ']),
-    ('់', &['ៃ']),
-    ('៌', &['ោ']),
-    ('៍', &['ៅ']),
-    ('៎', &['ំ']),
-    ('៏', &['ះ']),
-    ('័', &['៘']),
-    ('៑', &['៙']),
-    ('្', &['៚']),
-    ('៓', &['៛']),
-    ('។', &['ៜ']),
-    ('៕', &['៝']),
-    ('៖', &['៞']),
-    ('ៗ', &['៟']),
-    ('៘', &['័']),
-    ('៙', &['៑']),
-    ('៚', &['្']),
-    ('៛', &['៓']),
-    ('ៜ', &['។']),
-    ('៝', &['៕']),
-    ('៞', &['៖']),
-    ('៟', &['ៗ']),
-    ('០', &['៚']),
-    ('áŸĄ', &['៩']),
-    ('áŸą', &['áŸȘ']),
-    ('áŸŁ', &['៫']),
-    ('ៀ', &['áŸŹ']),
-    ('ោ', &['៭']),
-    ('៊', &['áŸź']),
-    ('៧', &['áŸŻ']),
-    ('៚', &['០']),
-    ('៩', &['áŸĄ']),
-    ('áŸȘ', &['áŸą']),
-    ('៫', &['áŸŁ']),
-    ('áŸŹ', &['ៀ']),
-    ('៭', &['ោ']),
-    ('áŸź', &['៊']),
-    ('áŸŻ', &['៧']),
-    ('៰', &['៞']),
-    ('៱', &['áŸč']),
-    ('áŸł', &['៌']),
-    ('៞', &['៰']),
-    ('áŸč', &['៱']),
-    ('áŸș', &['ᜰ']),
-    ('៻', &['ᜱ']),
-    ('៌', &['áŸł']),
-    ('៟', &['\u{345}', 'Ι', 'ι']),
-    ('ῃ', &['ῌ']),
-    ('Ὲ', &['áœČ']),
-    ('Έ', &['áœł']),
-    ('Ὴ', &['ᜎ']),
-    ('Ή', &['᜔']),
-    ('ῌ', &['ῃ']),
-    ('ῐ', &['Ῐ']),
-    ('ῑ', &['Ῑ']),
-    ('ΐ', &['ΐ']),
-    ('Ῐ', &['ῐ']),
-    ('Ῑ', &['ῑ']),
-    ('Ὶ', &['᜶']),
-    ('Ί', &['᜷']),
-    ('áż ', &['áżš']),
-    ('ῥ', &['Ῡ']),
-    ('ΰ', &['ΰ']),
-    ('ῄ', &['῏']),
-    ('áżš', &['áż ']),
-    ('Ῡ', &['ῥ']),
-    ('áżȘ', &['áœș']),
-    ('áż«', &['᜻']),
-    ('῏', &['ῄ']),
-    ('áżł', &['áżŒ']),
-    ('áżž', &['᜞']),
-    ('áżč', &['áœč']),
-    ('áżș', &['ᜌ']),
-    ('áż»', &['᜜']),
-    ('áżŒ', &['áżł']),
-    ('℩', &['Ω', 'ω']),
-    ('â„Ș', &['K', 'k']),
-    ('Å', &['Å', 'å']),
-    ('â„Č', &['ⅎ']),
-    ('ⅎ', &['â„Č']),
-    ('Ⅰ', &['ⅰ']),
-    ('Ⅱ', &['ⅱ']),
-    ('ⅱ', &['â…Č']),
-    ('Ⅳ', &['ⅳ']),
-    ('â…€', &['Ⅾ']),
-    ('â…„', &['â…”']),
-    ('Ⅹ', &['ⅶ']),
-    ('Ⅷ', &['ⅷ']),
-    ('ⅹ', &['ⅾ']),
-    ('Ⅹ', &['â…č']),
-    ('â…Ș', &['â…ș']),
-    ('Ⅻ', &['ⅻ']),
-    ('Ⅼ', &['â…Œ']),
-    ('Ⅽ', &['â…œ']),
-    ('ⅼ', &['â…Ÿ']),
-    ('Ⅿ', &['ⅿ']),
-    ('ⅰ', &['Ⅰ']),
-    ('ⅱ', &['Ⅱ']),
-    ('â…Č', &['ⅱ']),
-    ('ⅳ', &['Ⅳ']),
-    ('Ⅾ', &['â…€']),
-    ('â…”', &['â…„']),
-    ('ⅶ', &['Ⅹ']),
-    ('ⅷ', &['Ⅷ']),
-    ('ⅾ', &['ⅹ']),
-    ('â…č', &['Ⅹ']),
-    ('â…ș', &['â…Ș']),
-    ('ⅻ', &['Ⅻ']),
-    ('â…Œ', &['Ⅼ']),
-    ('â…œ', &['Ⅽ']),
-    ('â…Ÿ', &['ⅼ']),
-    ('ⅿ', &['Ⅿ']),
-    ('Ↄ', &['ↄ']),
-    ('ↄ', &['Ↄ']),
-    ('Ⓐ', &['ⓐ']),
-    ('Ⓑ', &['ⓑ']),
-    ('Ⓘ', &['ⓒ']),
-    ('â’č', &['ⓓ']),
-    ('â’ș', &['ⓔ']),
-    ('Ⓕ', &['ⓕ']),
-    ('â’Œ', &['ⓖ']),
-    ('â’œ', &['ⓗ']),
-    ('â’Ÿ', &['ⓘ']),
-    ('Ⓙ', &['ⓙ']),
-    ('Ⓚ', &['ⓚ']),
-    ('Ⓛ', &['ⓛ']),
-    ('Ⓜ', &['ⓜ']),
-    ('Ⓝ', &['ⓝ']),
-    ('Ⓞ', &['ⓞ']),
-    ('Ⓟ', &['ⓟ']),
-    ('Ⓠ', &['ⓠ']),
-    ('Ⓡ', &['ⓡ']),
-    ('Ⓢ', &['⓱']),
-    ('Ⓣ', &['ⓣ']),
-    ('Ⓤ', &['â“€']),
-    ('Ⓥ', &['â“„']),
-    ('Ⓦ', &['ⓩ']),
-    ('Ⓧ', &['ⓧ']),
-    ('Ⓨ', &['⓹']),
-    ('Ⓩ', &['ⓩ']),
-    ('ⓐ', &['Ⓐ']),
-    ('ⓑ', &['Ⓑ']),
-    ('ⓒ', &['Ⓘ']),
-    ('ⓓ', &['â’č']),
-    ('ⓔ', &['â’ș']),
-    ('ⓕ', &['Ⓕ']),
-    ('ⓖ', &['â’Œ']),
-    ('ⓗ', &['â’œ']),
-    ('ⓘ', &['â’Ÿ']),
-    ('ⓙ', &['Ⓙ']),
-    ('ⓚ', &['Ⓚ']),
-    ('ⓛ', &['Ⓛ']),
-    ('ⓜ', &['Ⓜ']),
-    ('ⓝ', &['Ⓝ']),
-    ('ⓞ', &['Ⓞ']),
-    ('ⓟ', &['Ⓟ']),
-    ('ⓠ', &['Ⓠ']),
-    ('ⓡ', &['Ⓡ']),
-    ('⓱', &['Ⓢ']),
-    ('ⓣ', &['Ⓣ']),
-    ('â“€', &['Ⓤ']),
-    ('â“„', &['Ⓥ']),
-    ('ⓩ', &['Ⓦ']),
-    ('ⓧ', &['Ⓧ']),
-    ('⓹', &['Ⓨ']),
-    ('ⓩ', &['Ⓩ']),
-    ('Ⰰ', &['ⰰ']),
-    ('Ⰱ', &['ⰱ']),
-    ('Ⰲ', &['â°Č']),
-    ('Ⰳ', &['ⰳ']),
-    ('Ⰴ', &['Ⱞ']),
-    ('Ⰵ', &['â°”']),
-    ('Ⰶ', &['ⰶ']),
-    ('Ⰷ', &['ⰷ']),
-    ('Ⰸ', &['Ⱎ']),
-    ('Ⰹ', &['â°č']),
-    ('Ⰺ', &['â°ș']),
-    ('Ⰻ', &['ⰻ']),
-    ('Ⰼ', &['â°Œ']),
-    ('Ⰽ', &['Ⱌ']),
-    ('Ⰾ', &['Ⱏ']),
-    ('Ⰿ', &['ⰿ']),
-    ('Ⱀ', &['ⱀ']),
-    ('Ⱁ', &['ⱁ']),
-    ('Ⱂ', &['ⱂ']),
-    ('Ⱃ', &['ⱃ']),
-    ('Ⱄ', &['ⱄ']),
-    ('Ⱅ', &['ⱅ']),
-    ('Ⱆ', &['ⱆ']),
-    ('Ⱇ', &['ⱇ']),
-    ('Ⱈ', &['ⱈ']),
-    ('Ⱉ', &['ⱉ']),
-    ('Ⱊ', &['ⱊ']),
-    ('Ⱋ', &['ⱋ']),
-    ('Ⱌ', &['ⱌ']),
-    ('Ⱍ', &['ⱍ']),
-    ('Ⱎ', &['ⱎ']),
-    ('Ⱏ', &['ⱏ']),
-    ('Ⱐ', &['ⱐ']),
-    ('â°Ą', &['ⱑ']),
-    ('â°ą', &['ⱒ']),
-    ('â°Ł', &['ⱓ']),
-    ('â°€', &['ⱔ']),
-    ('â°„', &['ⱕ']),
-    ('â°Š', &['ⱖ']),
-    ('Ⱗ', &['ⱗ']),
-    ('Ⱊ', &['ⱘ']),
-    ('Ⱙ', &['ⱙ']),
-    ('â°Ș', &['ⱚ']),
-    ('Ⱛ', &['ⱛ']),
-    ('â°Ź', &['ⱜ']),
-    ('Ⱝ', &['ⱝ']),
-    ('â°ź', &['ⱞ']),
-    ('â°Ż', &['ⱟ']),
-    ('ⰰ', &['Ⰰ']),
-    ('ⰱ', &['Ⰱ']),
-    ('â°Č', &['Ⰲ']),
-    ('ⰳ', &['Ⰳ']),
-    ('Ⱞ', &['Ⰴ']),
-    ('â°”', &['Ⰵ']),
-    ('ⰶ', &['Ⰶ']),
-    ('ⰷ', &['Ⰷ']),
-    ('Ⱎ', &['Ⰸ']),
-    ('â°č', &['Ⰹ']),
-    ('â°ș', &['Ⰺ']),
-    ('ⰻ', &['Ⰻ']),
-    ('â°Œ', &['Ⰼ']),
-    ('Ⱌ', &['Ⰽ']),
-    ('Ⱏ', &['Ⰾ']),
-    ('ⰿ', &['Ⰿ']),
-    ('ⱀ', &['Ⱀ']),
-    ('ⱁ', &['Ⱁ']),
-    ('ⱂ', &['Ⱂ']),
-    ('ⱃ', &['Ⱃ']),
-    ('ⱄ', &['Ⱄ']),
-    ('ⱅ', &['Ⱅ']),
-    ('ⱆ', &['Ⱆ']),
-    ('ⱇ', &['Ⱇ']),
-    ('ⱈ', &['Ⱈ']),
-    ('ⱉ', &['Ⱉ']),
-    ('ⱊ', &['Ⱊ']),
-    ('ⱋ', &['Ⱋ']),
-    ('ⱌ', &['Ⱌ']),
-    ('ⱍ', &['Ⱍ']),
-    ('ⱎ', &['Ⱎ']),
-    ('ⱏ', &['Ⱏ']),
-    ('ⱐ', &['Ⱐ']),
-    ('ⱑ', &['â°Ą']),
-    ('ⱒ', &['â°ą']),
-    ('ⱓ', &['â°Ł']),
-    ('ⱔ', &['â°€']),
-    ('ⱕ', &['â°„']),
-    ('ⱖ', &['â°Š']),
-    ('ⱗ', &['Ⱗ']),
-    ('ⱘ', &['Ⱊ']),
-    ('ⱙ', &['Ⱙ']),
-    ('ⱚ', &['â°Ș']),
-    ('ⱛ', &['Ⱛ']),
-    ('ⱜ', &['â°Ź']),
-    ('ⱝ', &['Ⱝ']),
-    ('ⱞ', &['â°ź']),
-    ('ⱟ', &['â°Ż']),
-    ('Ⱡ', &['ⱥ']),
-    ('ⱥ', &['Ⱡ']),
-    ('ⱹ', &['ɫ']),
-    ('Ᵽ', &['ᔜ']),
-    ('ⱀ', &['ɜ']),
-    ('ⱄ', &['Èș']),
-    ('ⱊ', &['ȟ']),
-    ('Ⱨ', &['ⱚ']),
-    ('ⱚ', &['Ⱨ']),
-    ('Ⱪ', &['â±Ș']),
-    ('â±Ș', &['Ⱪ']),
-    ('Ⱬ', &['ⱏ']),
-    ('ⱏ', &['Ⱬ']),
-    ('Ɑ', &['ɑ']),
-    ('ⱟ', &['ɱ']),
-    ('Ɐ', &['ɐ']),
-    ('Ɒ', &['ɒ']),
-    ('â±Č', &['ⱳ']),
-    ('ⱳ', &['â±Č']),
-    ('â±”', &['â±¶']),
-    ('â±¶', &['â±”']),
-    ('ⱟ', &['Èż']),
-    ('Ɀ', &['ɀ']),
-    ('âȀ', &['âȁ']),
-    ('âȁ', &['âȀ']),
-    ('âȂ', &['âȃ']),
-    ('âȃ', &['âȂ']),
-    ('âȄ', &['âȅ']),
-    ('âȅ', &['âȄ']),
-    ('âȆ', &['âȇ']),
-    ('âȇ', &['âȆ']),
-    ('âȈ', &['âȉ']),
-    ('âȉ', &['âȈ']),
-    ('âȊ', &['âȋ']),
-    ('âȋ', &['âȊ']),
-    ('âȌ', &['âȍ']),
-    ('âȍ', &['âȌ']),
-    ('âȎ', &['âȏ']),
-    ('âȏ', &['âȎ']),
-    ('âȐ', &['âȑ']),
-    ('âȑ', &['âȐ']),
-    ('âȒ', &['âȓ']),
-    ('âȓ', &['âȒ']),
-    ('âȔ', &['âȕ']),
-    ('âȕ', &['âȔ']),
-    ('âȖ', &['âȗ']),
-    ('âȗ', &['âȖ']),
-    ('âȘ', &['âș']),
-    ('âș', &['âȘ']),
-    ('âȚ', &['âț']),
-    ('âț', &['âȚ']),
-    ('âȜ', &['âȝ']),
-    ('âȝ', &['âȜ']),
-    ('âȞ', &['âȟ']),
-    ('âȟ', &['âȞ']),
-    ('âČ ', &['âČĄ']),
-    ('âČĄ', &['âČ ']),
-    ('âČą', &['âČŁ']),
-    ('âČŁ', &['âČą']),
-    ('âČ€', &['âČ„']),
-    ('âČ„', &['âČ€']),
-    ('âČŠ', &['âȧ']),
-    ('âȧ', &['âČŠ']),
-    ('âČš', &['âČ©']),
-    ('âČ©', &['âČš']),
-    ('âČȘ', &['âČ«']),
-    ('âČ«', &['âČȘ']),
-    ('âČŹ', &['âČ­']),
-    ('âČ­', &['âČŹ']),
-    ('âČź', &['âČŻ']),
-    ('âČŻ', &['âČź']),
-    ('âȰ', &['âȱ']),
-    ('âȱ', &['âȰ']),
-    ('âČČ', &['âČł']),
-    ('âČł', &['âČČ']),
-    ('âČŽ', &['âČ”']),
-    ('âČ”', &['âČŽ']),
-    ('âȶ', &['âČ·']),
-    ('âČ·', &['âȶ']),
-    ('âČž', &['âČč']),
-    ('âČč', &['âČž']),
-    ('âČș', &['âČ»']),
-    ('âČ»', &['âČș']),
-    ('âČŒ', &['âČœ']),
-    ('âČœ', &['âČŒ']),
-    ('âČŸ', &['âČż']),
-    ('âČż', &['âČŸ']),
-    ('Ⳁ', &['ⳁ']),
-    ('ⳁ', &['Ⳁ']),
-    ('Ⳃ', &['ⳃ']),
-    ('ⳃ', &['Ⳃ']),
-    ('Ⳅ', &['ⳅ']),
-    ('ⳅ', &['Ⳅ']),
-    ('Ⳇ', &['ⳇ']),
-    ('ⳇ', &['Ⳇ']),
-    ('Ⳉ', &['ⳉ']),
-    ('ⳉ', &['Ⳉ']),
-    ('Ⳋ', &['ⳋ']),
-    ('ⳋ', &['Ⳋ']),
-    ('Ⳍ', &['ⳍ']),
-    ('ⳍ', &['Ⳍ']),
-    ('Ⳏ', &['ⳏ']),
-    ('ⳏ', &['Ⳏ']),
-    ('Ⳑ', &['ⳑ']),
-    ('ⳑ', &['Ⳑ']),
-    ('Ⳓ', &['ⳓ']),
-    ('ⳓ', &['Ⳓ']),
-    ('Ⳕ', &['ⳕ']),
-    ('ⳕ', &['Ⳕ']),
-    ('Ⳗ', &['ⳗ']),
-    ('ⳗ', &['Ⳗ']),
-    ('Ⳙ', &['ⳙ']),
-    ('ⳙ', &['Ⳙ']),
-    ('Ⳛ', &['ⳛ']),
-    ('ⳛ', &['Ⳛ']),
-    ('Ⳝ', &['ⳝ']),
-    ('ⳝ', &['Ⳝ']),
-    ('Ⳟ', &['ⳟ']),
-    ('ⳟ', &['Ⳟ']),
-    ('Ⳡ', &['⳥']),
-    ('⳥', &['Ⳡ']),
-    ('⳹', &['ⳣ']),
-    ('ⳣ', &['⳹']),
-    ('Ⳬ', &['ⳏ']),
-    ('ⳏ', &['Ⳬ']),
-    ('âł­', &['âłź']),
-    ('âłź', &['âł­']),
-    ('âłČ', &['âłł']),
-    ('âłł', &['âłČ']),
-    ('⮀', &['Ⴀ']),
-    ('⮁', &['Ⴁ']),
-    ('⮂', &['Ⴑ']),
-    ('⮃', &['Ⴃ']),
-    ('⮄', &['á‚€']),
-    ('⮅', &['á‚„']),
-    ('⮆', &['Ⴉ']),
-    ('⮇', &['Ⴇ']),
-    ('⮈', &['Ⴙ']),
-    ('⮉', &['Ⴉ']),
-    ('⮊', &['á‚Ș']),
-    ('⮋', &['Ⴋ']),
-    ('⮌', &['Ⴌ']),
-    ('⮍', &['Ⴍ']),
-    ('⮎', &['Ⴜ']),
-    ('⮏', &['Ⴏ']),
-    ('⮐', &['Ⴐ']),
-    ('⮑', &['Ⴑ']),
-    ('⮒', &['á‚Č']),
-    ('⮓', &['Ⴓ']),
-    ('⮔', &['Ⴎ']),
-    ('⮕', &['á‚”']),
-    ('⮖', &['Ⴖ']),
-    ('⮗', &['Ⴗ']),
-    ('⮘', &['Ⴞ']),
-    ('⮙', &['á‚č']),
-    ('⮚', &['á‚ș']),
-    ('⮛', &['Ⴛ']),
-    ('⮜', &['ႌ']),
-    ('⎝', &['ႜ']),
-    ('⮞', &['႟']),
-    ('⮟', &['Ⴟ']),
-    ('⮠', &['Ⴠ']),
-    ('⎥', &['Ⴡ']),
-    ('⮱', &['Ⴢ']),
-    ('⎣', &['Ⴣ']),
-    ('⎀', &['Ⴤ']),
-    ('⎄', &['Ⴥ']),
-    ('⮧', &['Ⴧ']),
-    ('⮭', &['Ⴭ']),
-    ('Ꙁ', &['ꙁ']),
-    ('ꙁ', &['Ꙁ']),
-    ('Ꙃ', &['ꙃ']),
-    ('ꙃ', &['Ꙃ']),
-    ('Ꙅ', &['ꙅ']),
-    ('ꙅ', &['Ꙅ']),
-    ('Ꙇ', &['ꙇ']),
-    ('ꙇ', &['Ꙇ']),
-    ('Ꙉ', &['ꙉ']),
-    ('ꙉ', &['Ꙉ']),
-    ('Ꙋ', &['áȈ', 'ꙋ']),
-    ('ꙋ', &['áȈ', 'Ꙋ']),
-    ('Ꙍ', &['ꙍ']),
-    ('ꙍ', &['Ꙍ']),
-    ('Ꙏ', &['ꙏ']),
-    ('ꙏ', &['Ꙏ']),
-    ('Ꙑ', &['ꙑ']),
-    ('ꙑ', &['Ꙑ']),
-    ('Ꙓ', &['ꙓ']),
-    ('ꙓ', &['Ꙓ']),
-    ('Ꙕ', &['ꙕ']),
-    ('ꙕ', &['Ꙕ']),
-    ('Ꙗ', &['ꙗ']),
-    ('ꙗ', &['Ꙗ']),
-    ('Ꙙ', &['ꙙ']),
-    ('ꙙ', &['Ꙙ']),
-    ('Ꙛ', &['ꙛ']),
-    ('ꙛ', &['Ꙛ']),
-    ('Ꙝ', &['ꙝ']),
-    ('ꙝ', &['Ꙝ']),
-    ('Ꙟ', &['ꙟ']),
-    ('ꙟ', &['Ꙟ']),
-    ('Ꙡ', &['ê™Ą']),
-    ('ê™Ą', &['Ꙡ']),
-    ('ê™ą', &['ê™Ł']),
-    ('ê™Ł', &['ê™ą']),
-    ('Ꙁ', &['Ꙅ']),
-    ('Ꙅ', &['Ꙁ']),
-    ('Ꙋ', &['ꙧ']),
-    ('ꙧ', &['Ꙋ']),
-    ('Ꙛ', &['ꙩ']),
-    ('ꙩ', &['Ꙛ']),
-    ('ê™Ș', &['ꙫ']),
-    ('ꙫ', &['ê™Ș']),
-    ('ê™Ź', &['ꙭ']),
-    ('ꙭ', &['ê™Ź']),
-    ('Ꚁ', &['ꚁ']),
-    ('ꚁ', &['Ꚁ']),
-    ('Ꚃ', &['ꚃ']),
-    ('ꚃ', &['Ꚃ']),
-    ('Ꚅ', &['ꚅ']),
-    ('ꚅ', &['Ꚅ']),
-    ('Ꚇ', &['ꚇ']),
-    ('ꚇ', &['Ꚇ']),
-    ('Ꚉ', &['ꚉ']),
-    ('ꚉ', &['Ꚉ']),
-    ('Ꚋ', &['ꚋ']),
-    ('ꚋ', &['Ꚋ']),
-    ('Ꚍ', &['ꚍ']),
-    ('ꚍ', &['Ꚍ']),
-    ('Ꚏ', &['ꚏ']),
-    ('ꚏ', &['Ꚏ']),
-    ('Ꚑ', &['ꚑ']),
-    ('ꚑ', &['Ꚑ']),
-    ('Ꚓ', &['ꚓ']),
-    ('ꚓ', &['Ꚓ']),
-    ('Ꚕ', &['ꚕ']),
-    ('ꚕ', &['Ꚕ']),
-    ('Ꚗ', &['ꚗ']),
-    ('ꚗ', &['Ꚗ']),
-    ('Ꚙ', &['ꚙ']),
-    ('ꚙ', &['Ꚙ']),
-    ('Ꚛ', &['ꚛ']),
-    ('ꚛ', &['Ꚛ']),
-    ('êœą', &['êœŁ']),
-    ('êœŁ', &['êœą']),
-    ('꜀', &['꜄']),
-    ('꜄', &['꜀']),
-    ('꜊', &['ꜧ']),
-    ('ꜧ', &['꜊']),
-    ('ꜚ', &['ꜩ']),
-    ('ꜩ', &['ꜚ']),
-    ('êœȘ', &['ꜫ']),
-    ('ꜫ', &['êœȘ']),
-    ('êœŹ', &['ꜭ']),
-    ('ꜭ', &['êœŹ']),
-    ('êœź', &['êœŻ']),
-    ('êœŻ', &['êœź']),
-    ('êœČ', &['êœł']),
-    ('êœł', &['êœČ']),
-    ('꜎', &['꜔']),
-    ('꜔', &['꜎']),
-    ('Ꜷ', &['ꜷ']),
-    ('ꜷ', &['Ꜷ']),
-    ('ꜞ', &['êœč']),
-    ('êœč', &['ꜞ']),
-    ('êœș', &['ꜻ']),
-    ('ꜻ', &['êœș']),
-    ('꜌', &['ꜜ']),
-    ('ꜜ', &['꜌']),
-    ('ꜟ', &['êœż']),
-    ('êœż', &['ꜟ']),
-    ('Ꝁ', &['ꝁ']),
-    ('ꝁ', &['Ꝁ']),
-    ('Ꝃ', &['ꝃ']),
-    ('ꝃ', &['Ꝃ']),
-    ('Ꝅ', &['ꝅ']),
-    ('ꝅ', &['Ꝅ']),
-    ('Ꝇ', &['ꝇ']),
-    ('ꝇ', &['Ꝇ']),
-    ('Ꝉ', &['ꝉ']),
-    ('ꝉ', &['Ꝉ']),
-    ('Ꝋ', &['ꝋ']),
-    ('ꝋ', &['Ꝋ']),
-    ('Ꝍ', &['ꝍ']),
-    ('ꝍ', &['Ꝍ']),
-    ('Ꝏ', &['ꝏ']),
-    ('ꝏ', &['Ꝏ']),
-    ('Ꝑ', &['ꝑ']),
-    ('ꝑ', &['Ꝑ']),
-    ('Ꝓ', &['ꝓ']),
-    ('ꝓ', &['Ꝓ']),
-    ('Ꝕ', &['ꝕ']),
-    ('ꝕ', &['Ꝕ']),
-    ('Ꝗ', &['ꝗ']),
-    ('ꝗ', &['Ꝗ']),
-    ('Ꝙ', &['ꝙ']),
-    ('ꝙ', &['Ꝙ']),
-    ('Ꝛ', &['ꝛ']),
-    ('ꝛ', &['Ꝛ']),
-    ('Ꝝ', &['ꝝ']),
-    ('ꝝ', &['Ꝝ']),
-    ('Ꝟ', &['ꝟ']),
-    ('ꝟ', &['Ꝟ']),
-    ('Ꝡ', &['êĄ']),
-    ('êĄ', &['Ꝡ']),
-    ('êą', &['êŁ']),
-    ('êŁ', &['êą']),
-    ('Ꝁ', &['Ꝅ']),
-    ('Ꝅ', &['Ꝁ']),
-    ('Ꝋ', &['ꝧ']),
-    ('ꝧ', &['Ꝋ']),
-    ('Ꝛ', &['ꝩ']),
-    ('ꝩ', &['Ꝛ']),
-    ('êȘ', &['ꝫ']),
-    ('ꝫ', &['êȘ']),
-    ('êŹ', &['ꝭ']),
-    ('ꝭ', &['êŹ']),
-    ('êź', &['êŻ']),
-    ('êŻ', &['êź']),
-    ('êč', &['êș']),
-    ('êș', &['êč']),
-    ('Ꝼ', &['Ꝍ']),
-    ('Ꝍ', &['Ꝼ']),
-    ('Ꝝ', &['á”č']),
-    ('ꝟ', &['êż']),
-    ('êż', &['ꝟ']),
-    ('Ꞁ', &['ꞁ']),
-    ('ꞁ', &['Ꞁ']),
-    ('Ꞃ', &['ꞃ']),
-    ('ꞃ', &['Ꞃ']),
-    ('Ꞅ', &['ꞅ']),
-    ('ꞅ', &['Ꞅ']),
-    ('Ꞇ', &['ꞇ']),
-    ('ꞇ', &['Ꞇ']),
-    ('Ꞌ', &['ꞌ']),
-    ('ꞌ', &['Ꞌ']),
-    ('Ɥ', &['Ʉ']),
-    ('Ꞑ', &['ꞑ']),
-    ('ꞑ', &['Ꞑ']),
-    ('Ꞓ', &['ꞓ']),
-    ('ꞓ', &['Ꞓ']),
-    ('ꞔ', &['Ꞔ']),
-    ('Ꞗ', &['ꞗ']),
-    ('ꞗ', &['Ꞗ']),
-    ('Ꞙ', &['ꞙ']),
-    ('ꞙ', &['Ꞙ']),
-    ('Ꞛ', &['ꞛ']),
-    ('ꞛ', &['Ꞛ']),
-    ('Ꞝ', &['ꞝ']),
-    ('ꞝ', &['Ꞝ']),
-    ('Ꞟ', &['ꞟ']),
-    ('ꞟ', &['Ꞟ']),
-    ('Ꞡ', &['êžĄ']),
-    ('êžĄ', &['Ꞡ']),
-    ('êžą', &['êžŁ']),
-    ('êžŁ', &['êžą']),
-    ('Ꞁ', &['Ꞅ']),
-    ('Ꞅ', &['Ꞁ']),
-    ('꞊', &['ꞧ']),
-    ('ꞧ', &['꞊']),
-    ('Ꞛ', &['ꞩ']),
-    ('ꞩ', &['Ꞛ']),
-    ('êžȘ', &['ÉŠ']),
-    ('Ɜ', &['ɜ']),
-    ('êžŹ', &['ÉĄ']),
-    ('Ɬ', &['ÉŹ']),
-    ('êžź', &['ÉȘ']),
-    ('Ʞ', &['ʞ']),
-    ('Ʇ', &['ʇ']),
-    ('êžČ', &['ʝ']),
-    ('êžł', &['ꭓ']),
-    ('ꞎ', &['ꞔ']),
-    ('ꞔ', &['ꞎ']),
-    ('Ꞷ', &['ꞷ']),
-    ('ꞷ', &['Ꞷ']),
-    ('êžž', &['êžč']),
-    ('êžč', &['êžž']),
-    ('êžș', &['ꞻ']),
-    ('ꞻ', &['êžș']),
-    ('ꞌ', &['Ꞝ']),
-    ('Ꞝ', &['ꞌ']),
-    ('ꞟ', &['êžż']),
-    ('êžż', &['ꞟ']),
-    ('Ꟁ', &['ꟁ']),
-    ('ꟁ', &['Ꟁ']),
-    ('Ꟃ', &['ꟃ']),
-    ('ꟃ', &['Ꟃ']),
-    ('Ꞔ', &['ꞔ']),
-    ('Ʂ', &['ʂ']),
-    ('Ᶎ', &['ᶎ']),
-    ('Ꟈ', &['ꟈ']),
-    ('ꟈ', &['Ꟈ']),
-    ('Ꟊ', &['ꟊ']),
-    ('ꟊ', &['Ꟊ']),
-    ('Ɤ', &['É€']),
-    ('Ꟍ', &['ꟍ']),
-    ('ꟍ', &['Ꟍ']),
-    ('Ꟑ', &['ꟑ']),
-    ('ꟑ', &['Ꟑ']),
-    ('Ꟗ', &['ꟗ']),
-    ('ꟗ', &['Ꟗ']),
-    ('Ꟙ', &['ꟙ']),
-    ('ꟙ', &['Ꟙ']),
-    ('Ꟛ', &['ꟛ']),
-    ('ꟛ', &['Ꟛ']),
-    ('Ƛ', &['ƛ']),
-    ('꟔', &['ꟶ']),
-    ('ꟶ', &['꟔']),
-    ('ꭓ', &['êžł']),
-    ('ꭰ', &['Ꭰ']),
-    ('ê­±', &['Ꭱ']),
-    ('ê­Č', &['Ꮁ']),
-    ('ê­ł', &['Ꭳ']),
-    ('ê­Ž', &['ᎀ']),
-    ('ꭔ', &['ᎄ']),
-    ('ꭶ', &['ᎊ']),
-    ('ꭷ', &['Ꭷ']),
-    ('ê­ž', &['᎚']),
-    ('ê­č', &['Ꭹ']),
-    ('ê­ș', &['áŽȘ']),
-    ('ꭻ', &['Ꭻ']),
-    ('ê­Œ', &['Ꭼ']),
-    ('ꭜ', &['Ꭽ']),
-    ('ê­Ÿ', &['Ꮌ']),
-    ('ê­ż', &['Ꭿ']),
-    ('êź€', &['Ꮀ']),
-    ('êź', &['Ꮁ']),
-    ('êź‚', &['áŽČ']),
-    ('êźƒ', &['Ꮃ']),
-    ('êź„', &['Ꭾ']),
-    ('êź…', &['᎔']),
-    ('êź†', &['Ꮆ']),
-    ('êź‡', &['Ꮇ']),
-    ('êźˆ', &['Ꮎ']),
-    ('êź‰', &['áŽč']),
-    ('êźŠ', &['áŽș']),
-    ('êź‹', &['Ꮋ']),
-    ('êźŒ', &['ᎌ']),
-    ('êź', &['᎜']),
-    ('êźŽ', &['᎟']),
-    ('êź', &['Ꮏ']),
-    ('êź', &['Ꮐ']),
-    ('êź‘', &['Ꮑ']),
-    ('êź’', &['Ꮒ']),
-    ('êź“', &['Ꮓ']),
-    ('êź”', &['Ꮔ']),
-    ('êź•', &['Ꮕ']),
-    ('êź–', &['Ꮖ']),
-    ('êź—', &['Ꮗ']),
-    ('êź˜', &['Ꮘ']),
-    ('êź™', &['Ꮙ']),
-    ('êźš', &['Ꮚ']),
-    ('êź›', &['Ꮛ']),
-    ('êźœ', &['Ꮜ']),
-    ('êź', &['Ꮝ']),
-    ('êźž', &['Ꮞ']),
-    ('êźŸ', &['Ꮟ']),
-    ('êź ', &['Ꮠ']),
-    ('êźĄ', &['Ꮡ']),
-    ('êźą', &['Ꮢ']),
-    ('êźŁ', &['Ꮣ']),
-    ('êź€', &['Ꮤ']),
-    ('êź„', &['Ꮥ']),
-    ('êźŠ', &['Ꮦ']),
-    ('êź§', &['Ꮧ']),
-    ('êźš', &['Ꮨ']),
-    ('êź©', &['Ꮩ']),
-    ('êźȘ', &['Ꮪ']),
-    ('êź«', &['Ꮫ']),
-    ('êźŹ', &['Ꮬ']),
-    ('êź­', &['Ꮭ']),
-    ('êźź', &['Ꮮ']),
-    ('êźŻ', &['Ꮯ']),
-    ('êź°', &['Ꮰ']),
-    ('êź±', &['Ꮱ']),
-    ('êźČ', &['Ᏹ']),
-    ('êźł', &['Ꮳ']),
-    ('êźŽ', &['Ꮐ']),
-    ('êź”', &['Ꮔ']),
-    ('êź¶', &['Ꮚ']),
-    ('êź·', &['Ꮷ']),
-    ('êźž', &['Ꮪ']),
-    ('êźč', &['Ꮹ']),
-    ('êźș', &['áȘ']),
-    ('êź»', &['Ꮻ']),
-    ('êźŒ', &['Ꮼ']),
-    ('êźœ', &['Ꮽ']),
-    ('êźŸ', &['ᏼ']),
-    ('êźż', &['Ꮿ']),
-    ('ïŹ…', &['ïŹ†']),
-    ('ïŹ†', &['ïŹ…']),
-    ('ïŒĄ', &['']),
-    ('ïŒą', &['']),
-    ('ïŒŁ', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('ïŒȘ', &['']),
-    ('', &['']),
-    ('ïŒŹ', &['']),
-    ('', &['']),
-    ('ïŒź', &['']),
-    ('ïŒŻ', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('ïŒČ', &['']),
-    ('ïŒł', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('ïŒč', &['']),
-    ('ïŒș', &['']),
-    ('', &['ïŒĄ']),
-    ('', &['ïŒą']),
-    ('', &['ïŒŁ']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['ïŒȘ']),
-    ('', &['']),
-    ('', &['ïŒŹ']),
-    ('', &['']),
-    ('', &['ïŒź']),
-    ('', &['ïŒŻ']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['ïŒČ']),
-    ('', &['ïŒł']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['']),
-    ('', &['ïŒč']),
-    ('', &['ïŒș']),
-    ('𐐀', &['𐐹']),
-    ('𐐁', &['𐐩']),
-    ('𐐂', &['đȘ']),
-    ('𐐃', &['𐐫']),
-    ('𐐄', &['𐐬']),
-    ('𐐅', &['𐐭']),
-    ('𐐆', &['𐐼']),
-    ('𐐇', &['𐐯']),
-    ('𐐈', &['𐐰']),
-    ('𐐉', &['𐐱']),
-    ('𐐊', &['đČ']),
-    ('𐐋', &['𐐳']),
-    ('𐐌', &['𐐮']),
-    ('𐐍', &['𐐔']),
-    ('𐐎', &['𐐶']),
-    ('𐐏', &['𐐷']),
-    ('𐐐', &['𐐞']),
-    ('𐐑', &['đč']),
-    ('𐐒', &['đș']),
-    ('𐐓', &['𐐻']),
-    ('𐐔', &['đŒ']),
-    ('𐐕', &['đœ']),
-    ('𐐖', &['đŸ']),
-    ('𐐗', &['𐐿']),
-    ('𐐘', &['𐑀']),
-    ('𐐙', &['𐑁']),
-    ('𐐚', &['𐑂']),
-    ('𐐛', &['𐑃']),
-    ('𐐜', &['𐑄']),
-    ('𐐝', &['𐑅']),
-    ('𐐞', &['𐑆']),
-    ('𐐟', &['𐑇']),
-    ('𐐠', &['𐑈']),
-    ('𐐡', &['𐑉']),
-    ('𐐱', &['𐑊']),
-    ('𐐣', &['𐑋']),
-    ('𐐀', &['𐑌']),
-    ('𐐄', &['𐑍']),
-    ('𐐩', &['𐑎']),
-    ('𐐧', &['𐑏']),
-    ('𐐹', &['𐐀']),
-    ('𐐩', &['𐐁']),
-    ('đȘ', &['𐐂']),
-    ('𐐫', &['𐐃']),
-    ('𐐬', &['𐐄']),
-    ('𐐭', &['𐐅']),
-    ('𐐼', &['𐐆']),
-    ('𐐯', &['𐐇']),
-    ('𐐰', &['𐐈']),
-    ('𐐱', &['𐐉']),
-    ('đČ', &['𐐊']),
-    ('𐐳', &['𐐋']),
-    ('𐐮', &['𐐌']),
-    ('𐐔', &['𐐍']),
-    ('𐐶', &['𐐎']),
-    ('𐐷', &['𐐏']),
-    ('𐐞', &['𐐐']),
-    ('đč', &['𐐑']),
-    ('đș', &['𐐒']),
-    ('𐐻', &['𐐓']),
-    ('đŒ', &['𐐔']),
-    ('đœ', &['𐐕']),
-    ('đŸ', &['𐐖']),
-    ('𐐿', &['𐐗']),
-    ('𐑀', &['𐐘']),
-    ('𐑁', &['𐐙']),
-    ('𐑂', &['𐐚']),
-    ('𐑃', &['𐐛']),
-    ('𐑄', &['𐐜']),
-    ('𐑅', &['𐐝']),
-    ('𐑆', &['𐐞']),
-    ('𐑇', &['𐐟']),
-    ('𐑈', &['𐐠']),
-    ('𐑉', &['𐐡']),
-    ('𐑊', &['𐐱']),
-    ('𐑋', &['𐐣']),
-    ('𐑌', &['𐐀']),
-    ('𐑍', &['𐐄']),
-    ('𐑎', &['𐐩']),
-    ('𐑏', &['𐐧']),
-    ('𐒰', &['𐓘']),
-    ('𐒱', &['𐓙']),
-    ('đ’Č', &['𐓚']),
-    ('𐒳', &['𐓛']),
-    ('𐒮', &['𐓜']),
-    ('𐒔', &['𐓝']),
-    ('𐒶', &['𐓞']),
-    ('𐒷', &['𐓟']),
-    ('𐒾', &['𐓠']),
-    ('đ’č', &['𐓡']),
-    ('đ’ș', &['𐓱']),
-    ('𐒻', &['𐓣']),
-    ('đ’Œ', &['𐓀']),
-    ('đ’œ', &['𐓄']),
-    ('đ’Ÿ', &['𐓩']),
-    ('𐒿', &['𐓧']),
-    ('𐓀', &['𐓹']),
-    ('𐓁', &['𐓩']),
-    ('𐓂', &['đ“Ș']),
-    ('𐓃', &['𐓫']),
-    ('𐓄', &['𐓬']),
-    ('𐓅', &['𐓭']),
-    ('𐓆', &['𐓼']),
-    ('𐓇', &['𐓯']),
-    ('𐓈', &['𐓰']),
-    ('𐓉', &['𐓱']),
-    ('𐓊', &['đ“Č']),
-    ('𐓋', &['𐓳']),
-    ('𐓌', &['𐓮']),
-    ('𐓍', &['𐓔']),
-    ('𐓎', &['𐓶']),
-    ('𐓏', &['𐓷']),
-    ('𐓐', &['𐓾']),
-    ('𐓑', &['đ“č']),
-    ('𐓒', &['đ“ș']),
-    ('𐓓', &['𐓻']),
-    ('𐓘', &['𐒰']),
-    ('𐓙', &['𐒱']),
-    ('𐓚', &['đ’Č']),
-    ('𐓛', &['𐒳']),
-    ('𐓜', &['𐒮']),
-    ('𐓝', &['𐒔']),
-    ('𐓞', &['𐒶']),
-    ('𐓟', &['𐒷']),
-    ('𐓠', &['𐒾']),
-    ('𐓡', &['đ’č']),
-    ('𐓱', &['đ’ș']),
-    ('𐓣', &['𐒻']),
-    ('𐓀', &['đ’Œ']),
-    ('𐓄', &['đ’œ']),
-    ('𐓩', &['đ’Ÿ']),
-    ('𐓧', &['𐒿']),
-    ('𐓹', &['𐓀']),
-    ('𐓩', &['𐓁']),
-    ('đ“Ș', &['𐓂']),
-    ('𐓫', &['𐓃']),
-    ('𐓬', &['𐓄']),
-    ('𐓭', &['𐓅']),
-    ('𐓼', &['𐓆']),
-    ('𐓯', &['𐓇']),
-    ('𐓰', &['𐓈']),
-    ('𐓱', &['𐓉']),
-    ('đ“Č', &['𐓊']),
-    ('𐓳', &['𐓋']),
-    ('𐓮', &['𐓌']),
-    ('𐓔', &['𐓍']),
-    ('𐓶', &['𐓎']),
-    ('𐓷', &['𐓏']),
-    ('𐓾', &['𐓐']),
-    ('đ“č', &['𐓑']),
-    ('đ“ș', &['𐓒']),
-    ('𐓻', &['𐓓']),
-    ('𐕰', &['𐖗']),
-    ('𐕱', &['𐖘']),
-    ('đ•Č', &['𐖙']),
-    ('𐕳', &['𐖚']),
-    ('𐕮', &['𐖛']),
-    ('𐕔', &['𐖜']),
-    ('𐕶', &['𐖝']),
-    ('𐕷', &['𐖞']),
-    ('𐕾', &['𐖟']),
-    ('đ•č', &['𐖠']),
-    ('đ•ș', &['𐖡']),
-    ('đ•Œ', &['𐖣']),
-    ('đ•œ', &['𐖀']),
-    ('đ•Ÿ', &['𐖄']),
-    ('𐕿', &['𐖩']),
-    ('𐖀', &['𐖧']),
-    ('𐖁', &['𐖹']),
-    ('𐖂', &['𐖩']),
-    ('𐖃', &['đ–Ș']),
-    ('𐖄', &['𐖫']),
-    ('𐖅', &['𐖬']),
-    ('𐖆', &['𐖭']),
-    ('𐖇', &['𐖼']),
-    ('𐖈', &['𐖯']),
-    ('𐖉', &['𐖰']),
-    ('𐖊', &['𐖱']),
-    ('𐖌', &['𐖳']),
-    ('𐖍', &['𐖮']),
-    ('𐖎', &['𐖔']),
-    ('𐖏', &['𐖶']),
-    ('𐖐', &['𐖷']),
-    ('𐖑', &['𐖾']),
-    ('𐖒', &['đ–č']),
-    ('𐖔', &['𐖻']),
-    ('𐖕', &['đ–Œ']),
-    ('𐖗', &['𐕰']),
-    ('𐖘', &['𐕱']),
-    ('𐖙', &['đ•Č']),
-    ('𐖚', &['𐕳']),
-    ('𐖛', &['𐕮']),
-    ('𐖜', &['𐕔']),
-    ('𐖝', &['𐕶']),
-    ('𐖞', &['𐕷']),
-    ('𐖟', &['𐕾']),
-    ('𐖠', &['đ•č']),
-    ('𐖡', &['đ•ș']),
-    ('𐖣', &['đ•Œ']),
-    ('𐖀', &['đ•œ']),
-    ('𐖄', &['đ•Ÿ']),
-    ('𐖩', &['𐕿']),
-    ('𐖧', &['𐖀']),
-    ('𐖹', &['𐖁']),
-    ('𐖩', &['𐖂']),
-    ('đ–Ș', &['𐖃']),
-    ('𐖫', &['𐖄']),
-    ('𐖬', &['𐖅']),
-    ('𐖭', &['𐖆']),
-    ('𐖼', &['𐖇']),
-    ('𐖯', &['𐖈']),
-    ('𐖰', &['𐖉']),
-    ('𐖱', &['𐖊']),
-    ('𐖳', &['𐖌']),
-    ('𐖮', &['𐖍']),
-    ('𐖔', &['𐖎']),
-    ('𐖶', &['𐖏']),
-    ('𐖷', &['𐖐']),
-    ('𐖾', &['𐖑']),
-    ('đ–č', &['𐖒']),
-    ('𐖻', &['𐖔']),
-    ('đ–Œ', &['𐖕']),
-    ('đČ€', &['𐳀']),
-    ('đČ', &['𐳁']),
-    ('đČ‚', &['𐳂']),
-    ('đČƒ', &['𐳃']),
-    ('đČ„', &['𐳄']),
-    ('đČ…', &['𐳅']),
-    ('đČ†', &['𐳆']),
-    ('đČ‡', &['𐳇']),
-    ('đČˆ', &['𐳈']),
-    ('đČ‰', &['𐳉']),
-    ('đČŠ', &['𐳊']),
-    ('đČ‹', &['𐳋']),
-    ('đČŒ', &['𐳌']),
-    ('đČ', &['𐳍']),
-    ('đČŽ', &['𐳎']),
-    ('đČ', &['𐳏']),
-    ('đČ', &['𐳐']),
-    ('đČ‘', &['𐳑']),
-    ('đČ’', &['𐳒']),
-    ('đČ“', &['𐳓']),
-    ('đČ”', &['𐳔']),
-    ('đČ•', &['𐳕']),
-    ('đČ–', &['𐳖']),
-    ('đČ—', &['𐳗']),
-    ('đČ˜', &['𐳘']),
-    ('đČ™', &['𐳙']),
-    ('đČš', &['𐳚']),
-    ('đČ›', &['𐳛']),
-    ('đČœ', &['𐳜']),
-    ('đČ', &['𐳝']),
-    ('đČž', &['𐳞']),
-    ('đČŸ', &['𐳟']),
-    ('đČ ', &['𐳠']),
-    ('đČĄ', &['𐳥']),
-    ('đČą', &['𐳹']),
-    ('đČŁ', &['𐳣']),
-    ('đČ€', &['𐳀']),
-    ('đČ„', &['𐳄']),
-    ('đČŠ', &['𐳊']),
-    ('đČ§', &['𐳧']),
-    ('đČš', &['𐳚']),
-    ('đČ©', &['𐳩']),
-    ('đČȘ', &['đłȘ']),
-    ('đČ«', &['𐳫']),
-    ('đČŹ', &['𐳏']),
-    ('đČ­', &['𐳭']),
-    ('đČź', &['𐳟']),
-    ('đČŻ', &['𐳯']),
-    ('đČ°', &['𐳰']),
-    ('đČ±', &['𐳱']),
-    ('đČČ', &['đłČ']),
-    ('𐳀', &['đČ€']),
-    ('𐳁', &['đČ']),
-    ('𐳂', &['đČ‚']),
-    ('𐳃', &['đČƒ']),
-    ('𐳄', &['đČ„']),
-    ('𐳅', &['đČ…']),
-    ('𐳆', &['đČ†']),
-    ('𐳇', &['đČ‡']),
-    ('𐳈', &['đČˆ']),
-    ('𐳉', &['đČ‰']),
-    ('𐳊', &['đČŠ']),
-    ('𐳋', &['đČ‹']),
-    ('𐳌', &['đČŒ']),
-    ('𐳍', &['đČ']),
-    ('𐳎', &['đČŽ']),
-    ('𐳏', &['đČ']),
-    ('𐳐', &['đČ']),
-    ('𐳑', &['đČ‘']),
-    ('𐳒', &['đČ’']),
-    ('𐳓', &['đČ“']),
-    ('𐳔', &['đČ”']),
-    ('𐳕', &['đČ•']),
-    ('𐳖', &['đČ–']),
-    ('𐳗', &['đČ—']),
-    ('𐳘', &['đČ˜']),
-    ('𐳙', &['đČ™']),
-    ('𐳚', &['đČš']),
-    ('𐳛', &['đČ›']),
-    ('𐳜', &['đČœ']),
-    ('𐳝', &['đČ']),
-    ('𐳞', &['đČž']),
-    ('𐳟', &['đČŸ']),
-    ('𐳠', &['đČ ']),
-    ('𐳥', &['đČĄ']),
-    ('𐳹', &['đČą']),
-    ('𐳣', &['đČŁ']),
-    ('𐳀', &['đČ€']),
-    ('𐳄', &['đČ„']),
-    ('𐳊', &['đČŠ']),
-    ('𐳧', &['đČ§']),
-    ('𐳚', &['đČš']),
-    ('𐳩', &['đČ©']),
-    ('đłȘ', &['đČȘ']),
-    ('𐳫', &['đČ«']),
-    ('𐳏', &['đČŹ']),
-    ('𐳭', &['đČ­']),
-    ('𐳟', &['đČź']),
-    ('𐳯', &['đČŻ']),
-    ('𐳰', &['đČ°']),
-    ('𐳱', &['đČ±']),
-    ('đłČ', &['đČČ']),
-    ('𐔐', &['𐔰']),
-    ('𐔑', &['𐔱']),
-    ('𐔒', &['đ”Č']),
-    ('𐔓', &['𐔳']),
-    ('𐔔', &['𐔎']),
-    ('𐔕', &['𐔔']),
-    ('𐔖', &['𐔶']),
-    ('𐔗', &['𐔷']),
-    ('𐔘', &['𐔞']),
-    ('𐔙', &['đ”č']),
-    ('𐔚', &['đ”ș']),
-    ('𐔛', &['𐔻']),
-    ('đ”œ', &['đ”Œ']),
-    ('đ”', &['đ”œ']),
-    ('𐔞', &['đ”Ÿ']),
-    ('đ”Ÿ', &['𐔿']),
-    ('𐔠', &['𐶀']),
-    ('𐔥', &['𐶁']),
-    ('𐔹', &['𐶂']),
-    ('𐔣', &['𐶃']),
-    ('𐔀', &['𐶄']),
-    ('𐔄', &['𐶅']),
-    ('𐔰', &['𐔐']),
-    ('𐔱', &['𐔑']),
-    ('đ”Č', &['𐔒']),
-    ('𐔳', &['𐔓']),
-    ('𐔎', &['𐔔']),
-    ('𐔔', &['𐔕']),
-    ('𐔶', &['𐔖']),
-    ('𐔷', &['𐔗']),
-    ('𐔞', &['𐔘']),
-    ('đ”č', &['𐔙']),
-    ('đ”ș', &['𐔚']),
-    ('𐔻', &['𐔛']),
-    ('đ”Œ', &['đ”œ']),
-    ('đ”œ', &['đ”']),
-    ('đ”Ÿ', &['𐔞']),
-    ('𐔿', &['đ”Ÿ']),
-    ('𐶀', &['𐔠']),
-    ('𐶁', &['𐔥']),
-    ('𐶂', &['𐔹']),
-    ('𐶃', &['𐔣']),
-    ('𐶄', &['𐔀']),
-    ('𐶅', &['𐔄']),
-    ('𑱠', &['𑣀']),
-    ('𑱡', &['𑣁']),
-    ('𑱱', &['𑣂']),
-    ('𑱣', &['𑣃']),
-    ('𑹀', &['𑣄']),
-    ('𑹄', &['𑣅']),
-    ('𑱩', &['𑣆']),
-    ('𑱧', &['𑣇']),
-    ('𑱹', &['𑣈']),
-    ('𑹩', &['𑣉']),
-    ('đ‘ąȘ', &['𑣊']),
-    ('𑹫', &['𑣋']),
-    ('𑱬', &['𑣌']),
-    ('𑱭', &['𑣍']),
-    ('𑱼', &['𑣎']),
-    ('𑱯', &['𑣏']),
-    ('𑱰', &['𑣐']),
-    ('𑹱', &['𑣑']),
-    ('đ‘ąČ', &['𑣒']),
-    ('𑱳', &['𑣓']),
-    ('𑱮', &['𑣔']),
-    ('𑹔', &['𑣕']),
-    ('𑹶', &['𑣖']),
-    ('𑹷', &['𑣗']),
-    ('𑱾', &['𑣘']),
-    ('đ‘ąč', &['𑣙']),
-    ('đ‘ąș', &['𑣚']),
-    ('𑹻', &['𑣛']),
-    ('đ‘ąŒ', &['𑣜']),
-    ('đ‘ąœ', &['𑣝']),
-    ('đ‘ąŸ', &['𑣞']),
-    ('𑱿', &['𑣟']),
-    ('𑣀', &['𑱠']),
-    ('𑣁', &['𑱡']),
-    ('𑣂', &['𑱱']),
-    ('𑣃', &['𑱣']),
-    ('𑣄', &['𑹀']),
-    ('𑣅', &['𑹄']),
-    ('𑣆', &['𑱩']),
-    ('𑣇', &['𑱧']),
-    ('𑣈', &['𑱹']),
-    ('𑣉', &['𑹩']),
-    ('𑣊', &['đ‘ąȘ']),
-    ('𑣋', &['𑹫']),
-    ('𑣌', &['𑱬']),
-    ('𑣍', &['𑱭']),
-    ('𑣎', &['𑱼']),
-    ('𑣏', &['𑱯']),
-    ('𑣐', &['𑱰']),
-    ('𑣑', &['𑹱']),
-    ('𑣒', &['đ‘ąČ']),
-    ('𑣓', &['𑱳']),
-    ('𑣔', &['𑱮']),
-    ('𑣕', &['𑹔']),
-    ('𑣖', &['𑹶']),
-    ('𑣗', &['𑹷']),
-    ('𑣘', &['𑱾']),
-    ('𑣙', &['đ‘ąč']),
-    ('𑣚', &['đ‘ąș']),
-    ('𑣛', &['𑹻']),
-    ('𑣜', &['đ‘ąŒ']),
-    ('𑣝', &['đ‘ąœ']),
-    ('𑣞', &['đ‘ąŸ']),
-    ('𑣟', &['𑱿']),
-    ('đ–č€', &['đ–č ']),
-    ('đ–č', &['đ–čĄ']),
-    ('đ–č‚', &['đ–čą']),
-    ('đ–čƒ', &['đ–čŁ']),
-    ('đ–č„', &['đ–č€']),
-    ('đ–č…', &['đ–č„']),
-    ('đ–č†', &['đ–čŠ']),
-    ('đ–č‡', &['đ–č§']),
-    ('đ–čˆ', &['đ–čš']),
-    ('đ–č‰', &['đ–č©']),
-    ('đ–čŠ', &['đ–čȘ']),
-    ('đ–č‹', &['đ–č«']),
-    ('đ–čŒ', &['đ–čŹ']),
-    ('đ–č', &['đ–č­']),
-    ('đ–čŽ', &['đ–čź']),
-    ('đ–č', &['đ–čŻ']),
-    ('đ–č', &['đ–č°']),
-    ('đ–č‘', &['đ–č±']),
-    ('đ–č’', &['đ–čČ']),
-    ('đ–č“', &['đ–čł']),
-    ('đ–č”', &['đ–čŽ']),
-    ('đ–č•', &['đ–č”']),
-    ('đ–č–', &['đ–č¶']),
-    ('đ–č—', &['đ–č·']),
-    ('đ–č˜', &['đ–čž']),
-    ('đ–č™', &['đ–čč']),
-    ('đ–čš', &['đ–čș']),
-    ('đ–č›', &['đ–č»']),
-    ('đ–čœ', &['đ–čŒ']),
-    ('đ–č', &['đ–čœ']),
-    ('đ–čž', &['đ–čŸ']),
-    ('đ–čŸ', &['đ–čż']),
-    ('đ–č ', &['đ–č€']),
-    ('đ–čĄ', &['đ–č']),
-    ('đ–čą', &['đ–č‚']),
-    ('đ–čŁ', &['đ–čƒ']),
-    ('đ–č€', &['đ–č„']),
-    ('đ–č„', &['đ–č…']),
-    ('đ–čŠ', &['đ–č†']),
-    ('đ–č§', &['đ–č‡']),
-    ('đ–čš', &['đ–čˆ']),
-    ('đ–č©', &['đ–č‰']),
-    ('đ–čȘ', &['đ–čŠ']),
-    ('đ–č«', &['đ–č‹']),
-    ('đ–čŹ', &['đ–čŒ']),
-    ('đ–č­', &['đ–č']),
-    ('đ–čź', &['đ–čŽ']),
-    ('đ–čŻ', &['đ–č']),
-    ('đ–č°', &['đ–č']),
-    ('đ–č±', &['đ–č‘']),
-    ('đ–čČ', &['đ–č’']),
-    ('đ–čł', &['đ–č“']),
-    ('đ–čŽ', &['đ–č”']),
-    ('đ–č”', &['đ–č•']),
-    ('đ–č¶', &['đ–č–']),
-    ('đ–č·', &['đ–č—']),
-    ('đ–čž', &['đ–č˜']),
-    ('đ–čč', &['đ–č™']),
-    ('đ–čș', &['đ–čš']),
-    ('đ–č»', &['đ–č›']),
-    ('đ–čŒ', &['đ–čœ']),
-    ('đ–čœ', &['đ–č']),
-    ('đ–čŸ', &['đ–čž']),
-    ('đ–čż', &['đ–čŸ']),
-    ('𞀀', &['𞀹']),
-    ('𞀁', &['𞀣']),
-    ('𞀂', &['𞀀']),
-    ('đž€ƒ', &['𞀄']),
-    ('𞀄', &['𞀊']),
-    ('𞀅', &['𞀧']),
-    ('𞀆', &['𞀚']),
-    ('𞀇', &['𞀩']),
-    ('đž€ˆ', &['đž€Ș']),
-    ('𞀉', &['𞀫']),
-    ('𞀊', &['𞀏']),
-    ('𞀋', &['𞀭']),
-    ('đž€Œ', &['𞀟']),
-    ('đž€', &['𞀯']),
-    ('𞀎', &['𞀰']),
-    ('đž€', &['𞀱']),
-    ('𞀐', &['đž€Č']),
-    ('𞀑', &['𞀳']),
-    ('𞀒', &['𞀎']),
-    ('𞀓', &['𞀔']),
-    ('𞀔', &['𞀶']),
-    ('𞀕', &['𞀷']),
-    ('𞀖', &['𞀞']),
-    ('𞀗', &['đž€č']),
-    ('đž€˜', &['đž€ș']),
-    ('𞀙', &['𞀻']),
-    ('𞀚', &['đž€Œ']),
-    ('𞀛', &['đž€œ']),
-    ('đž€œ', &['đž€Ÿ']),
-    ('đž€', &['𞀿']),
-    ('𞀞', &['đž„€']),
-    ('đž€Ÿ', &['𞄁']),
-    ('𞀠', &['đž„‚']),
-    ('𞀥', &['đž„ƒ']),
-    ('𞀹', &['𞀀']),
-    ('𞀣', &['𞀁']),
-    ('𞀀', &['𞀂']),
-    ('𞀄', &['đž€ƒ']),
-    ('𞀊', &['𞀄']),
-    ('𞀧', &['𞀅']),
-    ('𞀚', &['𞀆']),
-    ('𞀩', &['𞀇']),
-    ('đž€Ș', &['đž€ˆ']),
-    ('𞀫', &['𞀉']),
-    ('𞀏', &['𞀊']),
-    ('𞀭', &['𞀋']),
-    ('𞀟', &['đž€Œ']),
-    ('𞀯', &['đž€']),
-    ('𞀰', &['𞀎']),
-    ('𞀱', &['đž€']),
-    ('đž€Č', &['𞀐']),
-    ('𞀳', &['𞀑']),
-    ('𞀎', &['𞀒']),
-    ('𞀔', &['𞀓']),
-    ('𞀶', &['𞀔']),
-    ('𞀷', &['𞀕']),
-    ('𞀞', &['𞀖']),
-    ('đž€č', &['𞀗']),
-    ('đž€ș', &['đž€˜']),
-    ('𞀻', &['𞀙']),
-    ('đž€Œ', &['𞀚']),
-    ('đž€œ', &['𞀛']),
-    ('đž€Ÿ', &['đž€œ']),
-    ('𞀿', &['đž€']),
-    ('đž„€', &['𞀞']),
-    ('𞄁', &['đž€Ÿ']),
-    ('đž„‚', &['𞀠']),
-    ('đž„ƒ', &['𞀥']),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/general_category.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/general_category.rs
deleted file mode 100644
index 6ff6b53..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/general_category.rs
+++ /dev/null
@@ -1,6717 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate general-category ucd-16.0.0 --chars --exclude surrogate
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("Cased_Letter", CASED_LETTER),
-    ("Close_Punctuation", CLOSE_PUNCTUATION),
-    ("Connector_Punctuation", CONNECTOR_PUNCTUATION),
-    ("Control", CONTROL),
-    ("Currency_Symbol", CURRENCY_SYMBOL),
-    ("Dash_Punctuation", DASH_PUNCTUATION),
-    ("Decimal_Number", DECIMAL_NUMBER),
-    ("Enclosing_Mark", ENCLOSING_MARK),
-    ("Final_Punctuation", FINAL_PUNCTUATION),
-    ("Format", FORMAT),
-    ("Initial_Punctuation", INITIAL_PUNCTUATION),
-    ("Letter", LETTER),
-    ("Letter_Number", LETTER_NUMBER),
-    ("Line_Separator", LINE_SEPARATOR),
-    ("Lowercase_Letter", LOWERCASE_LETTER),
-    ("Mark", MARK),
-    ("Math_Symbol", MATH_SYMBOL),
-    ("Modifier_Letter", MODIFIER_LETTER),
-    ("Modifier_Symbol", MODIFIER_SYMBOL),
-    ("Nonspacing_Mark", NONSPACING_MARK),
-    ("Number", NUMBER),
-    ("Open_Punctuation", OPEN_PUNCTUATION),
-    ("Other", OTHER),
-    ("Other_Letter", OTHER_LETTER),
-    ("Other_Number", OTHER_NUMBER),
-    ("Other_Punctuation", OTHER_PUNCTUATION),
-    ("Other_Symbol", OTHER_SYMBOL),
-    ("Paragraph_Separator", PARAGRAPH_SEPARATOR),
-    ("Private_Use", PRIVATE_USE),
-    ("Punctuation", PUNCTUATION),
-    ("Separator", SEPARATOR),
-    ("Space_Separator", SPACE_SEPARATOR),
-    ("Spacing_Mark", SPACING_MARK),
-    ("Symbol", SYMBOL),
-    ("Titlecase_Letter", TITLECASE_LETTER),
-    ("Unassigned", UNASSIGNED),
-    ("Uppercase_Letter", UPPERCASE_LETTER),
-];
-
-pub const CASED_LETTER: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('µ', 'µ'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'Æș'),
-    ('ÆŒ', 'Æż'),
-    ('DŽ', 'ʓ'),
-    ('ʕ', 'ÊŻ'),
-    ('Ͱ', 'ͳ'),
-    ('Ͷ', 'ͷ'),
-    ('ͻ', '͜'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՠ', 'ֈ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('ნ', 'ჿ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᮀ', 'Ꭻ'),
-    ('ᔫ', 'ᔷ'),
-    ('á”č', 'ᶚ'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℯ', '℮'),
-    ('â„č', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ↄ', 'ↄ'),
-    ('Ⰰ', 'ⱻ'),
-    ('ⱟ', 'Ⳁ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('Ꙁ', 'ꙭ'),
-    ('Ꚁ', 'ꚛ'),
-    ('êœą', 'êŻ'),
-    ('ꝱ', 'ꞇ'),
-    ('Ꞌ', 'ꞎ'),
-    ('Ꞑ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('꟔', 'ꟶ'),
-    ('êŸș', 'êŸș'),
-    ('êŹ°', 'ꭚ'),
-    ('ê­ ', 'ê­š'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('𐐀', '𐑏'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐔐', '𐔄'),
-    ('𐔰', '𐶅'),
-    ('𑱠', '𑣟'),
-    ('đ–č€', 'đ–čż'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('đŒ€', 'đŒ‰'),
-    ('đŒ‹', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀀', 'đž„ƒ'),
-];
-
-pub const CLOSE_PUNCTUATION: &'static [(char, char)] = &[
-    (')', ')'),
-    (']', ']'),
-    ('}', '}'),
-    ('àŒ»', 'àŒ»'),
-    ('àŒœ', 'àŒœ'),
-    ('᚜', '᚜'),
-    ('⁆', '⁆'),
-    (' ', ' '),
-    ('₎', '₎'),
-    ('⌉', '⌉'),
-    ('⌋', '⌋'),
-    ('⟩', '⟩'),
-    ('❩', '❩'),
-    ('❫', '❫'),
-    ('❭', '❭'),
-    ('❯', '❯'),
-    ('❱', '❱'),
-    ('❳', '❳'),
-    ('❔', '❔'),
-    ('⟆', '⟆'),
-    ('⟧', '⟧'),
-    ('⟩', '⟩'),
-    ('⟫', '⟫'),
-    ('⟭', '⟭'),
-    ('⟯', '⟯'),
-    ('⩄', '⩄'),
-    ('⩆', '⩆'),
-    ('⊈', '⊈'),
-    ('⩊', '⩊'),
-    ('⩌', '⩌'),
-    ('⊎', '⊎'),
-    ('⊐', '⊐'),
-    ('⩒', '⩒'),
-    ('⩔', '⩔'),
-    ('⩖', '⩖'),
-    ('⊘', '⊘'),
-    ('⧙', '⧙'),
-    ('⧛', '⧛'),
-    ('⧜', '⧜'),
-    ('➣', '➣'),
-    ('âž„', 'âž„'),
-    ('âž§', 'âž§'),
-    ('âž©', 'âž©'),
-    ('âč–', 'âč–'),
-    ('âč˜', 'âč˜'),
-    ('âčš', 'âčš'),
-    ('âčœ', 'âčœ'),
-    ('〉', '〉'),
-    ('》', '》'),
-    ('」', '」'),
-    ('』', '』'),
-    ('】', '】'),
-    ('〕', '〕'),
-    ('〗', '〗'),
-    ('〙', '〙'),
-    ('〛', '〛'),
-    ('〞', '〟'),
-    ('', ''),
-    ('', ''),
-    ('ïž¶', 'ïž¶'),
-    ('ïžž', 'ïžž'),
-    ('ïžș', 'ïžș'),
-    ('', ''),
-    ('', ''),
-    ('ïč€', 'ïč€'),
-    ('ïč‚', 'ïč‚'),
-    ('ïč„', 'ïč„'),
-    ('ïčˆ', 'ïčˆ'),
-    ('ïčš', 'ïčš'),
-    ('ïčœ', 'ïčœ'),
-    ('ïčž', 'ïčž'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïœŁ', 'ïœŁ'),
-];
-
-pub const CONNECTOR_PUNCTUATION: &'static [(char, char)] = &[
-    ('_', '_'),
-    ('‿', '⁀'),
-    ('⁔', '⁔'),
-    ('ïžł', ''),
-    ('ïč', 'ïč'),
-    ('ïŒż', 'ïŒż'),
-];
-
-pub const CONTROL: &'static [(char, char)] =
-    &[('\0', '\u{1f}'), ('\u{7f}', '\u{9f}')];
-
-pub const CURRENCY_SYMBOL: &'static [(char, char)] = &[
-    ('$', '$'),
-    ('¢', '¥'),
-    ('֏', '֏'),
-    ('ۋ', 'ۋ'),
-    ('ߟ', 'ßż'),
-    ('à§Č', 'à§ł'),
-    ('à§»', 'à§»'),
-    ('૱', '૱'),
-    ('àŻč', 'àŻč'),
-    ('àžż', 'àžż'),
-    ('៛', '៛'),
-    ('₠', '⃀'),
-    ('ê ž', 'ê ž'),
-    ('﷌', '﷌'),
-    ('ïč©', 'ïč©'),
-    ('', ''),
-    ('ïż ', 'ïżĄ'),
-    ('ïż„', 'ïżŠ'),
-    ('𑿝', '𑿠'),
-    ('𞋿', '𞋿'),
-    ('đžČ°', 'đžČ°'),
-];
-
-pub const DASH_PUNCTUATION: &'static [(char, char)] = &[
-    ('-', '-'),
-    ('֊', '֊'),
-    ('ÖŸ', 'ÖŸ'),
-    ('᐀', '᐀'),
-    ('᠆', '᠆'),
-    ('‐', '―'),
-    ('⾗', '⾗'),
-    ('⾚', '⾚'),
-    ('âžș', 'âž»'),
-    ('âč€', 'âč€'),
-    ('âč', 'âč'),
-    ('〜', '〜'),
-    ('〰', '〰'),
-    ('゠', '゠'),
-    ('ïž±', 'ïžČ'),
-    ('ïč˜', 'ïč˜'),
-    ('ïčŁ', 'ïčŁ'),
-    ('', ''),
-    ('𐔟', '𐔟'),
-    ('đș­', 'đș­'),
-];
-
-pub const DECIMAL_NUMBER: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('Ù ', 'Ù©'),
-    ('Û°', 'Ûč'),
-    ('߀', '߉'),
-    ('à„Š', 'à„Ż'),
-    ('à§Š', 'à§Ż'),
-    ('੊', 'à©Ż'),
-    ('૊', 'à«Ż'),
-    ('à­Š', 'à­Ż'),
-    ('àŻŠ', 'àŻŻ'),
-    ('ొ', 'à±Ż'),
-    ('àłŠ', 'àłŻ'),
-    ('à”Š', 'à”Ż'),
-    ('à·Š', 'à·Ż'),
-    ('àč', 'àč™'),
-    ('໐', '໙'),
-    ('àŒ ', 'àŒ©'),
-    ('၀', '၉'),
-    ('႐', '႙'),
-    ('០', '៩'),
-    ('᠐', '᠙'),
-    ('ᄆ', 'ᄏ'),
-    ('᧐', '᧙'),
-    ('áȘ€', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('᭐', '᭙'),
-    ('áź°', 'áźč'),
-    ('᱀', '᱉'),
-    ('᱐', '᱙'),
-    ('꘠', '꘩'),
-    ('êŁ', 'êŁ™'),
-    ('ꀀ', 'ꀉ'),
-    ('꧐', '꧙'),
-    ('ê§°', 'ê§č'),
-    ('꩐', '꩙'),
-    ('êŻ°', 'êŻč'),
-    ('', ''),
-    ('𐒠', '𐒩'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔉'),
-    ('𑁩', '𑁯'),
-    ('𑃰', 'đ‘ƒč'),
-    ('đ‘„¶', '𑄿'),
-    ('𑇐', '𑇙'),
-    ('𑋰', 'đ‘‹č'),
-    ('𑑐', '𑑙'),
-    ('𑓐', '𑓙'),
-    ('𑙐', '𑙙'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜰', 'đ‘œč'),
-    ('𑣠', '𑣩'),
-    ('𑄐', 'đ‘„™'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑱐', '𑱙'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘œ', 'đ‘œ™'),
-    ('𖄰', 'đ–„č'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–«€', '𖫉'),
-    ('𖭐', '𖭙'),
-    ('đ–”°', 'đ–”č'),
-    ('𜳰', 'đœłč'),
-    ('𝟎', '𝟿'),
-    ('𞅀', '𞅉'),
-    ('𞋰', 'đž‹č'),
-    ('𞓰', 'đž“č'),
-    ('đž—±', 'đž—ș'),
-    ('𞄐', 'đž„™'),
-    ('🯰', 'đŸŻč'),
-];
-
-pub const ENCLOSING_MARK: &'static [(char, char)] = &[
-    ('\u{488}', '\u{489}'),
-    ('\u{1abe}', '\u{1abe}'),
-    ('\u{20dd}', '\u{20e0}'),
-    ('\u{20e2}', '\u{20e4}'),
-    ('\u{a670}', '\u{a672}'),
-];
-
-pub const FINAL_PUNCTUATION: &'static [(char, char)] = &[
-    ('»', '»'),
-    ('’', '’'),
-    ('”', '”'),
-    ('›', '›'),
-    ('➃', '➃'),
-    ('⾅', '⾅'),
-    ('⾊', '⾊'),
-    ('➍', '➍'),
-    ('➝', '➝'),
-    ('➥', '➥'),
-];
-
-pub const FORMAT: &'static [(char, char)] = &[
-    ('\u{ad}', '\u{ad}'),
-    ('\u{600}', '\u{605}'),
-    ('\u{61c}', '\u{61c}'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('\u{70f}', '\u{70f}'),
-    ('\u{890}', '\u{891}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('\u{180e}', '\u{180e}'),
-    ('\u{200b}', '\u{200f}'),
-    ('\u{202a}', '\u{202e}'),
-    ('\u{2060}', '\u{2064}'),
-    ('\u{2066}', '\u{206f}'),
-    ('\u{feff}', '\u{feff}'),
-    ('\u{fff9}', '\u{fffb}'),
-    ('\u{110bd}', '\u{110bd}'),
-    ('\u{110cd}', '\u{110cd}'),
-    ('\u{13430}', '\u{1343f}'),
-    ('\u{1bca0}', '\u{1bca3}'),
-    ('\u{1d173}', '\u{1d17a}'),
-    ('\u{e0001}', '\u{e0001}'),
-    ('\u{e0020}', '\u{e007f}'),
-];
-
-pub const INITIAL_PUNCTUATION: &'static [(char, char)] = &[
-    ('«', '«'),
-    ('‘', '‘'),
-    ('‛', '“'),
-    ('‟', '‟'),
-    ('‹', '‹'),
-    ('⾂', '⾂'),
-    ('⾄', '⾄'),
-    ('⾉', '⾉'),
-    ('⾌', '⾌'),
-    ('⾜', '⾜'),
-    ('âž ', 'âž '),
-];
-
-pub const LETTER: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('Ͱ', '͎'),
-    ('Ͷ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', 'ՙ'),
-    ('ՠ', 'ֈ'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('Ű ', 'ي'),
-    ('Ùź', 'ÙŻ'),
-    ('ٱ', 'ۓ'),
-    ('ە', 'ە'),
-    ('Û„', 'ÛŠ'),
-    ('Ûź', 'ÛŻ'),
-    ('Ûș', 'ÛŒ'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', 'ܐ'),
-    ('ܒ', 'ܯ'),
-    ('ʍ', 'Ț„'),
-    ('Ț±', 'Ț±'),
-    ('ߊ', 'ßȘ'),
-    ('ߎ', 'ߔ'),
-    ('ßș', 'ßș'),
-    ('ࠀ', 'ࠕ'),
-    ('ࠚ', 'ࠚ'),
-    ('à €', 'à €'),
-    ('à š', 'à š'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('àą ', 'àŁ‰'),
-    ('à€„', 'à€č'),
-    ('à€œ', 'à€œ'),
-    ('à„', 'à„'),
-    ('à„˜', 'à„Ą'),
-    ('à„±', 'àŠ€'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', 'àŠœ'),
-    ('ৎ', 'ৎ'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'à§Ą'),
-    ('à§°', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('à©Č', '੎'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', 'àȘœ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à«Ą'),
-    ('à«č', 'à«č'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', 'àŹœ'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­±', 'à­±'),
-    ('àźƒ', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('àŻ', 'àŻ'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('జ', 'జ'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', 'à±Ą'),
-    ('àȀ', 'àȀ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àČœ'),
-    ('àł', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àł±', 'àłČ'),
-    ('àŽ„', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', 'àŽœ'),
-    ('à”Ž', 'à”Ž'),
-    ('à””', 'à”–'),
-    ('à”Ÿ', 'à”Ą'),
-    ('à”ș', 'à”ż'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('àž', 'àž°'),
-    ('àžČ', 'àžł'),
-    ('àč€', 'àč†'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àș°'),
-    ('àșČ', 'àșł'),
-    ('àșœ', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('àœ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('àŸˆ', 'àŸŒ'),
-    ('က', 'á€Ș'),
-    ('ဿ', 'ဿ'),
-    ('ၐ', 'ၕ'),
-    ('ၚ', 'ၝ'),
-    ('ၥ', 'ၥ'),
-    ('၄', '၊'),
-    ('ၟ', 'ၰ'),
-    ('ၔ', 'ႁ'),
-    ('ႎ', 'ႎ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('ᛱ', '᛾'),
-    ('ᜀ', 'ᜑ'),
-    ('ᜟ', 'ᜱ'),
-    ('ᝀ', 'ᝑ'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('ក', 'ឳ'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', 'ៜ'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', '᱄'),
-    ('᱇', 'ᱹ'),
-    ('áąȘ', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('ᄐ', 'ᄭ'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('Ṁ', 'Ṗ'),
-    ('áš ', 'ᩔ'),
-    ('áȘ§', 'áȘ§'),
-    ('ᬅ', 'ᬳ'),
-    ('ᭅ', 'ᭌ'),
-    ('ៃ', '០'),
-    ('៟', '៯'),
-    ('áźș', 'ᯄ'),
-    ('ᰀ', 'ᰣ'),
-    ('ᱍ', 'ᱏ'),
-    ('ᱚ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł¶'),
-    ('áłș', 'áłș'),
-    ('ᮀ', 'á¶ż'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℯ', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ↄ', 'ↄ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('➯', '➯'),
-    ('々', '〆'),
-    ('〱', '〔'),
-    ('〻', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('ゝ', 'ゟ'),
-    ('ァ', 'ăƒș'),
-    ('ăƒŒ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘟ'),
-    ('ê˜Ș', 'ꘫ'),
-    ('Ꙁ', 'ê™ź'),
-    ('ê™ż', 'ꚝ'),
-    ('ꚠ', 'ꛄ'),
-    ('ꜗ', 'ꜟ'),
-    ('êœą', 'ꞈ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ꠁ'),
-    ('ꠃ', 'ꠅ'),
-    ('ꠇ', 'ꠊ'),
-    ('ꠌ', 'ê ą'),
-    ('êĄ€', 'êĄł'),
-    ('êą‚', 'êął'),
-    ('êŁČ', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', 'êŁŸ'),
-    ('ꀊ', 'ꀄ'),
-    ('ꀰ', 'ꄆ'),
-    ('ꄠ', 'ꄌ'),
-    ('ꊄ', 'êŠČ'),
-    ('ꧏ', 'ꧏ'),
-    ('ê§ ', 'ê§€'),
-    ('ê§Š', 'ê§Ż'),
-    ('ê§ș', 'ê§Ÿ'),
-    ('Ꚁ', 'êšš'),
-    ('ꩀ', 'ꩂ'),
-    ('ꩄ', 'ꩋ'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', 'ê©ș'),
-    ('꩟', 'êȘŻ'),
-    ('êȘ±', 'êȘ±'),
-    ('êȘ”', 'êȘ¶'),
-    ('êȘč', 'êȘœ'),
-    ('ꫀ', 'ꫀ'),
-    ('ꫂ', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ș'),
-    ('ê«Č', '꫎'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êŻą'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹ'),
-    ('ïŹŸ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍀'),
-    ('𐍂', '𐍉'),
-    ('𐍐', 'đ”'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐐀', '𐒝'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '𐹀'),
-    ('𐹐', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '𐫀'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '𐮣'),
-    ('𐔊', '𐔄'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('đŒ€', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ°', 'đŸ'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀃', 'đ‘€·'),
-    ('𑁱', 'đ‘Č'),
-    ('𑁔', '𑁔'),
-    ('𑂃', '𑂯'),
-    ('𑃐', '𑃹'),
-    ('𑄃', '𑄩'),
-    ('𑅄', '𑅄'),
-    ('𑅇', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('𑆃', 'đ‘†Č'),
-    ('𑇁', '𑇄'),
-    ('𑇚', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', 'đ‘ˆ«'),
-    ('𑈿', '𑉀'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '𑋞'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', 'đ‘Œœ'),
-    ('𑍐', '𑍐'),
-    ('𑍝', '𑍡'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '𑎷'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏓'),
-    ('𑐀', '𑐮'),
-    ('𑑇', '𑑊'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑒯'),
-    ('𑓄', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑖀', '𑖼'),
-    ('𑗘', '𑗛'),
-    ('𑘀', '𑘯'),
-    ('𑙄', '𑙄'),
-    ('𑚀', 'đ‘šȘ'),
-    ('𑚾', '𑚾'),
-    ('𑜀', '𑜚'),
-    ('𑝀', '𑝆'),
-    ('𑠀', 'đ‘ «'),
-    ('𑱠', '𑣟'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', '𑀯'),
-    ('𑀿', '𑀿'),
-    ('𑄁', '𑄁'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '𑧐'),
-    ('𑧡', '𑧡'),
-    ('𑧣', '𑧣'),
-    ('𑹀', '𑹀'),
-    ('𑹋', 'đ‘šČ'),
-    ('đ‘šș', 'đ‘šș'),
-    ('𑩐', '𑩐'),
-    ('đ‘©œ', 'đ‘Ș‰'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '𑰼'),
-    ('𑱀', '𑱀'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '𑮰'),
-    ('𑔆', '𑔆'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', '𑶉'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘» ', 'đ‘»Č'),
-    ('đ‘Œ‚', 'đ‘Œ‚'),
-    ('đ‘Œ„', 'đ‘Œ'),
-    ('đ‘Œ’', 'đ‘Œł'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '𖄝'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('𖫐', 'đ–«­'),
-    ('𖬀', '𖬯'),
-    ('𖭀', '𖭃'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('đ–œ', 'đ–œ'),
-    ('đ–Ÿ“', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞄀', '𞄬'),
-    ('đž„·', 'đž„œ'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞓐', 'đž“«'),
-    ('𞗐', '𞗭'),
-    ('𞗰', '𞗰'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('𞀀', 'đž„ƒ'),
-    ('đž„‹', 'đž„‹'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const LETTER_NUMBER: &'static [(char, char)] = &[
-    ('᛼', 'ᛰ'),
-    ('Ⅰ', 'ↂ'),
-    ('ↅ', 'ↈ'),
-    ('〇', '〇'),
-    ('〡', '〩'),
-    ('〾', 'ă€ș'),
-    ('ꛊ', 'ê›Ż'),
-    ('𐅀', '𐅮'),
-    ('𐍁', '𐍁'),
-    ('𐍊', '𐍊'),
-    ('𐏑', '𐏕'),
-    ('𒐀', '𒑼'),
-];
-
-pub const LINE_SEPARATOR: &'static [(char, char)] =
-    &[('\u{2028}', '\u{2028}')];
-
-pub const LOWERCASE_LETTER: &'static [(char, char)] = &[
-    ('a', 'z'),
-    ('µ', 'µ'),
-    ('ß', 'ö'),
-    ('ø', 'ÿ'),
-    ('ā', 'ā'),
-    ('ă', 'ă'),
-    ('ą', 'ą'),
-    ('ć', 'ć'),
-    ('ĉ', 'ĉ'),
-    ('ċ', 'ċ'),
-    ('č', 'č'),
-    ('ď', 'ď'),
-    ('đ', 'đ'),
-    ('ē', 'ē'),
-    ('ĕ', 'ĕ'),
-    ('ė', 'ė'),
-    ('ę', 'ę'),
-    ('ě', 'ě'),
-    ('ĝ', 'ĝ'),
-    ('ğ', 'ğ'),
-    ('ÄĄ', 'ÄĄ'),
-    ('ÄŁ', 'ÄŁ'),
-    ('Ä„', 'Ä„'),
-    ('ħ', 'ħ'),
-    ('Ä©', 'Ä©'),
-    ('Ä«', 'Ä«'),
-    ('Ä­', 'Ä­'),
-    ('ÄŻ', 'ÄŻ'),
-    ('ı', 'ı'),
-    ('Äł', 'Äł'),
-    ('Ä”', 'Ä”'),
-    ('Ä·', 'Äž'),
-    ('Äș', 'Äș'),
-    ('Č', 'Č'),
-    ('ÄŸ', 'ÄŸ'),
-    ('ƀ', 'ƀ'),
-    ('Ƃ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ɔ'),
-    ('ƈ', 'Ɖ'),
-    ('Ƌ', 'Ƌ'),
-    ('ƍ', 'ƍ'),
-    ('Ə', 'Ə'),
-    ('Ƒ', 'Ƒ'),
-    ('œ', 'œ'),
-    ('ƕ', 'ƕ'),
-    ('Ɨ', 'Ɨ'),
-    ('ƙ', 'ƙ'),
-    ('ƛ', 'ƛ'),
-    ('Ɲ', 'Ɲ'),
-    ('ß', 'ß'),
-    ('š', 'š'),
-    ('ĆŁ', 'ĆŁ'),
-    ('Ć„', 'Ć„'),
-    ('Ƨ', 'Ƨ'),
-    ('Ć©', 'Ć©'),
-    ('Ć«', 'Ć«'),
-    ('Ć­', 'Ć­'),
-    ('ĆŻ', 'ĆŻ'),
-    ('Ʊ', 'Ʊ'),
-    ('Ćł', 'Ćł'),
-    ('Ć”', 'Ć”'),
-    ('Ć·', 'Ć·'),
-    ('Ćș', 'Ćș'),
-    ('ĆŒ', 'ĆŒ'),
-    ('ĆŸ', 'ƀ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('ƈ', 'ƈ'),
-    ('ƌ', 'ƍ'),
-    ('ƒ', 'ƒ'),
-    ('ƕ', 'ƕ'),
-    ('ƙ', 'ƛ'),
-    ('ƞ', 'ƞ'),
-    ('ÆĄ', 'ÆĄ'),
-    ('ÆŁ', 'ÆŁ'),
-    ('Æ„', 'Æ„'),
-    ('Æš', 'Æš'),
-    ('ÆȘ', 'Æ«'),
-    ('Æ­', 'Æ­'),
-    ('ư', 'ư'),
-    ('ÆŽ', 'ÆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Æč', 'Æș'),
-    ('Æœ', 'Æż'),
-    ('dž', 'dž'),
-    ('lj', 'lj'),
-    ('nj', 'nj'),
-    ('ǎ', 'ǎ'),
-    ('ǐ', 'ǐ'),
-    ('ǒ', 'ǒ'),
-    ('ǔ', 'ǔ'),
-    ('ǖ', 'ǖ'),
-    ('ǘ', 'ǘ'),
-    ('ǚ', 'ǚ'),
-    ('ǜ', 'ǝ'),
-    ('ǟ', 'ǟ'),
-    ('ÇĄ', 'ÇĄ'),
-    ('ÇŁ', 'ÇŁ'),
-    ('Ç„', 'Ç„'),
-    ('ǧ', 'ǧ'),
-    ('Ç©', 'Ç©'),
-    ('Ç«', 'Ç«'),
-    ('Ç­', 'Ç­'),
-    ('ǯ', 'ǰ'),
-    ('Çł', 'Çł'),
-    ('Ç”', 'Ç”'),
-    ('Çč', 'Çč'),
-    ('Ç»', 'Ç»'),
-    ('ǜ', 'ǜ'),
-    ('Çż', 'Çż'),
-    ('ȁ', 'ȁ'),
-    ('ȃ', 'ȃ'),
-    ('ȅ', 'ȅ'),
-    ('ȇ', 'ȇ'),
-    ('ȉ', 'ȉ'),
-    ('ȋ', 'ȋ'),
-    ('ȍ', 'ȍ'),
-    ('ȏ', 'ȏ'),
-    ('ȑ', 'ȑ'),
-    ('ȓ', 'ȓ'),
-    ('ȕ', 'ȕ'),
-    ('ȗ', 'ȗ'),
-    ('ș', 'ș'),
-    ('ț', 'ț'),
-    ('ȝ', 'ȝ'),
-    ('ȟ', 'ȟ'),
-    ('ÈĄ', 'ÈĄ'),
-    ('ÈŁ', 'ÈŁ'),
-    ('È„', 'È„'),
-    ('ȧ', 'ȧ'),
-    ('È©', 'È©'),
-    ('È«', 'È«'),
-    ('È­', 'È­'),
-    ('ÈŻ', 'ÈŻ'),
-    ('ȱ', 'ȱ'),
-    ('Èł', 'Èč'),
-    ('Ȍ', 'Ȍ'),
-    ('Èż', 'ɀ'),
-    ('ɂ', 'ɂ'),
-    ('ɇ', 'ɇ'),
-    ('ɉ', 'ɉ'),
-    ('ɋ', 'ɋ'),
-    ('ɍ', 'ɍ'),
-    ('ɏ', 'ʓ'),
-    ('ʕ', 'ÊŻ'),
-    ('ͱ', 'ͱ'),
-    ('Íł', 'Íł'),
-    ('Í·', 'Í·'),
-    ('ͻ', '͜'),
-    ('ΐ', 'ΐ'),
-    ('ÎŹ', 'ώ'),
-    ('ϐ', 'ϑ'),
-    ('ϕ', 'ϗ'),
-    ('ϙ', 'ϙ'),
-    ('ϛ', 'ϛ'),
-    ('ϝ', 'ϝ'),
-    ('ϟ', 'ϟ'),
-    ('ÏĄ', 'ÏĄ'),
-    ('ÏŁ', 'ÏŁ'),
-    ('Ï„', 'Ï„'),
-    ('ϧ', 'ϧ'),
-    ('Ï©', 'Ï©'),
-    ('Ï«', 'Ï«'),
-    ('Ï­', 'Ï­'),
-    ('ÏŻ', 'Ïł'),
-    ('Ï”', 'Ï”'),
-    ('Ïž', 'Ïž'),
-    ('ϻ', 'ό'),
-    ('а', 'џ'),
-    ('ŃĄ', 'ŃĄ'),
-    ('ŃŁ', 'ŃŁ'),
-    ('Ń„', 'Ń„'),
-    ('ѧ', 'ѧ'),
-    ('Ń©', 'Ń©'),
-    ('Ń«', 'Ń«'),
-    ('Ń­', 'Ń­'),
-    ('ŃŻ', 'ŃŻ'),
-    ('ѱ', 'ѱ'),
-    ('Ńł', 'Ńł'),
-    ('Ń”', 'Ń”'),
-    ('Ń·', 'Ń·'),
-    ('Ńč', 'Ńč'),
-    ('Ń»', 'Ń»'),
-    ('Ńœ', 'Ńœ'),
-    ('Ńż', 'Ńż'),
-    ('ҁ', 'ҁ'),
-    ('ҋ', 'ҋ'),
-    ('ҍ', 'ҍ'),
-    ('ҏ', 'ҏ'),
-    ('ґ', 'ґ'),
-    ('ғ', 'ғ'),
-    ('ҕ', 'ҕ'),
-    ('җ', 'җ'),
-    ('ҙ', 'ҙ'),
-    ('қ', 'қ'),
-    ('ҝ', 'ҝ'),
-    ('ҟ', 'ҟ'),
-    ('ÒĄ', 'ÒĄ'),
-    ('ÒŁ', 'ÒŁ'),
-    ('Ò„', 'Ò„'),
-    ('Ò§', 'Ò§'),
-    ('Ò©', 'Ò©'),
-    ('Ò«', 'Ò«'),
-    ('Ò­', 'Ò­'),
-    ('ÒŻ', 'ÒŻ'),
-    ('Ò±', 'Ò±'),
-    ('Òł', 'Òł'),
-    ('Ò”', 'Ò”'),
-    ('Ò·', 'Ò·'),
-    ('Òč', 'Òč'),
-    ('Ò»', 'Ò»'),
-    ('Ҝ', 'Ҝ'),
-    ('Òż', 'Òż'),
-    ('ӂ', 'ӂ'),
-    ('ӄ', 'ӄ'),
-    ('ӆ', 'ӆ'),
-    ('ӈ', 'ӈ'),
-    ('ӊ', 'ӊ'),
-    ('ӌ', 'ӌ'),
-    ('ӎ', 'ӏ'),
-    ('ӑ', 'ӑ'),
-    ('ӓ', 'ӓ'),
-    ('ӕ', 'ӕ'),
-    ('ӗ', 'ӗ'),
-    ('ә', 'ә'),
-    ('ӛ', 'ӛ'),
-    ('ӝ', 'ӝ'),
-    ('ӟ', 'ӟ'),
-    ('ÓĄ', 'ÓĄ'),
-    ('ÓŁ', 'ÓŁ'),
-    ('Ó„', 'Ó„'),
-    ('Ó§', 'Ó§'),
-    ('Ó©', 'Ó©'),
-    ('Ó«', 'Ó«'),
-    ('Ó­', 'Ó­'),
-    ('ÓŻ', 'ÓŻ'),
-    ('Ó±', 'Ó±'),
-    ('Ół', 'Ół'),
-    ('Ó”', 'Ó”'),
-    ('Ó·', 'Ó·'),
-    ('Óč', 'Óč'),
-    ('Ó»', 'Ó»'),
-    ('Ӝ', 'Ӝ'),
-    ('Óż', 'Óż'),
-    ('ԁ', 'ԁ'),
-    ('ԃ', 'ԃ'),
-    ('ԅ', 'ԅ'),
-    ('ԇ', 'ԇ'),
-    ('ԉ', 'ԉ'),
-    ('ԋ', 'ԋ'),
-    ('ԍ', 'ԍ'),
-    ('ԏ', 'ԏ'),
-    ('ԑ', 'ԑ'),
-    ('ԓ', 'ԓ'),
-    ('ԕ', 'ԕ'),
-    ('ԗ', 'ԗ'),
-    ('ԙ', 'ԙ'),
-    ('ԛ', 'ԛ'),
-    ('ԝ', 'ԝ'),
-    ('ԟ', 'ԟ'),
-    ('ÔĄ', 'ÔĄ'),
-    ('ÔŁ', 'ÔŁ'),
-    ('Ô„', 'Ô„'),
-    ('Ô§', 'Ô§'),
-    ('Ô©', 'Ô©'),
-    ('Ô«', 'Ô«'),
-    ('Ô­', 'Ô­'),
-    ('ÔŻ', 'ÔŻ'),
-    ('ՠ', 'ֈ'),
-    ('ა', 'áƒș'),
-    ('ნ', 'ჿ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȈ'),
-    ('áȊ', 'áȊ'),
-    ('ᮀ', 'Ꭻ'),
-    ('ᔫ', 'ᔷ'),
-    ('á”č', 'ᶚ'),
-    ('ខ', 'ខ'),
-    ('ឃ', 'ឃ'),
-    ('ᾅ', 'ᾅ'),
-    ('ᾇ', 'ᾇ'),
-    ('ᾉ', 'ᾉ'),
-    ('ᾋ', 'ᾋ'),
-    ('ឍ', 'ឍ'),
-    ('ត', 'ត'),
-    ('ᾑ', 'ᾑ'),
-    ('ᾓ', 'ᾓ'),
-    ('ᾕ', 'ᾕ'),
-    ('ᾗ', 'ᾗ'),
-    ('ᾙ', 'ᾙ'),
-    ('ᾛ', 'ᾛ'),
-    ('ឝ', 'ឝ'),
-    ('ᾟ', 'ᾟ'),
-    ('ឥ', 'ឥ'),
-    ('ឣ', 'ឣ'),
-    ('áž„', 'áž„'),
-    ('áž§', 'áž§'),
-    ('áž©', 'áž©'),
-    ('áž«', 'áž«'),
-    ('áž­', 'áž­'),
-    ('ឯ', 'ឯ'),
-    ('áž±', 'áž±'),
-    ('ážł', 'ážł'),
-    ('áž”', 'áž”'),
-    ('áž·', 'áž·'),
-    ('ážč', 'ážč'),
-    ('áž»', 'áž»'),
-    ('វ', 'វ'),
-    ('ážż', 'ážż'),
-    ('áč', 'áč'),
-    ('áčƒ', 'áčƒ'),
-    ('áč…', 'áč…'),
-    ('áč‡', 'áč‡'),
-    ('áč‰', 'áč‰'),
-    ('áč‹', 'áč‹'),
-    ('áč', 'áč'),
-    ('áč', 'áč'),
-    ('áč‘', 'áč‘'),
-    ('áč“', 'áč“'),
-    ('áč•', 'áč•'),
-    ('áč—', 'áč—'),
-    ('áč™', 'áč™'),
-    ('áč›', 'áč›'),
-    ('áč', 'áč'),
-    ('áčŸ', 'áčŸ'),
-    ('áčĄ', 'áčĄ'),
-    ('áčŁ', 'áčŁ'),
-    ('áč„', 'áč„'),
-    ('áč§', 'áč§'),
-    ('áč©', 'áč©'),
-    ('áč«', 'áč«'),
-    ('áč­', 'áč­'),
-    ('áčŻ', 'áčŻ'),
-    ('áč±', 'áč±'),
-    ('áčł', 'áčł'),
-    ('áč”', 'áč”'),
-    ('áč·', 'áč·'),
-    ('áčč', 'áčč'),
-    ('áč»', 'áč»'),
-    ('áčœ', 'áčœ'),
-    ('áčż', 'áčż'),
-    ('áș', 'áș'),
-    ('áșƒ', 'áșƒ'),
-    ('áș…', 'áș…'),
-    ('áș‡', 'áș‡'),
-    ('áș‰', 'áș‰'),
-    ('áș‹', 'áș‹'),
-    ('áș', 'áș'),
-    ('áș', 'áș'),
-    ('áș‘', 'áș‘'),
-    ('áș“', 'áș“'),
-    ('áș•', 'áș'),
-    ('áșŸ', 'áșŸ'),
-    ('áșĄ', 'áșĄ'),
-    ('áșŁ', 'áșŁ'),
-    ('áș„', 'áș„'),
-    ('áș§', 'áș§'),
-    ('áș©', 'áș©'),
-    ('áș«', 'áș«'),
-    ('áș­', 'áș­'),
-    ('áșŻ', 'áșŻ'),
-    ('áș±', 'áș±'),
-    ('áșł', 'áșł'),
-    ('áș”', 'áș”'),
-    ('áș·', 'áș·'),
-    ('áșč', 'áșč'),
-    ('áș»', 'áș»'),
-    ('áșœ', 'áșœ'),
-    ('áșż', 'áșż'),
-    ('ề', 'ề'),
-    ('ể', 'ể'),
-    ('ễ', 'ễ'),
-    ('ệ', 'ệ'),
-    ('ỉ', 'ỉ'),
-    ('ị', 'ị'),
-    ('ọ', 'ọ'),
-    ('ỏ', 'ỏ'),
-    ('ố', 'ố'),
-    ('ồ', 'ồ'),
-    ('ổ', 'ổ'),
-    ('ỗ', 'ỗ'),
-    ('ộ', 'ộ'),
-    ('ớ', 'ớ'),
-    ('ờ', 'ờ'),
-    ('ở', 'ở'),
-    ('ụ', 'ụ'),
-    ('ợ', 'ợ'),
-    ('Ễ', 'Ễ'),
-    ('á»§', 'á»§'),
-    ('ứ', 'ứ'),
-    ('ừ', 'ừ'),
-    ('á»­', 'á»­'),
-    ('ữ', 'ữ'),
-    ('á»±', 'á»±'),
-    ('ỳ', 'ỳ'),
-    ('á»”', 'á»”'),
-    ('á»·', 'á»·'),
-    ('á»č', 'á»č'),
-    ('á»»', 'á»»'),
-    ('Ờ', 'Ờ'),
-    ('ỿ', 'ጇ'),
-    ('ጐ', 'ጕ'),
-    ('ጠ', 'ጧ'),
-    ('ጰ', 'ጷ'),
-    ('ᜀ', 'ᜅ'),
-    ('ᜐ', '᜗'),
-    ('ᜠ', 'ᜧ'),
-    ('ᜰ', '᜜'),
-    ('ៀ', 'ះ'),
-    ('័', 'ៗ'),
-    ('០', '៧'),
-    ('៰', '៎'),
-    ('៶', '៷'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῇ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'ῗ'),
-    ('áż ', 'áż§'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áż·'),
-    ('ℊ', 'ℊ'),
-    ('ℎ', 'ℏ'),
-    ('ℓ', 'ℓ'),
-    ('ℯ', 'ℯ'),
-    ('℮', '℮'),
-    ('â„č', 'â„č'),
-    ('ℌ', 'ℜ'),
-    ('ⅆ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('ↄ', 'ↄ'),
-    ('ⰰ', 'ⱟ'),
-    ('ⱥ', 'ⱥ'),
-    ('ⱄ', 'ⱊ'),
-    ('ⱚ', 'ⱚ'),
-    ('â±Ș', 'â±Ș'),
-    ('ⱏ', 'ⱏ'),
-    ('â±±', 'â±±'),
-    ('ⱳ', 'ⱎ'),
-    ('â±¶', 'â±»'),
-    ('âȁ', 'âȁ'),
-    ('âȃ', 'âȃ'),
-    ('âȅ', 'âȅ'),
-    ('âȇ', 'âȇ'),
-    ('âȉ', 'âȉ'),
-    ('âȋ', 'âȋ'),
-    ('âȍ', 'âȍ'),
-    ('âȏ', 'âȏ'),
-    ('âȑ', 'âȑ'),
-    ('âȓ', 'âȓ'),
-    ('âȕ', 'âȕ'),
-    ('âȗ', 'âȗ'),
-    ('âș', 'âș'),
-    ('âț', 'âț'),
-    ('âȝ', 'âȝ'),
-    ('âȟ', 'âȟ'),
-    ('âČĄ', 'âČĄ'),
-    ('âČŁ', 'âČŁ'),
-    ('âČ„', 'âČ„'),
-    ('âȧ', 'âȧ'),
-    ('âČ©', 'âČ©'),
-    ('âČ«', 'âČ«'),
-    ('âČ­', 'âČ­'),
-    ('âČŻ', 'âČŻ'),
-    ('âȱ', 'âȱ'),
-    ('âČł', 'âČł'),
-    ('âČ”', 'âČ”'),
-    ('âČ·', 'âČ·'),
-    ('âČč', 'âČč'),
-    ('âČ»', 'âČ»'),
-    ('âČœ', 'âČœ'),
-    ('âČż', 'âČż'),
-    ('ⳁ', 'ⳁ'),
-    ('ⳃ', 'ⳃ'),
-    ('ⳅ', 'ⳅ'),
-    ('ⳇ', 'ⳇ'),
-    ('ⳉ', 'ⳉ'),
-    ('ⳋ', 'ⳋ'),
-    ('ⳍ', 'ⳍ'),
-    ('ⳏ', 'ⳏ'),
-    ('ⳑ', 'ⳑ'),
-    ('ⳓ', 'ⳓ'),
-    ('ⳕ', 'ⳕ'),
-    ('ⳗ', 'ⳗ'),
-    ('ⳙ', 'ⳙ'),
-    ('ⳛ', 'ⳛ'),
-    ('ⳝ', 'ⳝ'),
-    ('ⳟ', 'ⳟ'),
-    ('⳥', '⳥'),
-    ('ⳣ', 'Ⳁ'),
-    ('ⳏ', 'ⳏ'),
-    ('âłź', 'âłź'),
-    ('âłł', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('ꙁ', 'ꙁ'),
-    ('ꙃ', 'ꙃ'),
-    ('ꙅ', 'ꙅ'),
-    ('ꙇ', 'ꙇ'),
-    ('ꙉ', 'ꙉ'),
-    ('ꙋ', 'ꙋ'),
-    ('ꙍ', 'ꙍ'),
-    ('ꙏ', 'ꙏ'),
-    ('ꙑ', 'ꙑ'),
-    ('ꙓ', 'ꙓ'),
-    ('ꙕ', 'ꙕ'),
-    ('ꙗ', 'ꙗ'),
-    ('ꙙ', 'ꙙ'),
-    ('ꙛ', 'ꙛ'),
-    ('ꙝ', 'ꙝ'),
-    ('ꙟ', 'ꙟ'),
-    ('ê™Ą', 'ê™Ą'),
-    ('ê™Ł', 'ê™Ł'),
-    ('Ꙅ', 'Ꙅ'),
-    ('ꙧ', 'ꙧ'),
-    ('ꙩ', 'ꙩ'),
-    ('ꙫ', 'ꙫ'),
-    ('ꙭ', 'ꙭ'),
-    ('ꚁ', 'ꚁ'),
-    ('ꚃ', 'ꚃ'),
-    ('ꚅ', 'ꚅ'),
-    ('ꚇ', 'ꚇ'),
-    ('ꚉ', 'ꚉ'),
-    ('ꚋ', 'ꚋ'),
-    ('ꚍ', 'ꚍ'),
-    ('ꚏ', 'ꚏ'),
-    ('ꚑ', 'ꚑ'),
-    ('ꚓ', 'ꚓ'),
-    ('ꚕ', 'ꚕ'),
-    ('ꚗ', 'ꚗ'),
-    ('ꚙ', 'ꚙ'),
-    ('ꚛ', 'ꚛ'),
-    ('êœŁ', 'êœŁ'),
-    ('꜄', '꜄'),
-    ('ꜧ', 'ꜧ'),
-    ('ꜩ', 'ꜩ'),
-    ('ꜫ', 'ꜫ'),
-    ('ꜭ', 'ꜭ'),
-    ('êœŻ', 'ꜱ'),
-    ('êœł', 'êœł'),
-    ('꜔', '꜔'),
-    ('ꜷ', 'ꜷ'),
-    ('êœč', 'êœč'),
-    ('ꜻ', 'ꜻ'),
-    ('ꜜ', 'ꜜ'),
-    ('êœż', 'êœż'),
-    ('ꝁ', 'ꝁ'),
-    ('ꝃ', 'ꝃ'),
-    ('ꝅ', 'ꝅ'),
-    ('ꝇ', 'ꝇ'),
-    ('ꝉ', 'ꝉ'),
-    ('ꝋ', 'ꝋ'),
-    ('ꝍ', 'ꝍ'),
-    ('ꝏ', 'ꝏ'),
-    ('ꝑ', 'ꝑ'),
-    ('ꝓ', 'ꝓ'),
-    ('ꝕ', 'ꝕ'),
-    ('ꝗ', 'ꝗ'),
-    ('ꝙ', 'ꝙ'),
-    ('ꝛ', 'ꝛ'),
-    ('ꝝ', 'ꝝ'),
-    ('ꝟ', 'ꝟ'),
-    ('êĄ', 'êĄ'),
-    ('êŁ', 'êŁ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('ꝧ', 'ꝧ'),
-    ('ꝩ', 'ꝩ'),
-    ('ꝫ', 'ꝫ'),
-    ('ꝭ', 'ꝭ'),
-    ('êŻ', 'êŻ'),
-    ('ꝱ', 'Ꝟ'),
-    ('êș', 'êș'),
-    ('Ꝍ', 'Ꝍ'),
-    ('êż', 'êż'),
-    ('ꞁ', 'ꞁ'),
-    ('ꞃ', 'ꞃ'),
-    ('ꞅ', 'ꞅ'),
-    ('ꞇ', 'ꞇ'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞎ', 'ꞎ'),
-    ('ꞑ', 'ꞑ'),
-    ('ꞓ', 'ꞕ'),
-    ('ꞗ', 'ꞗ'),
-    ('ꞙ', 'ꞙ'),
-    ('ꞛ', 'ꞛ'),
-    ('ꞝ', 'ꞝ'),
-    ('ꞟ', 'ꞟ'),
-    ('êžĄ', 'êžĄ'),
-    ('êžŁ', 'êžŁ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('ꞧ', 'ꞧ'),
-    ('ꞩ', 'ꞩ'),
-    ('êžŻ', 'êžŻ'),
-    ('ꞔ', 'ꞔ'),
-    ('ꞷ', 'ꞷ'),
-    ('êžč', 'êžč'),
-    ('ꞻ', 'ꞻ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('êžż', 'êžż'),
-    ('ꟁ', 'ꟁ'),
-    ('ꟃ', 'ꟃ'),
-    ('ꟈ', 'ꟈ'),
-    ('ꟊ', 'ꟊ'),
-    ('ꟍ', 'ꟍ'),
-    ('ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'ꟕ'),
-    ('ꟗ', 'ꟗ'),
-    ('ꟙ', 'ꟙ'),
-    ('ꟛ', 'ꟛ'),
-    ('ꟶ', 'ꟶ'),
-    ('êŸș', 'êŸș'),
-    ('êŹ°', 'ꭚ'),
-    ('ê­ ', 'ê­š'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('', ''),
-    ('𐐹', '𐑏'),
-    ('𐓘', '𐓻'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐳀', 'đłČ'),
-    ('𐔰', '𐶅'),
-    ('𑣀', '𑣟'),
-    ('đ–č ', 'đ–čż'),
-    ('𝐚', '𝐳'),
-    ('𝑎', '𝑔'),
-    ('𝑖', '𝑧'),
-    ('𝒂', '𝒛'),
-    ('đ’¶', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝓏'),
-    ('đ“Ș', '𝔃'),
-    ('𝔞', 'đ”·'),
-    ('𝕒', 'đ•«'),
-    ('𝖆', '𝖟'),
-    ('đ–ș', '𝗓'),
-    ('𝗼', '𝘇'),
-    ('𝘱', 'đ˜»'),
-    ('𝙖', '𝙯'),
-    ('𝚊', 'đš„'),
-    ('𝛂', '𝛚'),
-    ('𝛜', '𝛡'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜛'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝕'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞏'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟉'),
-    ('𝟋', '𝟋'),
-    ('đŒ€', 'đŒ‰'),
-    ('đŒ‹', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀹', 'đž„ƒ'),
-];
-
-pub const MARK: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{483}', '\u{489}'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6df}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', '\u{7f3}'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{819}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('\u{897}', '\u{89f}'),
-    ('\u{8ca}', '\u{8e1}'),
-    ('\u{8e3}', 'à€ƒ'),
-    ('\u{93a}', '\u{93c}'),
-    ('à€Ÿ', 'à„'),
-    ('\u{951}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('\u{981}', 'àŠƒ'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9be}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', '\u{9cd}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', 'àšƒ'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('\u{abc}', '\u{abc}'),
-    ('àȘŸ', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3e}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', '\u{c04}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', 'àȃ'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('àČŸ', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('àłł', 'àłł'),
-    ('\u{d00}', 'àŽƒ'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d3e}', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', '\u{d4d}'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', 'ඃ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Č', 'à·ł'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('àŒŸ', 'àŒż'),
-    ('\u{f71}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('ါ', '\u{103e}'),
-    ('ၖ', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('ၹ', '၀'),
-    ('ၧ', 'ၭ'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{108d}'),
-    ('ႏ', 'ႏ'),
-    ('ႚ', '\u{109d}'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1715}'),
-    ('\u{1732}', '\u{1734}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('\u{1a17}', '\u{1a1b}'),
-    ('ᩕ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', 'ᬄ'),
-    ('\u{1b34}', '\u{1b44}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', 'ἂ'),
-    ('៥', '\u{1bad}'),
-    ('\u{1be6}', '\u{1bf3}'),
-    ('á°€', '\u{1c37}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('áł·', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{302a}', '\u{302f}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{a66f}', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('ê Ł', 'ê §'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('êą€', 'êą'),
-    ('êąŽ', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a953}'),
-    ('\u{a980}', 'ꊃ'),
-    ('\u{a9b3}', '\u{a9c0}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', 'ꩍ'),
-    ('ꩻ', '꩜'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('ê««', 'ê«Ż'),
-    ('ê«”', '\u{aaf6}'),
-    ('êŻŁ', 'êŻȘ'),
-    ('êŻŹ', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('𑀀', '𑀂'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '𑂂'),
-    ('𑂰', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{11134}'),
-    ('𑅅', '𑅆'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '𑆂'),
-    ('𑆳', '\u{111c0}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('𑇎', '\u{111cf}'),
-    ('𑈬', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112ea}'),
-    ('\u{11300}', '𑌃'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{1133e}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍱', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113b8}', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('𑐔', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b0}', '\u{114c3}'),
-    ('\u{115af}', '\u{115b5}'),
-    ('𑖾', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('𑘰', '\u{11640}'),
-    ('\u{116ab}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1172b}'),
-    ('𑠬', '\u{1183a}'),
-    ('\u{11930}', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{1193e}'),
-    ('đ‘„€', 'đ‘„€'),
-    ('đ‘„‚', '\u{11943}'),
-    ('𑧑', '\u{119d7}'),
-    ('\u{119da}', '\u{119e0}'),
-    ('đ‘§€', 'đ‘§€'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', 'đ‘šč'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a99}'),
-    ('𑰯', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('đ‘¶Š', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', '\u{11d97}'),
-    ('\u{11ef3}', 'đ‘»¶'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('đ‘Œƒ', 'đ‘Œƒ'),
-    ('đ‘ŒŽ', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13440}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('đ–œ‘', 'đ–Ÿ‡'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e4ec}', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e94a}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const MATH_SYMBOL: &'static [(char, char)] = &[
-    ('+', '+'),
-    ('<', '>'),
-    ('|', '|'),
-    ('~', '~'),
-    ('¬', '¬'),
-    ('±', '±'),
-    ('×', '×'),
-    ('÷', '÷'),
-    ('϶', '϶'),
-    ('ۆ', 'ۈ'),
-    ('⁄', '⁄'),
-    ('⁒', '⁒'),
-    ('âș', '⁌'),
-    ('₊', '₌'),
-    ('℘', '℘'),
-    ('⅀', '⅄'),
-    ('⅋', '⅋'),
-    ('←', '↔'),
-    ('↚', '↛'),
-    ('↠', '↠'),
-    ('↣', '↣'),
-    ('↩', '↩'),
-    ('↼', '↼'),
-    ('⇎', '⇏'),
-    ('⇒', '⇒'),
-    ('⇔', '⇔'),
-    ('⇮', '⋿'),
-    ('⌠', '⌡'),
-    ('⍌', '⍌'),
-    ('⎛', '⎳'),
-    ('⏜', '⏡'),
-    ('▷', '▷'),
-    ('◁', '◁'),
-    ('◾', '◿'),
-    ('♯', '♯'),
-    ('⟀', '⟄'),
-    ('⟇', '⟄'),
-    ('⟰', '⟿'),
-    (' ', '⩂'),
-    ('⩙', '⧗'),
-    ('⧜', '⧻'),
-    ('â§Ÿ', '⫿'),
-    ('⬰', '⭄'),
-    ('⭇', '⭌'),
-    ('ïŹ©', 'ïŹ©'),
-    ('ïčą', 'ïčą'),
-    ('ïč€', 'ïčŠ'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïżą', 'ïżą'),
-    ('ïż©', 'ïżŹ'),
-    ('𐶎', 'đ¶'),
-    ('𝛁', '𝛁'),
-    ('𝛛', '𝛛'),
-    ('đ›»', 'đ›»'),
-    ('𝜕', '𝜕'),
-    ('đœ”', 'đœ”'),
-    ('𝝏', '𝝏'),
-    ('𝝯', '𝝯'),
-    ('𝞉', '𝞉'),
-    ('đž©', 'đž©'),
-    ('𝟃', '𝟃'),
-    ('đž»°', 'đž»±'),
-];
-
-pub const MODIFIER_LETTER: &'static [(char, char)] = &[
-    ('ʰ', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('ÍŽ', 'ÍŽ'),
-    ('Íș', 'Íș'),
-    ('ՙ', 'ՙ'),
-    ('ـ', 'ـ'),
-    ('Û„', 'ÛŠ'),
-    ('ߎ', 'ߔ'),
-    ('ßș', 'ßș'),
-    ('ࠚ', 'ࠚ'),
-    ('à €', 'à €'),
-    ('à š', 'à š'),
-    ('àŁ‰', 'àŁ‰'),
-    ('à„±', 'à„±'),
-    ('àč†', 'àč†'),
-    ('ໆ', 'ໆ'),
-    ('჌', '჌'),
-    ('ៗ', 'ៗ'),
-    ('᥃', '᥃'),
-    ('áȘ§', 'áȘ§'),
-    ('ᱞ', 'ᱜ'),
-    ('ᎏ', 'á”Ș'),
-    ('ᔞ', 'ᔞ'),
-    ('ᶛ', 'á¶ż'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ⱌ', 'ⱜ'),
-    ('┯', '┯'),
-    ('➯', '➯'),
-    ('々', '々'),
-    ('〱', '〔'),
-    ('〻', '〻'),
-    ('ゝ', 'ゞ'),
-    ('ăƒŒ', 'ăƒŸ'),
-    ('ꀕ', 'ꀕ'),
-    ('ꓞ', 'ꓜ'),
-    ('ꘌ', 'ꘌ'),
-    ('ê™ż', 'ê™ż'),
-    ('ꚜ', 'ꚝ'),
-    ('ꜗ', 'ꜟ'),
-    ('ꝰ', 'ꝰ'),
-    ('ꞈ', 'ꞈ'),
-    ('êŸČ', '꟎'),
-    ('꟞', 'êŸč'),
-    ('ꧏ', 'ꧏ'),
-    ('ê§Š', 'ê§Š'),
-    ('ê©°', 'ê©°'),
-    ('ꫝ', 'ꫝ'),
-    ('ê«ł', '꫎'),
-    ('ꭜ', 'ꭟ'),
-    ('ê­©', 'ê­©'),
-    ('', ''),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐔎', '𐔎'),
-    ('𐔯', '𐔯'),
-    ('𖭀', '𖭃'),
-    ('𖔀', 'đ–”‚'),
-    ('đ–”«', '𖔏'),
-    ('đ–Ÿ“', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𞀰', '𞁭'),
-    ('đž„·', 'đž„œ'),
-    ('đž“«', 'đž“«'),
-    ('đž„‹', 'đž„‹'),
-];
-
-pub const MODIFIER_SYMBOL: &'static [(char, char)] = &[
-    ('^', '^'),
-    ('`', '`'),
-    ('¨', '¨'),
-    ('¯', '¯'),
-    ('´', '´'),
-    ('¸', '¸'),
-    ('˂', '˅'),
-    ('˒', '˟'),
-    ('Ë„', 'Ë«'),
-    ('Ë­', 'Ë­'),
-    ('ËŻ', 'Ëż'),
-    ('Í”', 'Í”'),
-    ('΄', '΅'),
-    ('àąˆ', 'àąˆ'),
-    ('ៜ', 'ៜ'),
-    ('áŸż', '῁'),
-    ('῍', '῏'),
-    ('῝', '῟'),
-    ('῭', '`'),
-    ('áżœ', 'áżŸ'),
-    ('゛', '゜'),
-    ('꜀', '꜖'),
-    ('꜠', 'êœĄ'),
-    ('꞉', '꞊'),
-    ('꭛', '꭛'),
-    ('ê­Ș', 'ê­«'),
-    ('ïźČ', 'ïŻ‚'),
-    ('', ''),
-    ('', ''),
-    ('ïżŁ', 'ïżŁ'),
-    ('đŸ»', '🏿'),
-];
-
-pub const NONSPACING_MARK: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{483}', '\u{487}'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6df}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', '\u{7f3}'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{819}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('\u{897}', '\u{89f}'),
-    ('\u{8ca}', '\u{8e1}'),
-    ('\u{8e3}', '\u{902}'),
-    ('\u{93a}', '\u{93a}'),
-    ('\u{93c}', '\u{93c}'),
-    ('\u{941}', '\u{948}'),
-    ('\u{94d}', '\u{94d}'),
-    ('\u{951}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('\u{981}', '\u{981}'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9c1}', '\u{9c4}'),
-    ('\u{9cd}', '\u{9cd}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', '\u{a02}'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('\u{a41}', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', '\u{a82}'),
-    ('\u{abc}', '\u{abc}'),
-    ('\u{ac1}', '\u{ac5}'),
-    ('\u{ac7}', '\u{ac8}'),
-    ('\u{acd}', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{b01}', '\u{b01}'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3f}', '\u{b3f}'),
-    ('\u{b41}', '\u{b44}'),
-    ('\u{b4d}', '\u{b4d}'),
-    ('\u{b55}', '\u{b56}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bc0}', '\u{bc0}'),
-    ('\u{bcd}', '\u{bcd}'),
-    ('\u{c00}', '\u{c00}'),
-    ('\u{c04}', '\u{c04}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', '\u{c40}'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', '\u{c81}'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('\u{cbf}', '\u{cbf}'),
-    ('\u{cc6}', '\u{cc6}'),
-    ('\u{ccc}', '\u{ccd}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('\u{d00}', '\u{d01}'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d41}', '\u{d44}'),
-    ('\u{d4d}', '\u{d4d}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', '\u{d81}'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dd2}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('\u{f71}', '\u{f7e}'),
-    ('\u{f80}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('\u{102d}', '\u{1030}'),
-    ('\u{1032}', '\u{1037}'),
-    ('\u{1039}', '\u{103a}'),
-    ('\u{103d}', '\u{103e}'),
-    ('\u{1058}', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{1082}'),
-    ('\u{1085}', '\u{1086}'),
-    ('\u{108d}', '\u{108d}'),
-    ('\u{109d}', '\u{109d}'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1714}'),
-    ('\u{1732}', '\u{1733}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17b5}'),
-    ('\u{17b7}', '\u{17bd}'),
-    ('\u{17c6}', '\u{17c6}'),
-    ('\u{17c9}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', '\u{1922}'),
-    ('\u{1927}', '\u{1928}'),
-    ('\u{1932}', '\u{1932}'),
-    ('\u{1939}', '\u{193b}'),
-    ('\u{1a17}', '\u{1a18}'),
-    ('\u{1a1b}', '\u{1a1b}'),
-    ('\u{1a56}', '\u{1a56}'),
-    ('\u{1a58}', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a60}'),
-    ('\u{1a62}', '\u{1a62}'),
-    ('\u{1a65}', '\u{1a6c}'),
-    ('\u{1a73}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1abd}'),
-    ('\u{1abf}', '\u{1ace}'),
-    ('\u{1b00}', '\u{1b03}'),
-    ('\u{1b34}', '\u{1b34}'),
-    ('\u{1b36}', '\u{1b3a}'),
-    ('\u{1b3c}', '\u{1b3c}'),
-    ('\u{1b42}', '\u{1b42}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1b81}'),
-    ('\u{1ba2}', '\u{1ba5}'),
-    ('\u{1ba8}', '\u{1ba9}'),
-    ('\u{1bab}', '\u{1bad}'),
-    ('\u{1be6}', '\u{1be6}'),
-    ('\u{1be8}', '\u{1be9}'),
-    ('\u{1bed}', '\u{1bed}'),
-    ('\u{1bef}', '\u{1bf1}'),
-    ('\u{1c2c}', '\u{1c33}'),
-    ('\u{1c36}', '\u{1c37}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce0}'),
-    ('\u{1ce2}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{20d0}', '\u{20dc}'),
-    ('\u{20e1}', '\u{20e1}'),
-    ('\u{20e5}', '\u{20f0}'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{302a}', '\u{302d}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{a66f}', '\u{a66f}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('\u{a825}', '\u{a826}'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('\u{a8c4}', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a951}'),
-    ('\u{a980}', '\u{a982}'),
-    ('\u{a9b3}', '\u{a9b3}'),
-    ('\u{a9b6}', '\u{a9b9}'),
-    ('\u{a9bc}', '\u{a9bd}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa2e}'),
-    ('\u{aa31}', '\u{aa32}'),
-    ('\u{aa35}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', '\u{aa4c}'),
-    ('\u{aa7c}', '\u{aa7c}'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('\u{aaec}', '\u{aaed}'),
-    ('\u{aaf6}', '\u{aaf6}'),
-    ('\u{abe5}', '\u{abe5}'),
-    ('\u{abe8}', '\u{abe8}'),
-    ('\u{abed}', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('\u{11001}', '\u{11001}'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '\u{11081}'),
-    ('\u{110b3}', '\u{110b6}'),
-    ('\u{110b9}', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{1112b}'),
-    ('\u{1112d}', '\u{11134}'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '\u{11181}'),
-    ('\u{111b6}', '\u{111be}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('\u{111cf}', '\u{111cf}'),
-    ('\u{1122f}', '\u{11231}'),
-    ('\u{11234}', '\u{11234}'),
-    ('\u{11236}', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112df}'),
-    ('\u{112e3}', '\u{112ea}'),
-    ('\u{11300}', '\u{11301}'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{11340}', '\u{11340}'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113bb}', '\u{113c0}'),
-    ('\u{113ce}', '\u{113ce}'),
-    ('\u{113d0}', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('\u{11438}', '\u{1143f}'),
-    ('\u{11442}', '\u{11444}'),
-    ('\u{11446}', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b3}', '\u{114b8}'),
-    ('\u{114ba}', '\u{114ba}'),
-    ('\u{114bf}', '\u{114c0}'),
-    ('\u{114c2}', '\u{114c3}'),
-    ('\u{115b2}', '\u{115b5}'),
-    ('\u{115bc}', '\u{115bd}'),
-    ('\u{115bf}', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('\u{11633}', '\u{1163a}'),
-    ('\u{1163d}', '\u{1163d}'),
-    ('\u{1163f}', '\u{11640}'),
-    ('\u{116ab}', '\u{116ab}'),
-    ('\u{116ad}', '\u{116ad}'),
-    ('\u{116b0}', '\u{116b5}'),
-    ('\u{116b7}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1171d}'),
-    ('\u{1171f}', '\u{1171f}'),
-    ('\u{11722}', '\u{11725}'),
-    ('\u{11727}', '\u{1172b}'),
-    ('\u{1182f}', '\u{11837}'),
-    ('\u{11839}', '\u{1183a}'),
-    ('\u{1193b}', '\u{1193c}'),
-    ('\u{1193e}', '\u{1193e}'),
-    ('\u{11943}', '\u{11943}'),
-    ('\u{119d4}', '\u{119d7}'),
-    ('\u{119da}', '\u{119db}'),
-    ('\u{119e0}', '\u{119e0}'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', '\u{11a38}'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a56}'),
-    ('\u{11a59}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a96}'),
-    ('\u{11a98}', '\u{11a99}'),
-    ('\u{11c30}', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3d}'),
-    ('\u{11c3f}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('\u{11caa}', '\u{11cb0}'),
-    ('\u{11cb2}', '\u{11cb3}'),
-    ('\u{11cb5}', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('\u{11d95}', '\u{11d95}'),
-    ('\u{11d97}', '\u{11d97}'),
-    ('\u{11ef3}', '\u{11ef4}'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('\u{11f36}', '\u{11f3a}'),
-    ('\u{11f40}', '\u{11f40}'),
-    ('\u{11f42}', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13440}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{16129}'),
-    ('\u{1612d}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d167}', '\u{1d169}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e4ec}', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e94a}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const NUMBER: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('²', '³'),
-    ('¹', '¹'),
-    ('¼', '¾'),
-    ('Ù ', 'Ù©'),
-    ('Û°', 'Ûč'),
-    ('߀', '߉'),
-    ('à„Š', 'à„Ż'),
-    ('à§Š', 'à§Ż'),
-    ('à§Ž', 'à§č'),
-    ('੊', 'à©Ż'),
-    ('૊', 'à«Ż'),
-    ('à­Š', 'à­Ż'),
-    ('à­Č', 'à­·'),
-    ('àŻŠ', 'àŻČ'),
-    ('ొ', 'à±Ż'),
-    ('౞', '౟'),
-    ('àłŠ', 'àłŻ'),
-    ('à”˜', 'à”ž'),
-    ('à”Š', 'à”ž'),
-    ('à·Š', 'à·Ż'),
-    ('àč', 'àč™'),
-    ('໐', '໙'),
-    ('àŒ ', 'àŒł'),
-    ('၀', '၉'),
-    ('႐', '႙'),
-    ('፩', 'ፌ'),
-    ('᛼', 'ᛰ'),
-    ('០', '៩'),
-    ('៰', 'áŸč'),
-    ('᠐', '᠙'),
-    ('ᄆ', 'ᄏ'),
-    ('᧐', '᧚'),
-    ('áȘ€', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('᭐', '᭙'),
-    ('áź°', 'áźč'),
-    ('᱀', '᱉'),
-    ('᱐', '᱙'),
-    ('⁰', '⁰'),
-    ('⁎', 'âč'),
-    ('₀', '₉'),
-    ('⅐', 'ↂ'),
-    ('ↅ', '↉'),
-    ('①', '⒛'),
-    ('â“Ș', '⓿'),
-    ('❶', '➓'),
-    ('âłœ', 'âłœ'),
-    ('〇', '〇'),
-    ('〡', '〩'),
-    ('〾', 'ă€ș'),
-    ('㆒', '㆕'),
-    ('㈠', '㈩'),
-    ('㉈', '㉏'),
-    ('㉑', '㉟'),
-    ('㊀', '㊉'),
-    ('㊱', '㊿'),
-    ('꘠', '꘩'),
-    ('ꛊ', 'ê›Ż'),
-    ('ê °', 'ê ”'),
-    ('êŁ', 'êŁ™'),
-    ('ꀀ', 'ꀉ'),
-    ('꧐', '꧙'),
-    ('ê§°', 'ê§č'),
-    ('꩐', '꩙'),
-    ('êŻ°', 'êŻč'),
-    ('', ''),
-    ('𐄇', '𐄳'),
-    ('𐅀', '𐅾'),
-    ('𐆊', '𐆋'),
-    ('𐋡', '𐋻'),
-    ('𐌠', '𐌣'),
-    ('𐍁', '𐍁'),
-    ('𐍊', '𐍊'),
-    ('𐏑', '𐏕'),
-    ('𐒠', '𐒩'),
-    ('𐡘', '𐡟'),
-    ('đĄč', '𐥿'),
-    ('𐹧', '𐹯'),
-    ('𐣻', '𐣿'),
-    ('𐀖', '𐀛'),
-    ('đŠŒ', 'đŠœ'),
-    ('𐧀', '𐧏'),
-    ('𐧒', '𐧿'),
-    ('𐩀', '𐩈'),
-    ('đ©œ', 'đ©Ÿ'),
-    ('đȘ', 'đȘŸ'),
-    ('𐫫', '𐫯'),
-    ('𐭘', '𐭟'),
-    ('𐭞', '𐭿'),
-    ('𐟩', '𐟯'),
-    ('đłș', '𐳿'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔉'),
-    ('đč ', 'đčŸ'),
-    ('đŒ', 'đŒŠ'),
-    ('đœ‘', 'đœ”'),
-    ('𐿅', '𐿋'),
-    ('𑁒', '𑁯'),
-    ('𑃰', 'đ‘ƒč'),
-    ('đ‘„¶', '𑄿'),
-    ('𑇐', '𑇙'),
-    ('𑇡', '𑇮'),
-    ('𑋰', 'đ‘‹č'),
-    ('𑑐', '𑑙'),
-    ('𑓐', '𑓙'),
-    ('𑙐', '𑙙'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜰', 'đ‘œ»'),
-    ('𑣠', 'đ‘ŁČ'),
-    ('𑄐', 'đ‘„™'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑱐', '𑱏'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘œ', 'đ‘œ™'),
-    ('𑿀', '𑿔'),
-    ('𒐀', '𒑼'),
-    ('𖄰', 'đ–„č'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–«€', '𖫉'),
-    ('𖭐', '𖭙'),
-    ('𖭛', '𖭡'),
-    ('đ–”°', 'đ–”č'),
-    ('đ–ș€', 'đ–ș–'),
-    ('𜳰', 'đœłč'),
-    ('𝋀', '𝋓'),
-    ('𝋠', '𝋳'),
-    ('𝍠', '𝍾'),
-    ('𝟎', '𝟿'),
-    ('𞅀', '𞅉'),
-    ('𞋰', 'đž‹č'),
-    ('𞓰', 'đž“č'),
-    ('đž—±', 'đž—ș'),
-    ('𞣇', '𞣏'),
-    ('𞄐', 'đž„™'),
-    ('đž±±', 'đžČ«'),
-    ('đžČ­', 'đžČŻ'),
-    ('đžČ±', 'đžČŽ'),
-    ('𞮁', '𞮭'),
-    ('𞮯', 'đžŽœ'),
-    ('🄀', '🄌'),
-    ('🯰', 'đŸŻč'),
-];
-
-pub const OPEN_PUNCTUATION: &'static [(char, char)] = &[
-    ('(', '('),
-    ('[', '['),
-    ('{', '{'),
-    ('àŒș', 'àŒș'),
-    ('àŒŒ', 'àŒŒ'),
-    ('᚛', '᚛'),
-    ('‚', '‚'),
-    ('„', '„'),
-    ('⁅', '⁅'),
-    ('⁜', '⁜'),
-    ('₍', '₍'),
-    ('⌈', '⌈'),
-    ('⌊', '⌊'),
-    ('⟨', '⟨'),
-    ('❚', '❚'),
-    ('âȘ', 'âȘ'),
-    ('❬', '❬'),
-    ('❼', '❼'),
-    ('❰', '❰'),
-    ('âČ', 'âČ'),
-    ('❎', '❎'),
-    ('⟅', '⟅'),
-    ('⟩', '⟩'),
-    ('⟹', '⟹'),
-    ('âŸȘ', 'âŸȘ'),
-    ('⟬', '⟬'),
-    ('⟼', '⟼'),
-    ('⊃', '⊃'),
-    ('⩅', '⩅'),
-    ('⩇', '⩇'),
-    ('⩉', '⩉'),
-    ('⩋', '⩋'),
-    ('⊍', '⊍'),
-    ('⊏', '⊏'),
-    ('⩑', '⩑'),
-    ('⩓', '⩓'),
-    ('⩕', '⩕'),
-    ('⩗', '⩗'),
-    ('⧘', '⧘'),
-    ('⧚', '⧚'),
-    ('⧌', '⧌'),
-    ('âžą', 'âžą'),
-    ('➀', '➀'),
-    ('➊', '➊'),
-    ('âžš', 'âžš'),
-    ('âč‚', 'âč‚'),
-    ('âč•', 'âč•'),
-    ('âč—', 'âč—'),
-    ('âč™', 'âč™'),
-    ('âč›', 'âč›'),
-    ('〈', '〈'),
-    ('《', '《'),
-    ('「', '「'),
-    ('『', '『'),
-    ('【', '【'),
-    ('〔', '〔'),
-    ('〖', '〖'),
-    ('〘', '〘'),
-    ('〚', '〚'),
-    ('〝', '〝'),
-    ('ïŽż', 'ïŽż'),
-    ('ïž—', 'ïž—'),
-    ('ïž”', 'ïž”'),
-    ('ïž·', 'ïž·'),
-    ('ïžč', 'ïžč'),
-    ('ïž»', 'ïž»'),
-    ('', ''),
-    ('ïžż', 'ïžż'),
-    ('ïč', 'ïč'),
-    ('ïčƒ', 'ïčƒ'),
-    ('ïč‡', 'ïč‡'),
-    ('ïč™', 'ïč™'),
-    ('ïč›', 'ïč›'),
-    ('ïč', 'ïč'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïœą', 'ïœą'),
-];
-
-pub const OTHER: &'static [(char, char)] = &[
-    ('\0', '\u{1f}'),
-    ('\u{7f}', '\u{9f}'),
-    ('\u{ad}', '\u{ad}'),
-    ('\u{378}', '\u{379}'),
-    ('\u{380}', '\u{383}'),
-    ('\u{38b}', '\u{38b}'),
-    ('\u{38d}', '\u{38d}'),
-    ('\u{3a2}', '\u{3a2}'),
-    ('\u{530}', '\u{530}'),
-    ('\u{557}', '\u{558}'),
-    ('\u{58b}', '\u{58c}'),
-    ('\u{590}', '\u{590}'),
-    ('\u{5c8}', '\u{5cf}'),
-    ('\u{5eb}', '\u{5ee}'),
-    ('\u{5f5}', '\u{605}'),
-    ('\u{61c}', '\u{61c}'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('\u{70e}', '\u{70f}'),
-    ('\u{74b}', '\u{74c}'),
-    ('\u{7b2}', '\u{7bf}'),
-    ('\u{7fb}', '\u{7fc}'),
-    ('\u{82e}', '\u{82f}'),
-    ('\u{83f}', '\u{83f}'),
-    ('\u{85c}', '\u{85d}'),
-    ('\u{85f}', '\u{85f}'),
-    ('\u{86b}', '\u{86f}'),
-    ('\u{88f}', '\u{896}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('\u{984}', '\u{984}'),
-    ('\u{98d}', '\u{98e}'),
-    ('\u{991}', '\u{992}'),
-    ('\u{9a9}', '\u{9a9}'),
-    ('\u{9b1}', '\u{9b1}'),
-    ('\u{9b3}', '\u{9b5}'),
-    ('\u{9ba}', '\u{9bb}'),
-    ('\u{9c5}', '\u{9c6}'),
-    ('\u{9c9}', '\u{9ca}'),
-    ('\u{9cf}', '\u{9d6}'),
-    ('\u{9d8}', '\u{9db}'),
-    ('\u{9de}', '\u{9de}'),
-    ('\u{9e4}', '\u{9e5}'),
-    ('\u{9ff}', '\u{a00}'),
-    ('\u{a04}', '\u{a04}'),
-    ('\u{a0b}', '\u{a0e}'),
-    ('\u{a11}', '\u{a12}'),
-    ('\u{a29}', '\u{a29}'),
-    ('\u{a31}', '\u{a31}'),
-    ('\u{a34}', '\u{a34}'),
-    ('\u{a37}', '\u{a37}'),
-    ('\u{a3a}', '\u{a3b}'),
-    ('\u{a3d}', '\u{a3d}'),
-    ('\u{a43}', '\u{a46}'),
-    ('\u{a49}', '\u{a4a}'),
-    ('\u{a4e}', '\u{a50}'),
-    ('\u{a52}', '\u{a58}'),
-    ('\u{a5d}', '\u{a5d}'),
-    ('\u{a5f}', '\u{a65}'),
-    ('\u{a77}', '\u{a80}'),
-    ('\u{a84}', '\u{a84}'),
-    ('\u{a8e}', '\u{a8e}'),
-    ('\u{a92}', '\u{a92}'),
-    ('\u{aa9}', '\u{aa9}'),
-    ('\u{ab1}', '\u{ab1}'),
-    ('\u{ab4}', '\u{ab4}'),
-    ('\u{aba}', '\u{abb}'),
-    ('\u{ac6}', '\u{ac6}'),
-    ('\u{aca}', '\u{aca}'),
-    ('\u{ace}', '\u{acf}'),
-    ('\u{ad1}', '\u{adf}'),
-    ('\u{ae4}', '\u{ae5}'),
-    ('\u{af2}', '\u{af8}'),
-    ('\u{b00}', '\u{b00}'),
-    ('\u{b04}', '\u{b04}'),
-    ('\u{b0d}', '\u{b0e}'),
-    ('\u{b11}', '\u{b12}'),
-    ('\u{b29}', '\u{b29}'),
-    ('\u{b31}', '\u{b31}'),
-    ('\u{b34}', '\u{b34}'),
-    ('\u{b3a}', '\u{b3b}'),
-    ('\u{b45}', '\u{b46}'),
-    ('\u{b49}', '\u{b4a}'),
-    ('\u{b4e}', '\u{b54}'),
-    ('\u{b58}', '\u{b5b}'),
-    ('\u{b5e}', '\u{b5e}'),
-    ('\u{b64}', '\u{b65}'),
-    ('\u{b78}', '\u{b81}'),
-    ('\u{b84}', '\u{b84}'),
-    ('\u{b8b}', '\u{b8d}'),
-    ('\u{b91}', '\u{b91}'),
-    ('\u{b96}', '\u{b98}'),
-    ('\u{b9b}', '\u{b9b}'),
-    ('\u{b9d}', '\u{b9d}'),
-    ('\u{ba0}', '\u{ba2}'),
-    ('\u{ba5}', '\u{ba7}'),
-    ('\u{bab}', '\u{bad}'),
-    ('\u{bba}', '\u{bbd}'),
-    ('\u{bc3}', '\u{bc5}'),
-    ('\u{bc9}', '\u{bc9}'),
-    ('\u{bce}', '\u{bcf}'),
-    ('\u{bd1}', '\u{bd6}'),
-    ('\u{bd8}', '\u{be5}'),
-    ('\u{bfb}', '\u{bff}'),
-    ('\u{c0d}', '\u{c0d}'),
-    ('\u{c11}', '\u{c11}'),
-    ('\u{c29}', '\u{c29}'),
-    ('\u{c3a}', '\u{c3b}'),
-    ('\u{c45}', '\u{c45}'),
-    ('\u{c49}', '\u{c49}'),
-    ('\u{c4e}', '\u{c54}'),
-    ('\u{c57}', '\u{c57}'),
-    ('\u{c5b}', '\u{c5c}'),
-    ('\u{c5e}', '\u{c5f}'),
-    ('\u{c64}', '\u{c65}'),
-    ('\u{c70}', '\u{c76}'),
-    ('\u{c8d}', '\u{c8d}'),
-    ('\u{c91}', '\u{c91}'),
-    ('\u{ca9}', '\u{ca9}'),
-    ('\u{cb4}', '\u{cb4}'),
-    ('\u{cba}', '\u{cbb}'),
-    ('\u{cc5}', '\u{cc5}'),
-    ('\u{cc9}', '\u{cc9}'),
-    ('\u{cce}', '\u{cd4}'),
-    ('\u{cd7}', '\u{cdc}'),
-    ('\u{cdf}', '\u{cdf}'),
-    ('\u{ce4}', '\u{ce5}'),
-    ('\u{cf0}', '\u{cf0}'),
-    ('\u{cf4}', '\u{cff}'),
-    ('\u{d0d}', '\u{d0d}'),
-    ('\u{d11}', '\u{d11}'),
-    ('\u{d45}', '\u{d45}'),
-    ('\u{d49}', '\u{d49}'),
-    ('\u{d50}', '\u{d53}'),
-    ('\u{d64}', '\u{d65}'),
-    ('\u{d80}', '\u{d80}'),
-    ('\u{d84}', '\u{d84}'),
-    ('\u{d97}', '\u{d99}'),
-    ('\u{db2}', '\u{db2}'),
-    ('\u{dbc}', '\u{dbc}'),
-    ('\u{dbe}', '\u{dbf}'),
-    ('\u{dc7}', '\u{dc9}'),
-    ('\u{dcb}', '\u{dce}'),
-    ('\u{dd5}', '\u{dd5}'),
-    ('\u{dd7}', '\u{dd7}'),
-    ('\u{de0}', '\u{de5}'),
-    ('\u{df0}', '\u{df1}'),
-    ('\u{df5}', '\u{e00}'),
-    ('\u{e3b}', '\u{e3e}'),
-    ('\u{e5c}', '\u{e80}'),
-    ('\u{e83}', '\u{e83}'),
-    ('\u{e85}', '\u{e85}'),
-    ('\u{e8b}', '\u{e8b}'),
-    ('\u{ea4}', '\u{ea4}'),
-    ('\u{ea6}', '\u{ea6}'),
-    ('\u{ebe}', '\u{ebf}'),
-    ('\u{ec5}', '\u{ec5}'),
-    ('\u{ec7}', '\u{ec7}'),
-    ('\u{ecf}', '\u{ecf}'),
-    ('\u{eda}', '\u{edb}'),
-    ('\u{ee0}', '\u{eff}'),
-    ('\u{f48}', '\u{f48}'),
-    ('\u{f6d}', '\u{f70}'),
-    ('\u{f98}', '\u{f98}'),
-    ('\u{fbd}', '\u{fbd}'),
-    ('\u{fcd}', '\u{fcd}'),
-    ('\u{fdb}', '\u{fff}'),
-    ('\u{10c6}', '\u{10c6}'),
-    ('\u{10c8}', '\u{10cc}'),
-    ('\u{10ce}', '\u{10cf}'),
-    ('\u{1249}', '\u{1249}'),
-    ('\u{124e}', '\u{124f}'),
-    ('\u{1257}', '\u{1257}'),
-    ('\u{1259}', '\u{1259}'),
-    ('\u{125e}', '\u{125f}'),
-    ('\u{1289}', '\u{1289}'),
-    ('\u{128e}', '\u{128f}'),
-    ('\u{12b1}', '\u{12b1}'),
-    ('\u{12b6}', '\u{12b7}'),
-    ('\u{12bf}', '\u{12bf}'),
-    ('\u{12c1}', '\u{12c1}'),
-    ('\u{12c6}', '\u{12c7}'),
-    ('\u{12d7}', '\u{12d7}'),
-    ('\u{1311}', '\u{1311}'),
-    ('\u{1316}', '\u{1317}'),
-    ('\u{135b}', '\u{135c}'),
-    ('\u{137d}', '\u{137f}'),
-    ('\u{139a}', '\u{139f}'),
-    ('\u{13f6}', '\u{13f7}'),
-    ('\u{13fe}', '\u{13ff}'),
-    ('\u{169d}', '\u{169f}'),
-    ('\u{16f9}', '\u{16ff}'),
-    ('\u{1716}', '\u{171e}'),
-    ('\u{1737}', '\u{173f}'),
-    ('\u{1754}', '\u{175f}'),
-    ('\u{176d}', '\u{176d}'),
-    ('\u{1771}', '\u{1771}'),
-    ('\u{1774}', '\u{177f}'),
-    ('\u{17de}', '\u{17df}'),
-    ('\u{17ea}', '\u{17ef}'),
-    ('\u{17fa}', '\u{17ff}'),
-    ('\u{180e}', '\u{180e}'),
-    ('\u{181a}', '\u{181f}'),
-    ('\u{1879}', '\u{187f}'),
-    ('\u{18ab}', '\u{18af}'),
-    ('\u{18f6}', '\u{18ff}'),
-    ('\u{191f}', '\u{191f}'),
-    ('\u{192c}', '\u{192f}'),
-    ('\u{193c}', '\u{193f}'),
-    ('\u{1941}', '\u{1943}'),
-    ('\u{196e}', '\u{196f}'),
-    ('\u{1975}', '\u{197f}'),
-    ('\u{19ac}', '\u{19af}'),
-    ('\u{19ca}', '\u{19cf}'),
-    ('\u{19db}', '\u{19dd}'),
-    ('\u{1a1c}', '\u{1a1d}'),
-    ('\u{1a5f}', '\u{1a5f}'),
-    ('\u{1a7d}', '\u{1a7e}'),
-    ('\u{1a8a}', '\u{1a8f}'),
-    ('\u{1a9a}', '\u{1a9f}'),
-    ('\u{1aae}', '\u{1aaf}'),
-    ('\u{1acf}', '\u{1aff}'),
-    ('\u{1b4d}', '\u{1b4d}'),
-    ('\u{1bf4}', '\u{1bfb}'),
-    ('\u{1c38}', '\u{1c3a}'),
-    ('\u{1c4a}', '\u{1c4c}'),
-    ('\u{1c8b}', '\u{1c8f}'),
-    ('\u{1cbb}', '\u{1cbc}'),
-    ('\u{1cc8}', '\u{1ccf}'),
-    ('\u{1cfb}', '\u{1cff}'),
-    ('\u{1f16}', '\u{1f17}'),
-    ('\u{1f1e}', '\u{1f1f}'),
-    ('\u{1f46}', '\u{1f47}'),
-    ('\u{1f4e}', '\u{1f4f}'),
-    ('\u{1f58}', '\u{1f58}'),
-    ('\u{1f5a}', '\u{1f5a}'),
-    ('\u{1f5c}', '\u{1f5c}'),
-    ('\u{1f5e}', '\u{1f5e}'),
-    ('\u{1f7e}', '\u{1f7f}'),
-    ('\u{1fb5}', '\u{1fb5}'),
-    ('\u{1fc5}', '\u{1fc5}'),
-    ('\u{1fd4}', '\u{1fd5}'),
-    ('\u{1fdc}', '\u{1fdc}'),
-    ('\u{1ff0}', '\u{1ff1}'),
-    ('\u{1ff5}', '\u{1ff5}'),
-    ('\u{1fff}', '\u{1fff}'),
-    ('\u{200b}', '\u{200f}'),
-    ('\u{202a}', '\u{202e}'),
-    ('\u{2060}', '\u{206f}'),
-    ('\u{2072}', '\u{2073}'),
-    ('\u{208f}', '\u{208f}'),
-    ('\u{209d}', '\u{209f}'),
-    ('\u{20c1}', '\u{20cf}'),
-    ('\u{20f1}', '\u{20ff}'),
-    ('\u{218c}', '\u{218f}'),
-    ('\u{242a}', '\u{243f}'),
-    ('\u{244b}', '\u{245f}'),
-    ('\u{2b74}', '\u{2b75}'),
-    ('\u{2b96}', '\u{2b96}'),
-    ('\u{2cf4}', '\u{2cf8}'),
-    ('\u{2d26}', '\u{2d26}'),
-    ('\u{2d28}', '\u{2d2c}'),
-    ('\u{2d2e}', '\u{2d2f}'),
-    ('\u{2d68}', '\u{2d6e}'),
-    ('\u{2d71}', '\u{2d7e}'),
-    ('\u{2d97}', '\u{2d9f}'),
-    ('\u{2da7}', '\u{2da7}'),
-    ('\u{2daf}', '\u{2daf}'),
-    ('\u{2db7}', '\u{2db7}'),
-    ('\u{2dbf}', '\u{2dbf}'),
-    ('\u{2dc7}', '\u{2dc7}'),
-    ('\u{2dcf}', '\u{2dcf}'),
-    ('\u{2dd7}', '\u{2dd7}'),
-    ('\u{2ddf}', '\u{2ddf}'),
-    ('\u{2e5e}', '\u{2e7f}'),
-    ('\u{2e9a}', '\u{2e9a}'),
-    ('\u{2ef4}', '\u{2eff}'),
-    ('\u{2fd6}', '\u{2fef}'),
-    ('\u{3040}', '\u{3040}'),
-    ('\u{3097}', '\u{3098}'),
-    ('\u{3100}', '\u{3104}'),
-    ('\u{3130}', '\u{3130}'),
-    ('\u{318f}', '\u{318f}'),
-    ('\u{31e6}', '\u{31ee}'),
-    ('\u{321f}', '\u{321f}'),
-    ('\u{a48d}', '\u{a48f}'),
-    ('\u{a4c7}', '\u{a4cf}'),
-    ('\u{a62c}', '\u{a63f}'),
-    ('\u{a6f8}', '\u{a6ff}'),
-    ('\u{a7ce}', '\u{a7cf}'),
-    ('\u{a7d2}', '\u{a7d2}'),
-    ('\u{a7d4}', '\u{a7d4}'),
-    ('\u{a7dd}', '\u{a7f1}'),
-    ('\u{a82d}', '\u{a82f}'),
-    ('\u{a83a}', '\u{a83f}'),
-    ('\u{a878}', '\u{a87f}'),
-    ('\u{a8c6}', '\u{a8cd}'),
-    ('\u{a8da}', '\u{a8df}'),
-    ('\u{a954}', '\u{a95e}'),
-    ('\u{a97d}', '\u{a97f}'),
-    ('\u{a9ce}', '\u{a9ce}'),
-    ('\u{a9da}', '\u{a9dd}'),
-    ('\u{a9ff}', '\u{a9ff}'),
-    ('\u{aa37}', '\u{aa3f}'),
-    ('\u{aa4e}', '\u{aa4f}'),
-    ('\u{aa5a}', '\u{aa5b}'),
-    ('\u{aac3}', '\u{aada}'),
-    ('\u{aaf7}', '\u{ab00}'),
-    ('\u{ab07}', '\u{ab08}'),
-    ('\u{ab0f}', '\u{ab10}'),
-    ('\u{ab17}', '\u{ab1f}'),
-    ('\u{ab27}', '\u{ab27}'),
-    ('\u{ab2f}', '\u{ab2f}'),
-    ('\u{ab6c}', '\u{ab6f}'),
-    ('\u{abee}', '\u{abef}'),
-    ('\u{abfa}', '\u{abff}'),
-    ('\u{d7a4}', '\u{d7af}'),
-    ('\u{d7c7}', '\u{d7ca}'),
-    ('\u{d7fc}', '\u{f8ff}'),
-    ('\u{fa6e}', '\u{fa6f}'),
-    ('\u{fada}', '\u{faff}'),
-    ('\u{fb07}', '\u{fb12}'),
-    ('\u{fb18}', '\u{fb1c}'),
-    ('\u{fb37}', '\u{fb37}'),
-    ('\u{fb3d}', '\u{fb3d}'),
-    ('\u{fb3f}', '\u{fb3f}'),
-    ('\u{fb42}', '\u{fb42}'),
-    ('\u{fb45}', '\u{fb45}'),
-    ('\u{fbc3}', '\u{fbd2}'),
-    ('\u{fd90}', '\u{fd91}'),
-    ('\u{fdc8}', '\u{fdce}'),
-    ('\u{fdd0}', '\u{fdef}'),
-    ('\u{fe1a}', '\u{fe1f}'),
-    ('\u{fe53}', '\u{fe53}'),
-    ('\u{fe67}', '\u{fe67}'),
-    ('\u{fe6c}', '\u{fe6f}'),
-    ('\u{fe75}', '\u{fe75}'),
-    ('\u{fefd}', '\u{ff00}'),
-    ('\u{ffbf}', '\u{ffc1}'),
-    ('\u{ffc8}', '\u{ffc9}'),
-    ('\u{ffd0}', '\u{ffd1}'),
-    ('\u{ffd8}', '\u{ffd9}'),
-    ('\u{ffdd}', '\u{ffdf}'),
-    ('\u{ffe7}', '\u{ffe7}'),
-    ('\u{ffef}', '\u{fffb}'),
-    ('\u{fffe}', '\u{ffff}'),
-    ('\u{1000c}', '\u{1000c}'),
-    ('\u{10027}', '\u{10027}'),
-    ('\u{1003b}', '\u{1003b}'),
-    ('\u{1003e}', '\u{1003e}'),
-    ('\u{1004e}', '\u{1004f}'),
-    ('\u{1005e}', '\u{1007f}'),
-    ('\u{100fb}', '\u{100ff}'),
-    ('\u{10103}', '\u{10106}'),
-    ('\u{10134}', '\u{10136}'),
-    ('\u{1018f}', '\u{1018f}'),
-    ('\u{1019d}', '\u{1019f}'),
-    ('\u{101a1}', '\u{101cf}'),
-    ('\u{101fe}', '\u{1027f}'),
-    ('\u{1029d}', '\u{1029f}'),
-    ('\u{102d1}', '\u{102df}'),
-    ('\u{102fc}', '\u{102ff}'),
-    ('\u{10324}', '\u{1032c}'),
-    ('\u{1034b}', '\u{1034f}'),
-    ('\u{1037b}', '\u{1037f}'),
-    ('\u{1039e}', '\u{1039e}'),
-    ('\u{103c4}', '\u{103c7}'),
-    ('\u{103d6}', '\u{103ff}'),
-    ('\u{1049e}', '\u{1049f}'),
-    ('\u{104aa}', '\u{104af}'),
-    ('\u{104d4}', '\u{104d7}'),
-    ('\u{104fc}', '\u{104ff}'),
-    ('\u{10528}', '\u{1052f}'),
-    ('\u{10564}', '\u{1056e}'),
-    ('\u{1057b}', '\u{1057b}'),
-    ('\u{1058b}', '\u{1058b}'),
-    ('\u{10593}', '\u{10593}'),
-    ('\u{10596}', '\u{10596}'),
-    ('\u{105a2}', '\u{105a2}'),
-    ('\u{105b2}', '\u{105b2}'),
-    ('\u{105ba}', '\u{105ba}'),
-    ('\u{105bd}', '\u{105bf}'),
-    ('\u{105f4}', '\u{105ff}'),
-    ('\u{10737}', '\u{1073f}'),
-    ('\u{10756}', '\u{1075f}'),
-    ('\u{10768}', '\u{1077f}'),
-    ('\u{10786}', '\u{10786}'),
-    ('\u{107b1}', '\u{107b1}'),
-    ('\u{107bb}', '\u{107ff}'),
-    ('\u{10806}', '\u{10807}'),
-    ('\u{10809}', '\u{10809}'),
-    ('\u{10836}', '\u{10836}'),
-    ('\u{10839}', '\u{1083b}'),
-    ('\u{1083d}', '\u{1083e}'),
-    ('\u{10856}', '\u{10856}'),
-    ('\u{1089f}', '\u{108a6}'),
-    ('\u{108b0}', '\u{108df}'),
-    ('\u{108f3}', '\u{108f3}'),
-    ('\u{108f6}', '\u{108fa}'),
-    ('\u{1091c}', '\u{1091e}'),
-    ('\u{1093a}', '\u{1093e}'),
-    ('\u{10940}', '\u{1097f}'),
-    ('\u{109b8}', '\u{109bb}'),
-    ('\u{109d0}', '\u{109d1}'),
-    ('\u{10a04}', '\u{10a04}'),
-    ('\u{10a07}', '\u{10a0b}'),
-    ('\u{10a14}', '\u{10a14}'),
-    ('\u{10a18}', '\u{10a18}'),
-    ('\u{10a36}', '\u{10a37}'),
-    ('\u{10a3b}', '\u{10a3e}'),
-    ('\u{10a49}', '\u{10a4f}'),
-    ('\u{10a59}', '\u{10a5f}'),
-    ('\u{10aa0}', '\u{10abf}'),
-    ('\u{10ae7}', '\u{10aea}'),
-    ('\u{10af7}', '\u{10aff}'),
-    ('\u{10b36}', '\u{10b38}'),
-    ('\u{10b56}', '\u{10b57}'),
-    ('\u{10b73}', '\u{10b77}'),
-    ('\u{10b92}', '\u{10b98}'),
-    ('\u{10b9d}', '\u{10ba8}'),
-    ('\u{10bb0}', '\u{10bff}'),
-    ('\u{10c49}', '\u{10c7f}'),
-    ('\u{10cb3}', '\u{10cbf}'),
-    ('\u{10cf3}', '\u{10cf9}'),
-    ('\u{10d28}', '\u{10d2f}'),
-    ('\u{10d3a}', '\u{10d3f}'),
-    ('\u{10d66}', '\u{10d68}'),
-    ('\u{10d86}', '\u{10d8d}'),
-    ('\u{10d90}', '\u{10e5f}'),
-    ('\u{10e7f}', '\u{10e7f}'),
-    ('\u{10eaa}', '\u{10eaa}'),
-    ('\u{10eae}', '\u{10eaf}'),
-    ('\u{10eb2}', '\u{10ec1}'),
-    ('\u{10ec5}', '\u{10efb}'),
-    ('\u{10f28}', '\u{10f2f}'),
-    ('\u{10f5a}', '\u{10f6f}'),
-    ('\u{10f8a}', '\u{10faf}'),
-    ('\u{10fcc}', '\u{10fdf}'),
-    ('\u{10ff7}', '\u{10fff}'),
-    ('\u{1104e}', '\u{11051}'),
-    ('\u{11076}', '\u{1107e}'),
-    ('\u{110bd}', '\u{110bd}'),
-    ('\u{110c3}', '\u{110cf}'),
-    ('\u{110e9}', '\u{110ef}'),
-    ('\u{110fa}', '\u{110ff}'),
-    ('\u{11135}', '\u{11135}'),
-    ('\u{11148}', '\u{1114f}'),
-    ('\u{11177}', '\u{1117f}'),
-    ('\u{111e0}', '\u{111e0}'),
-    ('\u{111f5}', '\u{111ff}'),
-    ('\u{11212}', '\u{11212}'),
-    ('\u{11242}', '\u{1127f}'),
-    ('\u{11287}', '\u{11287}'),
-    ('\u{11289}', '\u{11289}'),
-    ('\u{1128e}', '\u{1128e}'),
-    ('\u{1129e}', '\u{1129e}'),
-    ('\u{112aa}', '\u{112af}'),
-    ('\u{112eb}', '\u{112ef}'),
-    ('\u{112fa}', '\u{112ff}'),
-    ('\u{11304}', '\u{11304}'),
-    ('\u{1130d}', '\u{1130e}'),
-    ('\u{11311}', '\u{11312}'),
-    ('\u{11329}', '\u{11329}'),
-    ('\u{11331}', '\u{11331}'),
-    ('\u{11334}', '\u{11334}'),
-    ('\u{1133a}', '\u{1133a}'),
-    ('\u{11345}', '\u{11346}'),
-    ('\u{11349}', '\u{1134a}'),
-    ('\u{1134e}', '\u{1134f}'),
-    ('\u{11351}', '\u{11356}'),
-    ('\u{11358}', '\u{1135c}'),
-    ('\u{11364}', '\u{11365}'),
-    ('\u{1136d}', '\u{1136f}'),
-    ('\u{11375}', '\u{1137f}'),
-    ('\u{1138a}', '\u{1138a}'),
-    ('\u{1138c}', '\u{1138d}'),
-    ('\u{1138f}', '\u{1138f}'),
-    ('\u{113b6}', '\u{113b6}'),
-    ('\u{113c1}', '\u{113c1}'),
-    ('\u{113c3}', '\u{113c4}'),
-    ('\u{113c6}', '\u{113c6}'),
-    ('\u{113cb}', '\u{113cb}'),
-    ('\u{113d6}', '\u{113d6}'),
-    ('\u{113d9}', '\u{113e0}'),
-    ('\u{113e3}', '\u{113ff}'),
-    ('\u{1145c}', '\u{1145c}'),
-    ('\u{11462}', '\u{1147f}'),
-    ('\u{114c8}', '\u{114cf}'),
-    ('\u{114da}', '\u{1157f}'),
-    ('\u{115b6}', '\u{115b7}'),
-    ('\u{115de}', '\u{115ff}'),
-    ('\u{11645}', '\u{1164f}'),
-    ('\u{1165a}', '\u{1165f}'),
-    ('\u{1166d}', '\u{1167f}'),
-    ('\u{116ba}', '\u{116bf}'),
-    ('\u{116ca}', '\u{116cf}'),
-    ('\u{116e4}', '\u{116ff}'),
-    ('\u{1171b}', '\u{1171c}'),
-    ('\u{1172c}', '\u{1172f}'),
-    ('\u{11747}', '\u{117ff}'),
-    ('\u{1183c}', '\u{1189f}'),
-    ('\u{118f3}', '\u{118fe}'),
-    ('\u{11907}', '\u{11908}'),
-    ('\u{1190a}', '\u{1190b}'),
-    ('\u{11914}', '\u{11914}'),
-    ('\u{11917}', '\u{11917}'),
-    ('\u{11936}', '\u{11936}'),
-    ('\u{11939}', '\u{1193a}'),
-    ('\u{11947}', '\u{1194f}'),
-    ('\u{1195a}', '\u{1199f}'),
-    ('\u{119a8}', '\u{119a9}'),
-    ('\u{119d8}', '\u{119d9}'),
-    ('\u{119e5}', '\u{119ff}'),
-    ('\u{11a48}', '\u{11a4f}'),
-    ('\u{11aa3}', '\u{11aaf}'),
-    ('\u{11af9}', '\u{11aff}'),
-    ('\u{11b0a}', '\u{11bbf}'),
-    ('\u{11be2}', '\u{11bef}'),
-    ('\u{11bfa}', '\u{11bff}'),
-    ('\u{11c09}', '\u{11c09}'),
-    ('\u{11c37}', '\u{11c37}'),
-    ('\u{11c46}', '\u{11c4f}'),
-    ('\u{11c6d}', '\u{11c6f}'),
-    ('\u{11c90}', '\u{11c91}'),
-    ('\u{11ca8}', '\u{11ca8}'),
-    ('\u{11cb7}', '\u{11cff}'),
-    ('\u{11d07}', '\u{11d07}'),
-    ('\u{11d0a}', '\u{11d0a}'),
-    ('\u{11d37}', '\u{11d39}'),
-    ('\u{11d3b}', '\u{11d3b}'),
-    ('\u{11d3e}', '\u{11d3e}'),
-    ('\u{11d48}', '\u{11d4f}'),
-    ('\u{11d5a}', '\u{11d5f}'),
-    ('\u{11d66}', '\u{11d66}'),
-    ('\u{11d69}', '\u{11d69}'),
-    ('\u{11d8f}', '\u{11d8f}'),
-    ('\u{11d92}', '\u{11d92}'),
-    ('\u{11d99}', '\u{11d9f}'),
-    ('\u{11daa}', '\u{11edf}'),
-    ('\u{11ef9}', '\u{11eff}'),
-    ('\u{11f11}', '\u{11f11}'),
-    ('\u{11f3b}', '\u{11f3d}'),
-    ('\u{11f5b}', '\u{11faf}'),
-    ('\u{11fb1}', '\u{11fbf}'),
-    ('\u{11ff2}', '\u{11ffe}'),
-    ('\u{1239a}', '\u{123ff}'),
-    ('\u{1246f}', '\u{1246f}'),
-    ('\u{12475}', '\u{1247f}'),
-    ('\u{12544}', '\u{12f8f}'),
-    ('\u{12ff3}', '\u{12fff}'),
-    ('\u{13430}', '\u{1343f}'),
-    ('\u{13456}', '\u{1345f}'),
-    ('\u{143fb}', '\u{143ff}'),
-    ('\u{14647}', '\u{160ff}'),
-    ('\u{1613a}', '\u{167ff}'),
-    ('\u{16a39}', '\u{16a3f}'),
-    ('\u{16a5f}', '\u{16a5f}'),
-    ('\u{16a6a}', '\u{16a6d}'),
-    ('\u{16abf}', '\u{16abf}'),
-    ('\u{16aca}', '\u{16acf}'),
-    ('\u{16aee}', '\u{16aef}'),
-    ('\u{16af6}', '\u{16aff}'),
-    ('\u{16b46}', '\u{16b4f}'),
-    ('\u{16b5a}', '\u{16b5a}'),
-    ('\u{16b62}', '\u{16b62}'),
-    ('\u{16b78}', '\u{16b7c}'),
-    ('\u{16b90}', '\u{16d3f}'),
-    ('\u{16d7a}', '\u{16e3f}'),
-    ('\u{16e9b}', '\u{16eff}'),
-    ('\u{16f4b}', '\u{16f4e}'),
-    ('\u{16f88}', '\u{16f8e}'),
-    ('\u{16fa0}', '\u{16fdf}'),
-    ('\u{16fe5}', '\u{16fef}'),
-    ('\u{16ff2}', '\u{16fff}'),
-    ('\u{187f8}', '\u{187ff}'),
-    ('\u{18cd6}', '\u{18cfe}'),
-    ('\u{18d09}', '\u{1afef}'),
-    ('\u{1aff4}', '\u{1aff4}'),
-    ('\u{1affc}', '\u{1affc}'),
-    ('\u{1afff}', '\u{1afff}'),
-    ('\u{1b123}', '\u{1b131}'),
-    ('\u{1b133}', '\u{1b14f}'),
-    ('\u{1b153}', '\u{1b154}'),
-    ('\u{1b156}', '\u{1b163}'),
-    ('\u{1b168}', '\u{1b16f}'),
-    ('\u{1b2fc}', '\u{1bbff}'),
-    ('\u{1bc6b}', '\u{1bc6f}'),
-    ('\u{1bc7d}', '\u{1bc7f}'),
-    ('\u{1bc89}', '\u{1bc8f}'),
-    ('\u{1bc9a}', '\u{1bc9b}'),
-    ('\u{1bca0}', '\u{1cbff}'),
-    ('\u{1ccfa}', '\u{1ccff}'),
-    ('\u{1ceb4}', '\u{1ceff}'),
-    ('\u{1cf2e}', '\u{1cf2f}'),
-    ('\u{1cf47}', '\u{1cf4f}'),
-    ('\u{1cfc4}', '\u{1cfff}'),
-    ('\u{1d0f6}', '\u{1d0ff}'),
-    ('\u{1d127}', '\u{1d128}'),
-    ('\u{1d173}', '\u{1d17a}'),
-    ('\u{1d1eb}', '\u{1d1ff}'),
-    ('\u{1d246}', '\u{1d2bf}'),
-    ('\u{1d2d4}', '\u{1d2df}'),
-    ('\u{1d2f4}', '\u{1d2ff}'),
-    ('\u{1d357}', '\u{1d35f}'),
-    ('\u{1d379}', '\u{1d3ff}'),
-    ('\u{1d455}', '\u{1d455}'),
-    ('\u{1d49d}', '\u{1d49d}'),
-    ('\u{1d4a0}', '\u{1d4a1}'),
-    ('\u{1d4a3}', '\u{1d4a4}'),
-    ('\u{1d4a7}', '\u{1d4a8}'),
-    ('\u{1d4ad}', '\u{1d4ad}'),
-    ('\u{1d4ba}', '\u{1d4ba}'),
-    ('\u{1d4bc}', '\u{1d4bc}'),
-    ('\u{1d4c4}', '\u{1d4c4}'),
-    ('\u{1d506}', '\u{1d506}'),
-    ('\u{1d50b}', '\u{1d50c}'),
-    ('\u{1d515}', '\u{1d515}'),
-    ('\u{1d51d}', '\u{1d51d}'),
-    ('\u{1d53a}', '\u{1d53a}'),
-    ('\u{1d53f}', '\u{1d53f}'),
-    ('\u{1d545}', '\u{1d545}'),
-    ('\u{1d547}', '\u{1d549}'),
-    ('\u{1d551}', '\u{1d551}'),
-    ('\u{1d6a6}', '\u{1d6a7}'),
-    ('\u{1d7cc}', '\u{1d7cd}'),
-    ('\u{1da8c}', '\u{1da9a}'),
-    ('\u{1daa0}', '\u{1daa0}'),
-    ('\u{1dab0}', '\u{1deff}'),
-    ('\u{1df1f}', '\u{1df24}'),
-    ('\u{1df2b}', '\u{1dfff}'),
-    ('\u{1e007}', '\u{1e007}'),
-    ('\u{1e019}', '\u{1e01a}'),
-    ('\u{1e022}', '\u{1e022}'),
-    ('\u{1e025}', '\u{1e025}'),
-    ('\u{1e02b}', '\u{1e02f}'),
-    ('\u{1e06e}', '\u{1e08e}'),
-    ('\u{1e090}', '\u{1e0ff}'),
-    ('\u{1e12d}', '\u{1e12f}'),
-    ('\u{1e13e}', '\u{1e13f}'),
-    ('\u{1e14a}', '\u{1e14d}'),
-    ('\u{1e150}', '\u{1e28f}'),
-    ('\u{1e2af}', '\u{1e2bf}'),
-    ('\u{1e2fa}', '\u{1e2fe}'),
-    ('\u{1e300}', '\u{1e4cf}'),
-    ('\u{1e4fa}', '\u{1e5cf}'),
-    ('\u{1e5fb}', '\u{1e5fe}'),
-    ('\u{1e600}', '\u{1e7df}'),
-    ('\u{1e7e7}', '\u{1e7e7}'),
-    ('\u{1e7ec}', '\u{1e7ec}'),
-    ('\u{1e7ef}', '\u{1e7ef}'),
-    ('\u{1e7ff}', '\u{1e7ff}'),
-    ('\u{1e8c5}', '\u{1e8c6}'),
-    ('\u{1e8d7}', '\u{1e8ff}'),
-    ('\u{1e94c}', '\u{1e94f}'),
-    ('\u{1e95a}', '\u{1e95d}'),
-    ('\u{1e960}', '\u{1ec70}'),
-    ('\u{1ecb5}', '\u{1ed00}'),
-    ('\u{1ed3e}', '\u{1edff}'),
-    ('\u{1ee04}', '\u{1ee04}'),
-    ('\u{1ee20}', '\u{1ee20}'),
-    ('\u{1ee23}', '\u{1ee23}'),
-    ('\u{1ee25}', '\u{1ee26}'),
-    ('\u{1ee28}', '\u{1ee28}'),
-    ('\u{1ee33}', '\u{1ee33}'),
-    ('\u{1ee38}', '\u{1ee38}'),
-    ('\u{1ee3a}', '\u{1ee3a}'),
-    ('\u{1ee3c}', '\u{1ee41}'),
-    ('\u{1ee43}', '\u{1ee46}'),
-    ('\u{1ee48}', '\u{1ee48}'),
-    ('\u{1ee4a}', '\u{1ee4a}'),
-    ('\u{1ee4c}', '\u{1ee4c}'),
-    ('\u{1ee50}', '\u{1ee50}'),
-    ('\u{1ee53}', '\u{1ee53}'),
-    ('\u{1ee55}', '\u{1ee56}'),
-    ('\u{1ee58}', '\u{1ee58}'),
-    ('\u{1ee5a}', '\u{1ee5a}'),
-    ('\u{1ee5c}', '\u{1ee5c}'),
-    ('\u{1ee5e}', '\u{1ee5e}'),
-    ('\u{1ee60}', '\u{1ee60}'),
-    ('\u{1ee63}', '\u{1ee63}'),
-    ('\u{1ee65}', '\u{1ee66}'),
-    ('\u{1ee6b}', '\u{1ee6b}'),
-    ('\u{1ee73}', '\u{1ee73}'),
-    ('\u{1ee78}', '\u{1ee78}'),
-    ('\u{1ee7d}', '\u{1ee7d}'),
-    ('\u{1ee7f}', '\u{1ee7f}'),
-    ('\u{1ee8a}', '\u{1ee8a}'),
-    ('\u{1ee9c}', '\u{1eea0}'),
-    ('\u{1eea4}', '\u{1eea4}'),
-    ('\u{1eeaa}', '\u{1eeaa}'),
-    ('\u{1eebc}', '\u{1eeef}'),
-    ('\u{1eef2}', '\u{1efff}'),
-    ('\u{1f02c}', '\u{1f02f}'),
-    ('\u{1f094}', '\u{1f09f}'),
-    ('\u{1f0af}', '\u{1f0b0}'),
-    ('\u{1f0c0}', '\u{1f0c0}'),
-    ('\u{1f0d0}', '\u{1f0d0}'),
-    ('\u{1f0f6}', '\u{1f0ff}'),
-    ('\u{1f1ae}', '\u{1f1e5}'),
-    ('\u{1f203}', '\u{1f20f}'),
-    ('\u{1f23c}', '\u{1f23f}'),
-    ('\u{1f249}', '\u{1f24f}'),
-    ('\u{1f252}', '\u{1f25f}'),
-    ('\u{1f266}', '\u{1f2ff}'),
-    ('\u{1f6d8}', '\u{1f6db}'),
-    ('\u{1f6ed}', '\u{1f6ef}'),
-    ('\u{1f6fd}', '\u{1f6ff}'),
-    ('\u{1f777}', '\u{1f77a}'),
-    ('\u{1f7da}', '\u{1f7df}'),
-    ('\u{1f7ec}', '\u{1f7ef}'),
-    ('\u{1f7f1}', '\u{1f7ff}'),
-    ('\u{1f80c}', '\u{1f80f}'),
-    ('\u{1f848}', '\u{1f84f}'),
-    ('\u{1f85a}', '\u{1f85f}'),
-    ('\u{1f888}', '\u{1f88f}'),
-    ('\u{1f8ae}', '\u{1f8af}'),
-    ('\u{1f8bc}', '\u{1f8bf}'),
-    ('\u{1f8c2}', '\u{1f8ff}'),
-    ('\u{1fa54}', '\u{1fa5f}'),
-    ('\u{1fa6e}', '\u{1fa6f}'),
-    ('\u{1fa7d}', '\u{1fa7f}'),
-    ('\u{1fa8a}', '\u{1fa8e}'),
-    ('\u{1fac7}', '\u{1facd}'),
-    ('\u{1fadd}', '\u{1fade}'),
-    ('\u{1faea}', '\u{1faef}'),
-    ('\u{1faf9}', '\u{1faff}'),
-    ('\u{1fb93}', '\u{1fb93}'),
-    ('\u{1fbfa}', '\u{1ffff}'),
-    ('\u{2a6e0}', '\u{2a6ff}'),
-    ('\u{2b73a}', '\u{2b73f}'),
-    ('\u{2b81e}', '\u{2b81f}'),
-    ('\u{2cea2}', '\u{2ceaf}'),
-    ('\u{2ebe1}', '\u{2ebef}'),
-    ('\u{2ee5e}', '\u{2f7ff}'),
-    ('\u{2fa1e}', '\u{2ffff}'),
-    ('\u{3134b}', '\u{3134f}'),
-    ('\u{323b0}', '\u{e00ff}'),
-    ('\u{e01f0}', '\u{10ffff}'),
-];
-
-pub const OTHER_LETTER: &'static [(char, char)] = &[
-    ('ª', 'ª'),
-    ('º', 'º'),
-    ('Æ»', 'Æ»'),
-    ('ǀ', 'ǃ'),
-    ('ʔ', 'ʔ'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('Ű ', 'Űż'),
-    ('ف', 'ي'),
-    ('Ùź', 'ÙŻ'),
-    ('ٱ', 'ۓ'),
-    ('ە', 'ە'),
-    ('Ûź', 'ÛŻ'),
-    ('Ûș', 'ÛŒ'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', 'ܐ'),
-    ('ܒ', 'ܯ'),
-    ('ʍ', 'Ț„'),
-    ('Ț±', 'Ț±'),
-    ('ߊ', 'ßȘ'),
-    ('ࠀ', 'ࠕ'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('àą ', 'àŁˆ'),
-    ('à€„', 'à€č'),
-    ('à€œ', 'à€œ'),
-    ('à„', 'à„'),
-    ('à„˜', 'à„Ą'),
-    ('à„Č', 'àŠ€'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', 'àŠœ'),
-    ('ৎ', 'ৎ'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'à§Ą'),
-    ('à§°', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('à©Č', '੎'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', 'àȘœ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à«Ą'),
-    ('à«č', 'à«č'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', 'àŹœ'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­±', 'à­±'),
-    ('àźƒ', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('àŻ', 'àŻ'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('జ', 'జ'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', 'à±Ą'),
-    ('àȀ', 'àȀ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àČœ'),
-    ('àł', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àł±', 'àłČ'),
-    ('àŽ„', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', 'àŽœ'),
-    ('à”Ž', 'à”Ž'),
-    ('à””', 'à”–'),
-    ('à”Ÿ', 'à”Ą'),
-    ('à”ș', 'à”ż'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('àž', 'àž°'),
-    ('àžČ', 'àžł'),
-    ('àč€', 'àč…'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àș°'),
-    ('àșČ', 'àșł'),
-    ('àșœ', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('àœ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('àŸˆ', 'àŸŒ'),
-    ('က', 'á€Ș'),
-    ('ဿ', 'ဿ'),
-    ('ၐ', 'ၕ'),
-    ('ၚ', 'ၝ'),
-    ('ၥ', 'ၥ'),
-    ('၄', '၊'),
-    ('ၟ', 'ၰ'),
-    ('ၔ', 'ႁ'),
-    ('ႎ', 'ႎ'),
-    ('ᄀ', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('ᎀ', 'ᎏ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('ᛱ', '᛾'),
-    ('ᜀ', 'ᜑ'),
-    ('ᜟ', 'ᜱ'),
-    ('ᝀ', 'ᝑ'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('ក', 'ឳ'),
-    ('ៜ', 'ៜ'),
-    ('ᠠ', 'ᡂ'),
-    ('ᡄ', '᡾'),
-    ('᱀', '᱄'),
-    ('᱇', 'ᱹ'),
-    ('áąȘ', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('ᄐ', 'ᄭ'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('Ṁ', 'Ṗ'),
-    ('áš ', 'ᩔ'),
-    ('ᬅ', 'ᬳ'),
-    ('ᭅ', 'ᭌ'),
-    ('ៃ', '០'),
-    ('៟', '៯'),
-    ('áźș', 'ᯄ'),
-    ('ᰀ', 'ᰣ'),
-    ('ᱍ', 'ᱏ'),
-    ('ᱚ', 'ᱷ'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł¶'),
-    ('áłș', 'áłș'),
-    ('ℵ', 'ℾ'),
-    ('⎰', '┧'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('〆', '〆'),
-    ('ă€Œ', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('ゟ', 'ゟ'),
-    ('ァ', 'ăƒș'),
-    ('ヿ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꀔ'),
-    ('ꀖ', 'ꒌ'),
-    ('ꓐ', 'ꓷ'),
-    ('ꔀ', 'ꘋ'),
-    ('ꘐ', 'ꘟ'),
-    ('ê˜Ș', 'ꘫ'),
-    ('ê™ź', 'ê™ź'),
-    ('ꚠ', 'ꛄ'),
-    ('ꞏ', 'ꞏ'),
-    ('ꟷ', 'ꟷ'),
-    ('ꟻ', 'ꠁ'),
-    ('ꠃ', 'ꠅ'),
-    ('ꠇ', 'ꠊ'),
-    ('ꠌ', 'ê ą'),
-    ('êĄ€', 'êĄł'),
-    ('êą‚', 'êął'),
-    ('êŁČ', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', 'êŁŸ'),
-    ('ꀊ', 'ꀄ'),
-    ('ꀰ', 'ꄆ'),
-    ('ꄠ', 'ꄌ'),
-    ('ꊄ', 'êŠČ'),
-    ('ê§ ', 'ê§€'),
-    ('ê§§', 'ê§Ż'),
-    ('ê§ș', 'ê§Ÿ'),
-    ('Ꚁ', 'êšš'),
-    ('ꩀ', 'ꩂ'),
-    ('ꩄ', 'ꩋ'),
-    ('ê© ', 'ê©Ż'),
-    ('ꩱ', 'ꩶ'),
-    ('ê©ș', 'ê©ș'),
-    ('꩟', 'êȘŻ'),
-    ('êȘ±', 'êȘ±'),
-    ('êȘ”', 'êȘ¶'),
-    ('êȘč', 'êȘœ'),
-    ('ꫀ', 'ꫀ'),
-    ('ꫂ', 'ꫂ'),
-    ('ꫛ', 'ꫜ'),
-    ('ê« ', 'ê«Ș'),
-    ('ê«Č', 'ê«Č'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŻ€', 'êŻą'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ', 'ïŹ'),
-    ('ïŹŸ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('', 'ïœŻ'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍀'),
-    ('𐍂', '𐍉'),
-    ('𐍐', 'đ”'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐑐', '𐒝'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '𐹀'),
-    ('𐹐', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '𐫀'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('𐮀', '𐮣'),
-    ('𐔊', 'đ”'),
-    ('đ”', 'đ”'),
-    ('đș€', 'đș©'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('đŒ€', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ°', 'đŸ'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀃', 'đ‘€·'),
-    ('𑁱', 'đ‘Č'),
-    ('𑁔', '𑁔'),
-    ('𑂃', '𑂯'),
-    ('𑃐', '𑃹'),
-    ('𑄃', '𑄩'),
-    ('𑅄', '𑅄'),
-    ('𑅇', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('𑆃', 'đ‘†Č'),
-    ('𑇁', '𑇄'),
-    ('𑇚', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', 'đ‘ˆ«'),
-    ('𑈿', '𑉀'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '𑋞'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', 'đ‘Œœ'),
-    ('𑍐', '𑍐'),
-    ('𑍝', '𑍡'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '𑎷'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏓'),
-    ('𑐀', '𑐮'),
-    ('𑑇', '𑑊'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑒯'),
-    ('𑓄', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑖀', '𑖼'),
-    ('𑗘', '𑗛'),
-    ('𑘀', '𑘯'),
-    ('𑙄', '𑙄'),
-    ('𑚀', 'đ‘šȘ'),
-    ('𑚾', '𑚾'),
-    ('𑜀', '𑜚'),
-    ('𑝀', '𑝆'),
-    ('𑠀', 'đ‘ «'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', '𑀯'),
-    ('𑀿', '𑀿'),
-    ('𑄁', '𑄁'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '𑧐'),
-    ('𑧡', '𑧡'),
-    ('𑧣', '𑧣'),
-    ('𑹀', '𑹀'),
-    ('𑹋', 'đ‘šČ'),
-    ('đ‘šș', 'đ‘šș'),
-    ('𑩐', '𑩐'),
-    ('đ‘©œ', 'đ‘Ș‰'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '𑰼'),
-    ('𑱀', '𑱀'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '𑮰'),
-    ('𑔆', '𑔆'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', '𑶉'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘» ', 'đ‘»Č'),
-    ('đ‘Œ‚', 'đ‘Œ‚'),
-    ('đ‘Œ„', 'đ‘Œ'),
-    ('đ‘Œ’', 'đ‘Œł'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '𖄝'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('𖫐', 'đ–«­'),
-    ('𖬀', '𖬯'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('đ–”ƒ', 'đ–”Ș'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('đ–œ', 'đ–œ'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('đŒŠ', 'đŒŠ'),
-    ('𞄀', '𞄬'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞓐', 'đž“Ș'),
-    ('𞗐', '𞗭'),
-    ('𞗰', '𞗰'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const OTHER_NUMBER: &'static [(char, char)] = &[
-    ('²', '³'),
-    ('¹', '¹'),
-    ('¼', '¾'),
-    ('à§Ž', 'à§č'),
-    ('à­Č', 'à­·'),
-    ('àŻ°', 'àŻČ'),
-    ('౞', '౟'),
-    ('à”˜', 'à”ž'),
-    ('à”°', 'à”ž'),
-    ('àŒȘ', 'àŒł'),
-    ('፩', 'ፌ'),
-    ('៰', 'áŸč'),
-    ('᧚', '᧚'),
-    ('⁰', '⁰'),
-    ('⁎', 'âč'),
-    ('₀', '₉'),
-    ('⅐', '⅟'),
-    ('↉', '↉'),
-    ('①', '⒛'),
-    ('â“Ș', '⓿'),
-    ('❶', '➓'),
-    ('âłœ', 'âłœ'),
-    ('㆒', '㆕'),
-    ('㈠', '㈩'),
-    ('㉈', '㉏'),
-    ('㉑', '㉟'),
-    ('㊀', '㊉'),
-    ('㊱', '㊿'),
-    ('ê °', 'ê ”'),
-    ('𐄇', '𐄳'),
-    ('𐅔', '𐅾'),
-    ('𐆊', '𐆋'),
-    ('𐋡', '𐋻'),
-    ('𐌠', '𐌣'),
-    ('𐡘', '𐡟'),
-    ('đĄč', '𐥿'),
-    ('𐹧', '𐹯'),
-    ('𐣻', '𐣿'),
-    ('𐀖', '𐀛'),
-    ('đŠŒ', 'đŠœ'),
-    ('𐧀', '𐧏'),
-    ('𐧒', '𐧿'),
-    ('𐩀', '𐩈'),
-    ('đ©œ', 'đ©Ÿ'),
-    ('đȘ', 'đȘŸ'),
-    ('𐫫', '𐫯'),
-    ('𐭘', '𐭟'),
-    ('𐭞', '𐭿'),
-    ('𐟩', '𐟯'),
-    ('đłș', '𐳿'),
-    ('đč ', 'đčŸ'),
-    ('đŒ', 'đŒŠ'),
-    ('đœ‘', 'đœ”'),
-    ('𐿅', '𐿋'),
-    ('𑁒', '𑁄'),
-    ('𑇡', '𑇮'),
-    ('đ‘œș', 'đ‘œ»'),
-    ('đ‘ŁȘ', 'đ‘ŁČ'),
-    ('𑱚', '𑱏'),
-    ('𑿀', '𑿔'),
-    ('𖭛', '𖭡'),
-    ('đ–ș€', 'đ–ș–'),
-    ('𝋀', '𝋓'),
-    ('𝋠', '𝋳'),
-    ('𝍠', '𝍾'),
-    ('𞣇', '𞣏'),
-    ('đž±±', 'đžČ«'),
-    ('đžČ­', 'đžČŻ'),
-    ('đžČ±', 'đžČŽ'),
-    ('𞮁', '𞮭'),
-    ('𞮯', 'đžŽœ'),
-    ('🄀', '🄌'),
-];
-
-pub const OTHER_PUNCTUATION: &'static [(char, char)] = &[
-    ('!', '#'),
-    ('%', '\''),
-    ('*', '*'),
-    (',', ','),
-    ('.', '/'),
-    (':', ';'),
-    ('?', '@'),
-    ('\\', '\\'),
-    ('¡', '¡'),
-    ('§', '§'),
-    ('¶', '·'),
-    ('¿', '¿'),
-    ('ÍŸ', 'ÍŸ'),
-    ('·', '·'),
-    ('՚', '՟'),
-    ('։', '։'),
-    ('Ś€', 'Ś€'),
-    ('ڃ', 'ڃ'),
-    ('چ', 'چ'),
-    ('Śł', 'ŚŽ'),
-    ('ۉ', 'ۊ'),
-    ('ی', 'ۍ'),
-    ('ۛ', 'ۛ'),
-    ('۝', '۟'),
-    ('ÙȘ', 'Ù­'),
-    ('۔', '۔'),
-    ('܀', '܍'),
-    ('ß·', 'ßč'),
-    ('à °', 'à Ÿ'),
-    ('àĄž', 'àĄž'),
-    ('à„€', 'à„„'),
-    ('à„°', 'à„°'),
-    ('ড়', 'ড়'),
-    ('à©¶', 'à©¶'),
-    ('à«°', 'à«°'),
-    ('à±·', 'à±·'),
-    ('àȄ', 'àȄ'),
-    ('à·Ž', 'à·Ž'),
-    ('àč', 'àč'),
-    ('àčš', 'àč›'),
-    ('àŒ„', 'àŒ’'),
-    ('àŒ”', 'àŒ”'),
-    ('àŸ…', 'àŸ…'),
-    ('àż', 'àż”'),
-    ('àż™', 'àżš'),
-    ('၊', '၏'),
-    ('჻', '჻'),
-    ('፠', 'ፚ'),
-    ('ᙼ', 'ᙼ'),
-    ('᛫', '᛭'),
-    ('᜔', '᜶'),
-    ('។', '៖'),
-    ('៘', '៚'),
-    ('᠀', '᠅'),
-    ('᠇', '᠊'),
-    ('á„„', 'á„…'),
-    ('Ṟ', 'ṟ'),
-    ('áȘ ', 'áȘŠ'),
-    ('áȘš', 'áȘ­'),
-    ('᭎', '᭏'),
-    ('᭚', '᭠'),
-    ('á­œ', 'á­ż'),
-    ('áŻŒ', '᯿'),
-    ('á°»', 'á°ż'),
-    ('ᱟ', '᱿'),
-    ('᳀', '᳇'),
-    ('᳓', '᳓'),
-    ('‖', '‗'),
-    ('†', '‧'),
-    ('‰', '‾'),
-    ('※', '‾'),
-    ('⁁', '⁃'),
-    ('⁇', '⁑'),
-    ('⁓', '⁓'),
-    ('⁕', '⁞'),
-    ('âłč', 'âłŒ'),
-    ('âłŸ', 'âłż'),
-    ('â”°', 'â”°'),
-    ('⾀', '⾁'),
-    ('⾆', '⾈'),
-    ('⾋', '⾋'),
-    ('⾎', '⾖'),
-    ('⾘', '⾙'),
-    ('⾛', '⾛'),
-    ('⾞', '⾟'),
-    ('âžȘ', 'âžź'),
-    ('âž°', 'âžč'),
-    ('➌', 'âžż'),
-    ('âč', 'âč'),
-    ('âčƒ', 'âč'),
-    ('âč’', 'âč”'),
-    ('、', '〃'),
-    ('ă€œ', 'ă€œ'),
-    ('・', '・'),
-    ('ꓟ', 'ê“ż'),
-    ('꘍', '꘏'),
-    ('ê™ł', 'ê™ł'),
-    ('ꙟ', 'ꙟ'),
-    ('ê›Č', '꛷'),
-    ('êĄŽ', 'êĄ·'),
-    ('êŁŽ', 'êŁ'),
-    ('êŁž', 'êŁș'),
-    ('êŁŒ', 'êŁŒ'),
-    ('ê€ź', 'ê€Ż'),
-    ('ꄟ', 'ꄟ'),
-    ('꧁', '꧍'),
-    ('꧞', '꧟'),
-    ('꩜', '꩟'),
-    ('꫞', '꫟'),
-    ('꫰', '꫱'),
-    ('êŻ«', 'êŻ«'),
-    ('', 'ïž–'),
-    ('ïž™', 'ïž™'),
-    ('ïž°', 'ïž°'),
-    ('ïč…', 'ïč†'),
-    ('ïč‰', 'ïčŒ'),
-    ('ïč', 'ïč’'),
-    ('ïč”', 'ïč—'),
-    ('ïčŸ', 'ïčĄ'),
-    ('ïčš', 'ïčš'),
-    ('ïčȘ', 'ïč«'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïœĄ', 'ïœĄ'),
-    ('', ''),
-    ('𐄀', '𐄂'),
-    ('𐎟', '𐎟'),
-    ('𐏐', '𐏐'),
-    ('𐕯', '𐕯'),
-    ('𐡗', '𐡗'),
-    ('đ€Ÿ', 'đ€Ÿ'),
-    ('𐀿', '𐀿'),
-    ('𐩐', '𐩘'),
-    ('𐩿', '𐩿'),
-    ('𐫰', '𐫶'),
-    ('đŹč', '𐏿'),
-    ('𐼙', '𐼜'),
-    ('đœ•', 'đœ™'),
-    ('đŸ†', 'đŸ‰'),
-    ('𑁇', '𑁍'),
-    ('đ‘‚»', 'đ‘‚Œ'),
-    ('đ‘‚Ÿ', '𑃁'),
-    ('𑅀', '𑅃'),
-    ('𑅮', 'đ‘…”'),
-    ('𑇅', '𑇈'),
-    ('𑇍', '𑇍'),
-    ('𑇛', '𑇛'),
-    ('𑇝', '𑇟'),
-    ('𑈾', 'đ‘ˆœ'),
-    ('𑊩', '𑊩'),
-    ('𑏔', '𑏕'),
-    ('𑏗', '𑏘'),
-    ('𑑋', '𑑏'),
-    ('𑑚', '𑑛'),
-    ('𑑝', '𑑝'),
-    ('𑓆', '𑓆'),
-    ('𑗁', '𑗗'),
-    ('𑙁', '𑙃'),
-    ('𑙠', '𑙬'),
-    ('đ‘šč', 'đ‘šč'),
-    ('đ‘œŒ', 'đ‘œŸ'),
-    ('đ‘ »', 'đ‘ »'),
-    ('đ‘„„', '𑄆'),
-    ('𑧱', '𑧱'),
-    ('𑹿', '𑩆'),
-    ('đ‘Șš', 'đ‘Șœ'),
-    ('đ‘Șž', 'đ‘Șą'),
-    ('𑬀', '𑬉'),
-    ('𑯡', '𑯡'),
-    ('𑱁', '𑱅'),
-    ('𑱰', '𑱱'),
-    ('đ‘»·', '𑻞'),
-    ('đ‘œƒ', 'đ‘œ'),
-    ('𑿿', '𑿿'),
-    ('𒑰', '𒑮'),
-    ('đ’ż±', 'đ’żČ'),
-    ('đ–©ź', 'đ–©Ż'),
-    ('đ–«”', 'đ–«”'),
-    ('đ–Ź·', 'đ–Ź»'),
-    ('𖭄', '𖭄'),
-    ('đ–”­', '𖔯'),
-    ('đ–ș—', 'đ–șš'),
-    ('𖿱', '𖿱'),
-    ('đ›ČŸ', 'đ›ČŸ'),
-    ('đȘ‡', 'đȘ‹'),
-    ('𞗿', '𞗿'),
-    ('𞄞', 'đž„Ÿ'),
-];
-
-pub const OTHER_SYMBOL: &'static [(char, char)] = &[
-    ('¦', '¦'),
-    ('©', '©'),
-    ('®', '®'),
-    ('°', '°'),
-    ('҂', '҂'),
-    ('֍', '֎'),
-    ('ێ', 'ۏ'),
-    ('۞', '۞'),
-    ('Û©', 'Û©'),
-    ('ۜ', '۟'),
-    ('ß¶', 'ß¶'),
-    ('à§ș', 'à§ș'),
-    ('à­°', 'à­°'),
-    ('àŻł', 'àŻž'),
-    ('àŻș', 'àŻș'),
-    ('à±ż', 'à±ż'),
-    ('à”', 'à”'),
-    ('à”č', 'à”č'),
-    ('àŒ', 'àŒƒ'),
-    ('àŒ“', 'àŒ“'),
-    ('àŒ•', 'àŒ—'),
-    ('àŒš', 'àŒŸ'),
-    ('àŒŽ', 'àŒŽ'),
-    ('àŒ¶', 'àŒ¶'),
-    ('àŒž', 'àŒž'),
-    ('àŸŸ', 'àż…'),
-    ('àż‡', 'àżŒ'),
-    ('àżŽ', 'àż'),
-    ('àż•', 'àż˜'),
-    ('႞', '႟'),
-    ('᎐', '᎙'),
-    ('᙭', '᙭'),
-    ('á„€', 'á„€'),
-    ('᧞', '᧿'),
-    ('á­Ą', 'á­Ș'),
-    ('᭎', 'ᭌ'),
-    ('℀', '℁'),
-    ('℃', '℆'),
-    ('℈', '℉'),
-    ('℔', '℔'),
-    ('№', '℗'),
-    ('℞', '℣'),
-    ('â„„', 'â„„'),
-    ('℧', '℧'),
-    ('℩', '℩'),
-    ('ℼ', 'ℼ'),
-    ('â„ș', '℻'),
-    ('⅊', '⅊'),
-    ('⅌', '⅍'),
-    ('⅏', '⅏'),
-    ('↊', '↋'),
-    ('↕', '↙'),
-    ('↜', '↟'),
-    ('↡', '↱'),
-    ('ↀ', 'ↄ'),
-    ('↧', '↭'),
-    ('↯', '⇍'),
-    ('⇐', '⇑'),
-    ('⇓', '⇓'),
-    ('⇕', '⇳'),
-    ('⌀', '⌇'),
-    ('⌌', '⌟'),
-    ('⌱', '⌹'),
-    ('⌫', '⍻'),
-    ('⍜', '⎚'),
-    ('⎮', '⏛'),
-    ('⏱', '␩'),
-    ('⑀', '⑊'),
-    ('⒜', 'ⓩ'),
-    ('─', '▶'),
-    ('▾', '◀'),
-    ('◂', '◷'),
-    ('☀', '♼'),
-    ('♰', '❧'),
-    ('➔', '➿'),
-    ('⠀', '⣿'),
-    ('⬀', '⬯'),
-    ('⭅', '⭆'),
-    ('⭍', '⭳'),
-    ('â­¶', '⼕'),
-    ('⼗', '⯿'),
-    ('âł„', 'âłȘ'),
-    ('âč', 'âč‘'),
-    ('âș€', 'âș™'),
-    ('âș›', '⻳'),
-    ('⌀', '⿕'),
-    ('âż°', 'âżż'),
-    ('〄', '〄'),
-    ('〒', '〓'),
-    ('〠', '〠'),
-    ('〶', '〷'),
-    ('ă€Ÿ', '〿'),
-    ('㆐', '㆑'),
-    ('㆖', '㆟'),
-    ('㇀', '㇄'),
-    ('㇯', '㇯'),
-    ('㈀', '㈞'),
-    ('ăˆȘ', '㉇'),
-    ('㉐', '㉐'),
-    ('㉠', '㉿'),
-    ('㊊', '㊰'),
-    ('㋀', '㏿'),
-    ('䷀', 'ä·ż'),
-    ('꒐', '꓆'),
-    ('ê š', 'ê «'),
-    ('ê ¶', 'ê ·'),
-    ('ê č', 'ê č'),
-    ('ê©·', 'ê©č'),
-    ('', ''),
-    ('﷏', '﷏'),
-    ('ï·œ', 'ï·ż'),
-    ('ïż€', 'ïż€'),
-    ('ïżš', 'ïżš'),
-    ('ïż­', 'ïżź'),
-    ('ïżŒ', 'ïżœ'),
-    ('𐄷', '𐄿'),
-    ('đ…č', '𐆉'),
-    ('𐆌', '𐆎'),
-    ('𐆐', '𐆜'),
-    ('𐆠', '𐆠'),
-    ('𐇐', 'đ‡Œ'),
-    ('𐥷', '𐥞'),
-    ('𐫈', '𐫈'),
-    ('𑜿', '𑜿'),
-    ('𑿕', '𑿜'),
-    ('𑿡', '𑿱'),
-    ('đ–ŹŒ', '𖬿'),
-    ('𖭅', '𖭅'),
-    ('đ›Čœ', 'đ›Čœ'),
-    ('𜰀', '𜳯'),
-    ('𜮀', 'đœșł'),
-    ('đœœ', '𜿃'),
-    ('𝀀', 'đƒ”'),
-    ('𝄀', '𝄩'),
-    ('đ„©', 'đ…€'),
-    ('đ…Ș', '𝅬'),
-    ('𝆃', '𝆄'),
-    ('𝆌', 'đ†©'),
-    ('𝆺𝅥', 'đ‡Ș'),
-    ('𝈀', '𝉁'),
-    ('𝉅', '𝉅'),
-    ('𝌀', '𝍖'),
-    ('𝠀', '𝧿'),
-    ('đš·', 'đšș'),
-    ('đ©­', 'đ©Ž'),
-    ('đ©¶', 'đȘƒ'),
-    ('đȘ…', 'đȘ†'),
-    ('𞅏', '𞅏'),
-    ('đžČŹ', 'đžČŹ'),
-    ('𞮼', '𞮼'),
-    ('🀀', 'đŸ€«'),
-    ('🀰', '🂓'),
-    ('🂠', '🂼'),
-    ('đŸ‚±', '🂿'),
-    ('🃁', '🃏'),
-    ('🃑', 'đŸƒ”'),
-    ('🄍', '🆭'),
-    ('🇩', '🈂'),
-    ('🈐', 'đŸˆ»'),
-    ('🉀', '🉈'),
-    ('🉐', '🉑'),
-    ('🉠', 'đŸ‰„'),
-    ('🌀', 'đŸș'),
-    ('🐀', '🛗'),
-    ('🛜', '🛬'),
-    ('🛰', 'đŸ›Œ'),
-    ('🜀', 'đŸ¶'),
-    ('đŸ»', '🟙'),
-    ('🟠', 'đŸŸ«'),
-    ('🟰', '🟰'),
-    ('🠀', '🠋'),
-    ('🠐', '🡇'),
-    ('🡐', '🡙'),
-    ('🡠', '🱇'),
-    ('🱐', '🱭'),
-    ('🱰', 'đŸą»'),
-    ('🣀', '🣁'),
-    ('đŸ€€', 'đŸ©“'),
-    ('đŸ© ', 'đŸ©­'),
-    ('đŸ©°', 'đŸ©Œ'),
-    ('đŸȘ€', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸ«†'),
-    ('đŸ«Ž', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«©'),
-    ('đŸ«°', 'đŸ«ž'),
-    ('🬀', '🼒'),
-    ('🼔', '🯯'),
-];
-
-pub const PARAGRAPH_SEPARATOR: &'static [(char, char)] =
-    &[('\u{2029}', '\u{2029}')];
-
-pub const PRIVATE_USE: &'static [(char, char)] = &[
-    ('\u{e000}', '\u{f8ff}'),
-    ('\u{f0000}', '\u{ffffd}'),
-    ('\u{100000}', '\u{10fffd}'),
-];
-
-pub const PUNCTUATION: &'static [(char, char)] = &[
-    ('!', '#'),
-    ('%', '*'),
-    (',', '/'),
-    (':', ';'),
-    ('?', '@'),
-    ('[', ']'),
-    ('_', '_'),
-    ('{', '{'),
-    ('}', '}'),
-    ('¡', '¡'),
-    ('§', '§'),
-    ('«', '«'),
-    ('¶', '·'),
-    ('»', '»'),
-    ('¿', '¿'),
-    ('ÍŸ', 'ÍŸ'),
-    ('·', '·'),
-    ('՚', '՟'),
-    ('։', '֊'),
-    ('ÖŸ', 'ÖŸ'),
-    ('Ś€', 'Ś€'),
-    ('ڃ', 'ڃ'),
-    ('چ', 'چ'),
-    ('Śł', 'ŚŽ'),
-    ('ۉ', 'ۊ'),
-    ('ی', 'ۍ'),
-    ('ۛ', 'ۛ'),
-    ('۝', '۟'),
-    ('ÙȘ', 'Ù­'),
-    ('۔', '۔'),
-    ('܀', '܍'),
-    ('ß·', 'ßč'),
-    ('à °', 'à Ÿ'),
-    ('àĄž', 'àĄž'),
-    ('à„€', 'à„„'),
-    ('à„°', 'à„°'),
-    ('ড়', 'ড়'),
-    ('à©¶', 'à©¶'),
-    ('à«°', 'à«°'),
-    ('à±·', 'à±·'),
-    ('àȄ', 'àȄ'),
-    ('à·Ž', 'à·Ž'),
-    ('àč', 'àč'),
-    ('àčš', 'àč›'),
-    ('àŒ„', 'àŒ’'),
-    ('àŒ”', 'àŒ”'),
-    ('àŒș', 'àŒœ'),
-    ('àŸ…', 'àŸ…'),
-    ('àż', 'àż”'),
-    ('àż™', 'àżš'),
-    ('၊', '၏'),
-    ('჻', '჻'),
-    ('፠', 'ፚ'),
-    ('᐀', '᐀'),
-    ('ᙼ', 'ᙼ'),
-    ('᚛', '᚜'),
-    ('᛫', '᛭'),
-    ('᜔', '᜶'),
-    ('។', '៖'),
-    ('៘', '៚'),
-    ('᠀', '᠊'),
-    ('á„„', 'á„…'),
-    ('Ṟ', 'ṟ'),
-    ('áȘ ', 'áȘŠ'),
-    ('áȘš', 'áȘ­'),
-    ('᭎', '᭏'),
-    ('᭚', '᭠'),
-    ('á­œ', 'á­ż'),
-    ('áŻŒ', '᯿'),
-    ('á°»', 'á°ż'),
-    ('ᱟ', '᱿'),
-    ('᳀', '᳇'),
-    ('᳓', '᳓'),
-    ('‐', '‧'),
-    ('‰', '⁃'),
-    ('⁅', '⁑'),
-    ('⁓', '⁞'),
-    ('⁜', ' '),
-    ('₍', '₎'),
-    ('⌈', '⌋'),
-    ('⟨', '⟩'),
-    ('❚', '❔'),
-    ('⟅', '⟆'),
-    ('⟩', '⟯'),
-    ('⊃', '⊘'),
-    ('⧘', '⧛'),
-    ('⧌', '⧜'),
-    ('âłč', 'âłŒ'),
-    ('âłŸ', 'âłż'),
-    ('â”°', 'â”°'),
-    ('⾀', '⾼'),
-    ('âž°', 'âč'),
-    ('âč’', 'âč'),
-    ('、', '〃'),
-    ('〈', '】'),
-    ('〔', '〟'),
-    ('〰', '〰'),
-    ('ă€œ', 'ă€œ'),
-    ('゠', '゠'),
-    ('・', '・'),
-    ('ꓟ', 'ê“ż'),
-    ('꘍', '꘏'),
-    ('ê™ł', 'ê™ł'),
-    ('ꙟ', 'ꙟ'),
-    ('ê›Č', '꛷'),
-    ('êĄŽ', 'êĄ·'),
-    ('êŁŽ', 'êŁ'),
-    ('êŁž', 'êŁș'),
-    ('êŁŒ', 'êŁŒ'),
-    ('ê€ź', 'ê€Ż'),
-    ('ꄟ', 'ꄟ'),
-    ('꧁', '꧍'),
-    ('꧞', '꧟'),
-    ('꩜', '꩟'),
-    ('꫞', '꫟'),
-    ('꫰', '꫱'),
-    ('êŻ«', 'êŻ«'),
-    ('', 'ïŽż'),
-    ('', 'ïž™'),
-    ('ïž°', 'ïč’'),
-    ('ïč”', 'ïčĄ'),
-    ('ïčŁ', 'ïčŁ'),
-    ('ïčš', 'ïčš'),
-    ('ïčȘ', 'ïč«'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïŒż', 'ïŒż'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('𐄀', '𐄂'),
-    ('𐎟', '𐎟'),
-    ('𐏐', '𐏐'),
-    ('𐕯', '𐕯'),
-    ('𐡗', '𐡗'),
-    ('đ€Ÿ', 'đ€Ÿ'),
-    ('𐀿', '𐀿'),
-    ('𐩐', '𐩘'),
-    ('𐩿', '𐩿'),
-    ('𐫰', '𐫶'),
-    ('đŹč', '𐏿'),
-    ('𐼙', '𐼜'),
-    ('𐔟', '𐔟'),
-    ('đș­', 'đș­'),
-    ('đœ•', 'đœ™'),
-    ('đŸ†', 'đŸ‰'),
-    ('𑁇', '𑁍'),
-    ('đ‘‚»', 'đ‘‚Œ'),
-    ('đ‘‚Ÿ', '𑃁'),
-    ('𑅀', '𑅃'),
-    ('𑅮', 'đ‘…”'),
-    ('𑇅', '𑇈'),
-    ('𑇍', '𑇍'),
-    ('𑇛', '𑇛'),
-    ('𑇝', '𑇟'),
-    ('𑈾', 'đ‘ˆœ'),
-    ('𑊩', '𑊩'),
-    ('𑏔', '𑏕'),
-    ('𑏗', '𑏘'),
-    ('𑑋', '𑑏'),
-    ('𑑚', '𑑛'),
-    ('𑑝', '𑑝'),
-    ('𑓆', '𑓆'),
-    ('𑗁', '𑗗'),
-    ('𑙁', '𑙃'),
-    ('𑙠', '𑙬'),
-    ('đ‘šč', 'đ‘šč'),
-    ('đ‘œŒ', 'đ‘œŸ'),
-    ('đ‘ »', 'đ‘ »'),
-    ('đ‘„„', '𑄆'),
-    ('𑧱', '𑧱'),
-    ('𑹿', '𑩆'),
-    ('đ‘Șš', 'đ‘Șœ'),
-    ('đ‘Șž', 'đ‘Șą'),
-    ('𑬀', '𑬉'),
-    ('𑯡', '𑯡'),
-    ('𑱁', '𑱅'),
-    ('𑱰', '𑱱'),
-    ('đ‘»·', '𑻞'),
-    ('đ‘œƒ', 'đ‘œ'),
-    ('𑿿', '𑿿'),
-    ('𒑰', '𒑮'),
-    ('đ’ż±', 'đ’żČ'),
-    ('đ–©ź', 'đ–©Ż'),
-    ('đ–«”', 'đ–«”'),
-    ('đ–Ź·', 'đ–Ź»'),
-    ('𖭄', '𖭄'),
-    ('đ–”­', '𖔯'),
-    ('đ–ș—', 'đ–șš'),
-    ('𖿱', '𖿱'),
-    ('đ›ČŸ', 'đ›ČŸ'),
-    ('đȘ‡', 'đȘ‹'),
-    ('𞗿', '𞗿'),
-    ('𞄞', 'đž„Ÿ'),
-];
-
-pub const SEPARATOR: &'static [(char, char)] = &[
-    (' ', ' '),
-    ('\u{a0}', '\u{a0}'),
-    ('\u{1680}', '\u{1680}'),
-    ('\u{2000}', '\u{200a}'),
-    ('\u{2028}', '\u{2029}'),
-    ('\u{202f}', '\u{202f}'),
-    ('\u{205f}', '\u{205f}'),
-    ('\u{3000}', '\u{3000}'),
-];
-
-pub const SPACE_SEPARATOR: &'static [(char, char)] = &[
-    (' ', ' '),
-    ('\u{a0}', '\u{a0}'),
-    ('\u{1680}', '\u{1680}'),
-    ('\u{2000}', '\u{200a}'),
-    ('\u{202f}', '\u{202f}'),
-    ('\u{205f}', '\u{205f}'),
-    ('\u{3000}', '\u{3000}'),
-];
-
-pub const SPACING_MARK: &'static [(char, char)] = &[
-    ('à€ƒ', 'à€ƒ'),
-    ('à€»', 'à€»'),
-    ('à€Ÿ', 'à„€'),
-    ('à„‰', 'à„Œ'),
-    ('à„Ž', 'à„'),
-    ('àŠ‚', 'àŠƒ'),
-    ('\u{9be}', 'ী'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৌ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('àšƒ', 'àšƒ'),
-    ('àšŸ', 'ੀ'),
-    ('àȘƒ', 'àȘƒ'),
-    ('àȘŸ', 'ી'),
-    ('ૉ', 'ૉ'),
-    ('ો', 'ૌ'),
-    ('àŹ‚', 'àŹƒ'),
-    ('\u{b3e}', '\u{b3e}'),
-    ('ୀ', 'ୀ'),
-    ('େ', 'ୈ'),
-    ('ୋ', 'ୌ'),
-    ('\u{b57}', '\u{b57}'),
-    ('\u{bbe}', 'àźż'),
-    ('àŻ', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', 'àŻŒ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('ఁ', 'ః'),
-    ('ు', 'ౄ'),
-    ('àȂ', 'àȃ'),
-    ('àČŸ', 'àČŸ'),
-    ('\u{cc0}', 'àł„'),
-    ('\u{cc7}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccb}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àłł', 'àłł'),
-    ('àŽ‚', 'àŽƒ'),
-    ('\u{d3e}', 'à”€'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Œ'),
-    ('\u{d57}', '\u{d57}'),
-    ('ං', 'ඃ'),
-    ('\u{dcf}', 'ෑ'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Č', 'à·ł'),
-    ('àŒŸ', 'àŒż'),
-    ('àœż', 'àœż'),
-    ('ါ', 'ာ'),
-    ('ေ', 'ေ'),
-    ('ှ', 'ှ'),
-    ('ျ', 'ဌ'),
-    ('ၖ', 'ၗ'),
-    ('ၹ', '၀'),
-    ('ၧ', 'ၭ'),
-    ('ႃ', 'ႄ'),
-    ('ႇ', 'ႌ'),
-    ('ႏ', 'ႏ'),
-    ('ႚ', 'ႜ'),
-    ('\u{1715}', '\u{1715}'),
-    ('\u{1734}', '\u{1734}'),
-    ('ា', 'ា'),
-    ('ស', 'ៅ'),
-    ('ះ', 'ៈ'),
-    ('ဣ', 'ည'),
-    ('ဩ', 'ါ'),
-    ('ူ', 'ေ'),
-    ('ဳ', 'သ'),
-    ('ṙ', 'Ṛ'),
-    ('ᩕ', 'ᩕ'),
-    ('ᩗ', 'ᩗ'),
-    ('ᩥ', 'ᩥ'),
-    ('ᩣ', 'ᩀ'),
-    ('á©­', 'á©Č'),
-    ('ᬄ', 'ᬄ'),
-    ('\u{1b35}', '\u{1b35}'),
-    ('\u{1b3b}', '\u{1b3b}'),
-    ('\u{1b3d}', 'ᭁ'),
-    ('\u{1b43}', '\u{1b44}'),
-    ('ἂ', 'ἂ'),
-    ('៥', '៥'),
-    ('៊', '៧'),
-    ('\u{1baa}', '\u{1baa}'),
-    ('ᯧ', 'ᯧ'),
-    ('áŻȘ', 'ᯏ'),
-    ('ᯟ', 'ᯟ'),
-    ('\u{1bf2}', '\u{1bf3}'),
-    ('á°€', 'á°«'),
-    ('á°Ž', 'á°”'),
-    ('᳥', '᳥'),
-    ('áł·', 'áł·'),
-    ('\u{302e}', '\u{302f}'),
-    ('ê Ł', 'ê €'),
-    ('ê §', 'ê §'),
-    ('êą€', 'êą'),
-    ('êąŽ', 'êŁƒ'),
-    ('ê„’', '\u{a953}'),
-    ('ꊃ', 'ꊃ'),
-    ('ꊎ', 'ꊔ'),
-    ('êŠș', 'ꊻ'),
-    ('ꊟ', '\u{a9c0}'),
-    ('êšŻ', 'êš°'),
-    ('êšł', 'Ꚏ'),
-    ('ꩍ', 'ꩍ'),
-    ('ê©»', 'ê©»'),
-    ('꩜', '꩜'),
-    ('ê««', 'ê««'),
-    ('ê«ź', 'ê«Ż'),
-    ('ê«”', 'ê«”'),
-    ('êŻŁ', 'êŻ€'),
-    ('êŻŠ', 'êŻ§'),
-    ('êŻ©', 'êŻȘ'),
-    ('êŻŹ', 'êŻŹ'),
-    ('𑀀', '𑀀'),
-    ('𑀂', '𑀂'),
-    ('𑂂', '𑂂'),
-    ('𑂰', 'đ‘‚Č'),
-    ('đ‘‚·', '𑂾'),
-    ('𑄬', '𑄬'),
-    ('𑅅', '𑅆'),
-    ('𑆂', '𑆂'),
-    ('𑆳', '𑆔'),
-    ('𑆿', '\u{111c0}'),
-    ('𑇎', '𑇎'),
-    ('𑈬', '𑈼'),
-    ('đ‘ˆČ', '𑈳'),
-    ('\u{11235}', '\u{11235}'),
-    ('𑋠', '𑋱'),
-    ('𑌂', '𑌃'),
-    ('\u{1133e}', '𑌿'),
-    ('𑍁', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍱', '𑍣'),
-    ('\u{113b8}', 'đ‘Žș'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏍'),
-    ('\u{113cf}', '\u{113cf}'),
-    ('𑐔', '𑐷'),
-    ('𑑀', '𑑁'),
-    ('𑑅', '𑑅'),
-    ('\u{114b0}', 'đ‘’Č'),
-    ('đ‘’č', 'đ‘’č'),
-    ('đ‘’»', 'đ‘’Ÿ'),
-    ('𑓁', '𑓁'),
-    ('\u{115af}', 'đ‘–±'),
-    ('𑖾', 'đ‘–»'),
-    ('đ‘–Ÿ', 'đ‘–Ÿ'),
-    ('𑘰', 'đ‘˜Č'),
-    ('đ‘˜»', 'đ‘˜Œ'),
-    ('đ‘˜Ÿ', 'đ‘˜Ÿ'),
-    ('𑚬', '𑚬'),
-    ('𑚼', '𑚯'),
-    ('\u{116b6}', '\u{116b6}'),
-    ('𑜞', '𑜞'),
-    ('𑜠', '𑜡'),
-    ('𑜩', '𑜩'),
-    ('𑠬', '𑠼'),
-    ('𑠾', '𑠾'),
-    ('\u{11930}', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193d}', '\u{1193d}'),
-    ('đ‘„€', 'đ‘„€'),
-    ('đ‘„‚', 'đ‘„‚'),
-    ('𑧑', '𑧓'),
-    ('𑧜', '𑧟'),
-    ('đ‘§€', 'đ‘§€'),
-    ('đ‘šč', 'đ‘šč'),
-    ('đ‘©—', 'đ‘©˜'),
-    ('đ‘Ș—', 'đ‘Ș—'),
-    ('𑰯', '𑰯'),
-    ('đ‘°Ÿ', 'đ‘°Ÿ'),
-    ('đ‘Č©', 'đ‘Č©'),
-    ('đ‘ȱ', 'đ‘ȱ'),
-    ('đ‘ČŽ', 'đ‘ČŽ'),
-    ('đ‘¶Š', 'đ‘¶Ž'),
-    ('đ‘¶“', 'đ‘¶”'),
-    ('đ‘¶–', 'đ‘¶–'),
-    ('đ‘»”', 'đ‘»¶'),
-    ('đ‘Œƒ', 'đ‘Œƒ'),
-    ('đ‘ŒŽ', 'đ‘Œ”'),
-    ('đ‘ŒŸ', 'đ‘Œż'),
-    ('\u{11f41}', '\u{11f41}'),
-    ('đ–„Ș', '𖄬'),
-    ('đ–œ‘', 'đ–Ÿ‡'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1d165}', '\u{1d166}'),
-    ('\u{1d16d}', '\u{1d172}'),
-];
-
-pub const SYMBOL: &'static [(char, char)] = &[
-    ('$', '$'),
-    ('+', '+'),
-    ('<', '>'),
-    ('^', '^'),
-    ('`', '`'),
-    ('|', '|'),
-    ('~', '~'),
-    ('¢', '¦'),
-    ('¨', '©'),
-    ('¬', '¬'),
-    ('®', '±'),
-    ('´', '´'),
-    ('¸', '¸'),
-    ('×', '×'),
-    ('÷', '÷'),
-    ('˂', '˅'),
-    ('˒', '˟'),
-    ('Ë„', 'Ë«'),
-    ('Ë­', 'Ë­'),
-    ('ËŻ', 'Ëż'),
-    ('Í”', 'Í”'),
-    ('΄', '΅'),
-    ('϶', '϶'),
-    ('҂', '҂'),
-    ('֍', '֏'),
-    ('ۆ', 'ۈ'),
-    ('ۋ', 'ۋ'),
-    ('ێ', 'ۏ'),
-    ('۞', '۞'),
-    ('Û©', 'Û©'),
-    ('ۜ', '۟'),
-    ('ß¶', 'ß¶'),
-    ('ߟ', 'ßż'),
-    ('àąˆ', 'àąˆ'),
-    ('à§Č', 'à§ł'),
-    ('à§ș', 'à§»'),
-    ('૱', '૱'),
-    ('à­°', 'à­°'),
-    ('àŻł', 'àŻș'),
-    ('à±ż', 'à±ż'),
-    ('à”', 'à”'),
-    ('à”č', 'à”č'),
-    ('àžż', 'àžż'),
-    ('àŒ', 'àŒƒ'),
-    ('àŒ“', 'àŒ“'),
-    ('àŒ•', 'àŒ—'),
-    ('àŒš', 'àŒŸ'),
-    ('àŒŽ', 'àŒŽ'),
-    ('àŒ¶', 'àŒ¶'),
-    ('àŒž', 'àŒž'),
-    ('àŸŸ', 'àż…'),
-    ('àż‡', 'àżŒ'),
-    ('àżŽ', 'àż'),
-    ('àż•', 'àż˜'),
-    ('႞', '႟'),
-    ('᎐', '᎙'),
-    ('᙭', '᙭'),
-    ('៛', '៛'),
-    ('á„€', 'á„€'),
-    ('᧞', '᧿'),
-    ('á­Ą', 'á­Ș'),
-    ('᭎', 'ᭌ'),
-    ('ៜ', 'ៜ'),
-    ('áŸż', '῁'),
-    ('῍', '῏'),
-    ('῝', '῟'),
-    ('῭', '`'),
-    ('áżœ', 'áżŸ'),
-    ('⁄', '⁄'),
-    ('⁒', '⁒'),
-    ('âș', '⁌'),
-    ('₊', '₌'),
-    ('₠', '⃀'),
-    ('℀', '℁'),
-    ('℃', '℆'),
-    ('℈', '℉'),
-    ('℔', '℔'),
-    ('№', '℘'),
-    ('℞', '℣'),
-    ('â„„', 'â„„'),
-    ('℧', '℧'),
-    ('℩', '℩'),
-    ('ℼ', 'ℼ'),
-    ('â„ș', '℻'),
-    ('⅀', '⅄'),
-    ('⅊', '⅍'),
-    ('⅏', '⅏'),
-    ('↊', '↋'),
-    ('←', '⌇'),
-    ('⌌', '⌹'),
-    ('⌫', '␩'),
-    ('⑀', '⑊'),
-    ('⒜', 'ⓩ'),
-    ('─', '❧'),
-    ('➔', '⟄'),
-    ('⟇', '⟄'),
-    ('⟰', '⩂'),
-    ('⩙', '⧗'),
-    ('⧜', '⧻'),
-    ('â§Ÿ', 'â­ł'),
-    ('â­¶', '⼕'),
-    ('⼗', '⯿'),
-    ('âł„', 'âłȘ'),
-    ('âč', 'âč‘'),
-    ('âș€', 'âș™'),
-    ('âș›', '⻳'),
-    ('⌀', '⿕'),
-    ('âż°', 'âżż'),
-    ('〄', '〄'),
-    ('〒', '〓'),
-    ('〠', '〠'),
-    ('〶', '〷'),
-    ('ă€Ÿ', '〿'),
-    ('゛', '゜'),
-    ('㆐', '㆑'),
-    ('㆖', '㆟'),
-    ('㇀', '㇄'),
-    ('㇯', '㇯'),
-    ('㈀', '㈞'),
-    ('ăˆȘ', '㉇'),
-    ('㉐', '㉐'),
-    ('㉠', '㉿'),
-    ('㊊', '㊰'),
-    ('㋀', '㏿'),
-    ('䷀', 'ä·ż'),
-    ('꒐', '꓆'),
-    ('꜀', '꜖'),
-    ('꜠', 'êœĄ'),
-    ('꞉', '꞊'),
-    ('ê š', 'ê «'),
-    ('ê ¶', 'ê č'),
-    ('ê©·', 'ê©č'),
-    ('꭛', '꭛'),
-    ('ê­Ș', 'ê­«'),
-    ('ïŹ©', 'ïŹ©'),
-    ('ïźČ', 'ïŻ‚'),
-    ('', ''),
-    ('﷏', '﷏'),
-    ('ï·Œ', 'ï·ż'),
-    ('ïčą', 'ïčą'),
-    ('ïč€', 'ïčŠ'),
-    ('ïč©', 'ïč©'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïż ', 'ïżŠ'),
-    ('ïżš', 'ïżź'),
-    ('ïżŒ', 'ïżœ'),
-    ('𐄷', '𐄿'),
-    ('đ…č', '𐆉'),
-    ('𐆌', '𐆎'),
-    ('𐆐', '𐆜'),
-    ('𐆠', '𐆠'),
-    ('𐇐', 'đ‡Œ'),
-    ('𐥷', '𐥞'),
-    ('𐫈', '𐫈'),
-    ('𐶎', 'đ¶'),
-    ('𑜿', '𑜿'),
-    ('𑿕', '𑿱'),
-    ('đ–ŹŒ', '𖬿'),
-    ('𖭅', '𖭅'),
-    ('đ›Čœ', 'đ›Čœ'),
-    ('𜰀', '𜳯'),
-    ('𜮀', 'đœșł'),
-    ('đœœ', '𜿃'),
-    ('𝀀', 'đƒ”'),
-    ('𝄀', '𝄩'),
-    ('đ„©', 'đ…€'),
-    ('đ…Ș', '𝅬'),
-    ('𝆃', '𝆄'),
-    ('𝆌', 'đ†©'),
-    ('𝆺𝅥', 'đ‡Ș'),
-    ('𝈀', '𝉁'),
-    ('𝉅', '𝉅'),
-    ('𝌀', '𝍖'),
-    ('𝛁', '𝛁'),
-    ('𝛛', '𝛛'),
-    ('đ›»', 'đ›»'),
-    ('𝜕', '𝜕'),
-    ('đœ”', 'đœ”'),
-    ('𝝏', '𝝏'),
-    ('𝝯', '𝝯'),
-    ('𝞉', '𝞉'),
-    ('đž©', 'đž©'),
-    ('𝟃', '𝟃'),
-    ('𝠀', '𝧿'),
-    ('đš·', 'đšș'),
-    ('đ©­', 'đ©Ž'),
-    ('đ©¶', 'đȘƒ'),
-    ('đȘ…', 'đȘ†'),
-    ('𞅏', '𞅏'),
-    ('𞋿', '𞋿'),
-    ('đžČŹ', 'đžČŹ'),
-    ('đžČ°', 'đžČ°'),
-    ('𞮼', '𞮼'),
-    ('đž»°', 'đž»±'),
-    ('🀀', 'đŸ€«'),
-    ('🀰', '🂓'),
-    ('🂠', '🂼'),
-    ('đŸ‚±', '🂿'),
-    ('🃁', '🃏'),
-    ('🃑', 'đŸƒ”'),
-    ('🄍', '🆭'),
-    ('🇩', '🈂'),
-    ('🈐', 'đŸˆ»'),
-    ('🉀', '🉈'),
-    ('🉐', '🉑'),
-    ('🉠', 'đŸ‰„'),
-    ('🌀', '🛗'),
-    ('🛜', '🛬'),
-    ('🛰', 'đŸ›Œ'),
-    ('🜀', 'đŸ¶'),
-    ('đŸ»', '🟙'),
-    ('🟠', 'đŸŸ«'),
-    ('🟰', '🟰'),
-    ('🠀', '🠋'),
-    ('🠐', '🡇'),
-    ('🡐', '🡙'),
-    ('🡠', '🱇'),
-    ('🱐', '🱭'),
-    ('🱰', 'đŸą»'),
-    ('🣀', '🣁'),
-    ('đŸ€€', 'đŸ©“'),
-    ('đŸ© ', 'đŸ©­'),
-    ('đŸ©°', 'đŸ©Œ'),
-    ('đŸȘ€', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸ«†'),
-    ('đŸ«Ž', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«©'),
-    ('đŸ«°', 'đŸ«ž'),
-    ('🬀', '🼒'),
-    ('🼔', '🯯'),
-];
-
-pub const TITLECASE_LETTER: &'static [(char, char)] = &[
-    ('Dž', 'Dž'),
-    ('Lj', 'Lj'),
-    ('Nj', 'Nj'),
-    ('ÇČ', 'ÇČ'),
-    ('ៈ', '៏'),
-    ('៘', '៟'),
-    ('៚', 'áŸŻ'),
-    ('៌', '៌'),
-    ('ῌ', 'ῌ'),
-    ('áżŒ', 'áżŒ'),
-];
-
-pub const UNASSIGNED: &'static [(char, char)] = &[
-    ('\u{378}', '\u{379}'),
-    ('\u{380}', '\u{383}'),
-    ('\u{38b}', '\u{38b}'),
-    ('\u{38d}', '\u{38d}'),
-    ('\u{3a2}', '\u{3a2}'),
-    ('\u{530}', '\u{530}'),
-    ('\u{557}', '\u{558}'),
-    ('\u{58b}', '\u{58c}'),
-    ('\u{590}', '\u{590}'),
-    ('\u{5c8}', '\u{5cf}'),
-    ('\u{5eb}', '\u{5ee}'),
-    ('\u{5f5}', '\u{5ff}'),
-    ('\u{70e}', '\u{70e}'),
-    ('\u{74b}', '\u{74c}'),
-    ('\u{7b2}', '\u{7bf}'),
-    ('\u{7fb}', '\u{7fc}'),
-    ('\u{82e}', '\u{82f}'),
-    ('\u{83f}', '\u{83f}'),
-    ('\u{85c}', '\u{85d}'),
-    ('\u{85f}', '\u{85f}'),
-    ('\u{86b}', '\u{86f}'),
-    ('\u{88f}', '\u{88f}'),
-    ('\u{892}', '\u{896}'),
-    ('\u{984}', '\u{984}'),
-    ('\u{98d}', '\u{98e}'),
-    ('\u{991}', '\u{992}'),
-    ('\u{9a9}', '\u{9a9}'),
-    ('\u{9b1}', '\u{9b1}'),
-    ('\u{9b3}', '\u{9b5}'),
-    ('\u{9ba}', '\u{9bb}'),
-    ('\u{9c5}', '\u{9c6}'),
-    ('\u{9c9}', '\u{9ca}'),
-    ('\u{9cf}', '\u{9d6}'),
-    ('\u{9d8}', '\u{9db}'),
-    ('\u{9de}', '\u{9de}'),
-    ('\u{9e4}', '\u{9e5}'),
-    ('\u{9ff}', '\u{a00}'),
-    ('\u{a04}', '\u{a04}'),
-    ('\u{a0b}', '\u{a0e}'),
-    ('\u{a11}', '\u{a12}'),
-    ('\u{a29}', '\u{a29}'),
-    ('\u{a31}', '\u{a31}'),
-    ('\u{a34}', '\u{a34}'),
-    ('\u{a37}', '\u{a37}'),
-    ('\u{a3a}', '\u{a3b}'),
-    ('\u{a3d}', '\u{a3d}'),
-    ('\u{a43}', '\u{a46}'),
-    ('\u{a49}', '\u{a4a}'),
-    ('\u{a4e}', '\u{a50}'),
-    ('\u{a52}', '\u{a58}'),
-    ('\u{a5d}', '\u{a5d}'),
-    ('\u{a5f}', '\u{a65}'),
-    ('\u{a77}', '\u{a80}'),
-    ('\u{a84}', '\u{a84}'),
-    ('\u{a8e}', '\u{a8e}'),
-    ('\u{a92}', '\u{a92}'),
-    ('\u{aa9}', '\u{aa9}'),
-    ('\u{ab1}', '\u{ab1}'),
-    ('\u{ab4}', '\u{ab4}'),
-    ('\u{aba}', '\u{abb}'),
-    ('\u{ac6}', '\u{ac6}'),
-    ('\u{aca}', '\u{aca}'),
-    ('\u{ace}', '\u{acf}'),
-    ('\u{ad1}', '\u{adf}'),
-    ('\u{ae4}', '\u{ae5}'),
-    ('\u{af2}', '\u{af8}'),
-    ('\u{b00}', '\u{b00}'),
-    ('\u{b04}', '\u{b04}'),
-    ('\u{b0d}', '\u{b0e}'),
-    ('\u{b11}', '\u{b12}'),
-    ('\u{b29}', '\u{b29}'),
-    ('\u{b31}', '\u{b31}'),
-    ('\u{b34}', '\u{b34}'),
-    ('\u{b3a}', '\u{b3b}'),
-    ('\u{b45}', '\u{b46}'),
-    ('\u{b49}', '\u{b4a}'),
-    ('\u{b4e}', '\u{b54}'),
-    ('\u{b58}', '\u{b5b}'),
-    ('\u{b5e}', '\u{b5e}'),
-    ('\u{b64}', '\u{b65}'),
-    ('\u{b78}', '\u{b81}'),
-    ('\u{b84}', '\u{b84}'),
-    ('\u{b8b}', '\u{b8d}'),
-    ('\u{b91}', '\u{b91}'),
-    ('\u{b96}', '\u{b98}'),
-    ('\u{b9b}', '\u{b9b}'),
-    ('\u{b9d}', '\u{b9d}'),
-    ('\u{ba0}', '\u{ba2}'),
-    ('\u{ba5}', '\u{ba7}'),
-    ('\u{bab}', '\u{bad}'),
-    ('\u{bba}', '\u{bbd}'),
-    ('\u{bc3}', '\u{bc5}'),
-    ('\u{bc9}', '\u{bc9}'),
-    ('\u{bce}', '\u{bcf}'),
-    ('\u{bd1}', '\u{bd6}'),
-    ('\u{bd8}', '\u{be5}'),
-    ('\u{bfb}', '\u{bff}'),
-    ('\u{c0d}', '\u{c0d}'),
-    ('\u{c11}', '\u{c11}'),
-    ('\u{c29}', '\u{c29}'),
-    ('\u{c3a}', '\u{c3b}'),
-    ('\u{c45}', '\u{c45}'),
-    ('\u{c49}', '\u{c49}'),
-    ('\u{c4e}', '\u{c54}'),
-    ('\u{c57}', '\u{c57}'),
-    ('\u{c5b}', '\u{c5c}'),
-    ('\u{c5e}', '\u{c5f}'),
-    ('\u{c64}', '\u{c65}'),
-    ('\u{c70}', '\u{c76}'),
-    ('\u{c8d}', '\u{c8d}'),
-    ('\u{c91}', '\u{c91}'),
-    ('\u{ca9}', '\u{ca9}'),
-    ('\u{cb4}', '\u{cb4}'),
-    ('\u{cba}', '\u{cbb}'),
-    ('\u{cc5}', '\u{cc5}'),
-    ('\u{cc9}', '\u{cc9}'),
-    ('\u{cce}', '\u{cd4}'),
-    ('\u{cd7}', '\u{cdc}'),
-    ('\u{cdf}', '\u{cdf}'),
-    ('\u{ce4}', '\u{ce5}'),
-    ('\u{cf0}', '\u{cf0}'),
-    ('\u{cf4}', '\u{cff}'),
-    ('\u{d0d}', '\u{d0d}'),
-    ('\u{d11}', '\u{d11}'),
-    ('\u{d45}', '\u{d45}'),
-    ('\u{d49}', '\u{d49}'),
-    ('\u{d50}', '\u{d53}'),
-    ('\u{d64}', '\u{d65}'),
-    ('\u{d80}', '\u{d80}'),
-    ('\u{d84}', '\u{d84}'),
-    ('\u{d97}', '\u{d99}'),
-    ('\u{db2}', '\u{db2}'),
-    ('\u{dbc}', '\u{dbc}'),
-    ('\u{dbe}', '\u{dbf}'),
-    ('\u{dc7}', '\u{dc9}'),
-    ('\u{dcb}', '\u{dce}'),
-    ('\u{dd5}', '\u{dd5}'),
-    ('\u{dd7}', '\u{dd7}'),
-    ('\u{de0}', '\u{de5}'),
-    ('\u{df0}', '\u{df1}'),
-    ('\u{df5}', '\u{e00}'),
-    ('\u{e3b}', '\u{e3e}'),
-    ('\u{e5c}', '\u{e80}'),
-    ('\u{e83}', '\u{e83}'),
-    ('\u{e85}', '\u{e85}'),
-    ('\u{e8b}', '\u{e8b}'),
-    ('\u{ea4}', '\u{ea4}'),
-    ('\u{ea6}', '\u{ea6}'),
-    ('\u{ebe}', '\u{ebf}'),
-    ('\u{ec5}', '\u{ec5}'),
-    ('\u{ec7}', '\u{ec7}'),
-    ('\u{ecf}', '\u{ecf}'),
-    ('\u{eda}', '\u{edb}'),
-    ('\u{ee0}', '\u{eff}'),
-    ('\u{f48}', '\u{f48}'),
-    ('\u{f6d}', '\u{f70}'),
-    ('\u{f98}', '\u{f98}'),
-    ('\u{fbd}', '\u{fbd}'),
-    ('\u{fcd}', '\u{fcd}'),
-    ('\u{fdb}', '\u{fff}'),
-    ('\u{10c6}', '\u{10c6}'),
-    ('\u{10c8}', '\u{10cc}'),
-    ('\u{10ce}', '\u{10cf}'),
-    ('\u{1249}', '\u{1249}'),
-    ('\u{124e}', '\u{124f}'),
-    ('\u{1257}', '\u{1257}'),
-    ('\u{1259}', '\u{1259}'),
-    ('\u{125e}', '\u{125f}'),
-    ('\u{1289}', '\u{1289}'),
-    ('\u{128e}', '\u{128f}'),
-    ('\u{12b1}', '\u{12b1}'),
-    ('\u{12b6}', '\u{12b7}'),
-    ('\u{12bf}', '\u{12bf}'),
-    ('\u{12c1}', '\u{12c1}'),
-    ('\u{12c6}', '\u{12c7}'),
-    ('\u{12d7}', '\u{12d7}'),
-    ('\u{1311}', '\u{1311}'),
-    ('\u{1316}', '\u{1317}'),
-    ('\u{135b}', '\u{135c}'),
-    ('\u{137d}', '\u{137f}'),
-    ('\u{139a}', '\u{139f}'),
-    ('\u{13f6}', '\u{13f7}'),
-    ('\u{13fe}', '\u{13ff}'),
-    ('\u{169d}', '\u{169f}'),
-    ('\u{16f9}', '\u{16ff}'),
-    ('\u{1716}', '\u{171e}'),
-    ('\u{1737}', '\u{173f}'),
-    ('\u{1754}', '\u{175f}'),
-    ('\u{176d}', '\u{176d}'),
-    ('\u{1771}', '\u{1771}'),
-    ('\u{1774}', '\u{177f}'),
-    ('\u{17de}', '\u{17df}'),
-    ('\u{17ea}', '\u{17ef}'),
-    ('\u{17fa}', '\u{17ff}'),
-    ('\u{181a}', '\u{181f}'),
-    ('\u{1879}', '\u{187f}'),
-    ('\u{18ab}', '\u{18af}'),
-    ('\u{18f6}', '\u{18ff}'),
-    ('\u{191f}', '\u{191f}'),
-    ('\u{192c}', '\u{192f}'),
-    ('\u{193c}', '\u{193f}'),
-    ('\u{1941}', '\u{1943}'),
-    ('\u{196e}', '\u{196f}'),
-    ('\u{1975}', '\u{197f}'),
-    ('\u{19ac}', '\u{19af}'),
-    ('\u{19ca}', '\u{19cf}'),
-    ('\u{19db}', '\u{19dd}'),
-    ('\u{1a1c}', '\u{1a1d}'),
-    ('\u{1a5f}', '\u{1a5f}'),
-    ('\u{1a7d}', '\u{1a7e}'),
-    ('\u{1a8a}', '\u{1a8f}'),
-    ('\u{1a9a}', '\u{1a9f}'),
-    ('\u{1aae}', '\u{1aaf}'),
-    ('\u{1acf}', '\u{1aff}'),
-    ('\u{1b4d}', '\u{1b4d}'),
-    ('\u{1bf4}', '\u{1bfb}'),
-    ('\u{1c38}', '\u{1c3a}'),
-    ('\u{1c4a}', '\u{1c4c}'),
-    ('\u{1c8b}', '\u{1c8f}'),
-    ('\u{1cbb}', '\u{1cbc}'),
-    ('\u{1cc8}', '\u{1ccf}'),
-    ('\u{1cfb}', '\u{1cff}'),
-    ('\u{1f16}', '\u{1f17}'),
-    ('\u{1f1e}', '\u{1f1f}'),
-    ('\u{1f46}', '\u{1f47}'),
-    ('\u{1f4e}', '\u{1f4f}'),
-    ('\u{1f58}', '\u{1f58}'),
-    ('\u{1f5a}', '\u{1f5a}'),
-    ('\u{1f5c}', '\u{1f5c}'),
-    ('\u{1f5e}', '\u{1f5e}'),
-    ('\u{1f7e}', '\u{1f7f}'),
-    ('\u{1fb5}', '\u{1fb5}'),
-    ('\u{1fc5}', '\u{1fc5}'),
-    ('\u{1fd4}', '\u{1fd5}'),
-    ('\u{1fdc}', '\u{1fdc}'),
-    ('\u{1ff0}', '\u{1ff1}'),
-    ('\u{1ff5}', '\u{1ff5}'),
-    ('\u{1fff}', '\u{1fff}'),
-    ('\u{2065}', '\u{2065}'),
-    ('\u{2072}', '\u{2073}'),
-    ('\u{208f}', '\u{208f}'),
-    ('\u{209d}', '\u{209f}'),
-    ('\u{20c1}', '\u{20cf}'),
-    ('\u{20f1}', '\u{20ff}'),
-    ('\u{218c}', '\u{218f}'),
-    ('\u{242a}', '\u{243f}'),
-    ('\u{244b}', '\u{245f}'),
-    ('\u{2b74}', '\u{2b75}'),
-    ('\u{2b96}', '\u{2b96}'),
-    ('\u{2cf4}', '\u{2cf8}'),
-    ('\u{2d26}', '\u{2d26}'),
-    ('\u{2d28}', '\u{2d2c}'),
-    ('\u{2d2e}', '\u{2d2f}'),
-    ('\u{2d68}', '\u{2d6e}'),
-    ('\u{2d71}', '\u{2d7e}'),
-    ('\u{2d97}', '\u{2d9f}'),
-    ('\u{2da7}', '\u{2da7}'),
-    ('\u{2daf}', '\u{2daf}'),
-    ('\u{2db7}', '\u{2db7}'),
-    ('\u{2dbf}', '\u{2dbf}'),
-    ('\u{2dc7}', '\u{2dc7}'),
-    ('\u{2dcf}', '\u{2dcf}'),
-    ('\u{2dd7}', '\u{2dd7}'),
-    ('\u{2ddf}', '\u{2ddf}'),
-    ('\u{2e5e}', '\u{2e7f}'),
-    ('\u{2e9a}', '\u{2e9a}'),
-    ('\u{2ef4}', '\u{2eff}'),
-    ('\u{2fd6}', '\u{2fef}'),
-    ('\u{3040}', '\u{3040}'),
-    ('\u{3097}', '\u{3098}'),
-    ('\u{3100}', '\u{3104}'),
-    ('\u{3130}', '\u{3130}'),
-    ('\u{318f}', '\u{318f}'),
-    ('\u{31e6}', '\u{31ee}'),
-    ('\u{321f}', '\u{321f}'),
-    ('\u{a48d}', '\u{a48f}'),
-    ('\u{a4c7}', '\u{a4cf}'),
-    ('\u{a62c}', '\u{a63f}'),
-    ('\u{a6f8}', '\u{a6ff}'),
-    ('\u{a7ce}', '\u{a7cf}'),
-    ('\u{a7d2}', '\u{a7d2}'),
-    ('\u{a7d4}', '\u{a7d4}'),
-    ('\u{a7dd}', '\u{a7f1}'),
-    ('\u{a82d}', '\u{a82f}'),
-    ('\u{a83a}', '\u{a83f}'),
-    ('\u{a878}', '\u{a87f}'),
-    ('\u{a8c6}', '\u{a8cd}'),
-    ('\u{a8da}', '\u{a8df}'),
-    ('\u{a954}', '\u{a95e}'),
-    ('\u{a97d}', '\u{a97f}'),
-    ('\u{a9ce}', '\u{a9ce}'),
-    ('\u{a9da}', '\u{a9dd}'),
-    ('\u{a9ff}', '\u{a9ff}'),
-    ('\u{aa37}', '\u{aa3f}'),
-    ('\u{aa4e}', '\u{aa4f}'),
-    ('\u{aa5a}', '\u{aa5b}'),
-    ('\u{aac3}', '\u{aada}'),
-    ('\u{aaf7}', '\u{ab00}'),
-    ('\u{ab07}', '\u{ab08}'),
-    ('\u{ab0f}', '\u{ab10}'),
-    ('\u{ab17}', '\u{ab1f}'),
-    ('\u{ab27}', '\u{ab27}'),
-    ('\u{ab2f}', '\u{ab2f}'),
-    ('\u{ab6c}', '\u{ab6f}'),
-    ('\u{abee}', '\u{abef}'),
-    ('\u{abfa}', '\u{abff}'),
-    ('\u{d7a4}', '\u{d7af}'),
-    ('\u{d7c7}', '\u{d7ca}'),
-    ('\u{d7fc}', '\u{d7ff}'),
-    ('\u{fa6e}', '\u{fa6f}'),
-    ('\u{fada}', '\u{faff}'),
-    ('\u{fb07}', '\u{fb12}'),
-    ('\u{fb18}', '\u{fb1c}'),
-    ('\u{fb37}', '\u{fb37}'),
-    ('\u{fb3d}', '\u{fb3d}'),
-    ('\u{fb3f}', '\u{fb3f}'),
-    ('\u{fb42}', '\u{fb42}'),
-    ('\u{fb45}', '\u{fb45}'),
-    ('\u{fbc3}', '\u{fbd2}'),
-    ('\u{fd90}', '\u{fd91}'),
-    ('\u{fdc8}', '\u{fdce}'),
-    ('\u{fdd0}', '\u{fdef}'),
-    ('\u{fe1a}', '\u{fe1f}'),
-    ('\u{fe53}', '\u{fe53}'),
-    ('\u{fe67}', '\u{fe67}'),
-    ('\u{fe6c}', '\u{fe6f}'),
-    ('\u{fe75}', '\u{fe75}'),
-    ('\u{fefd}', '\u{fefe}'),
-    ('\u{ff00}', '\u{ff00}'),
-    ('\u{ffbf}', '\u{ffc1}'),
-    ('\u{ffc8}', '\u{ffc9}'),
-    ('\u{ffd0}', '\u{ffd1}'),
-    ('\u{ffd8}', '\u{ffd9}'),
-    ('\u{ffdd}', '\u{ffdf}'),
-    ('\u{ffe7}', '\u{ffe7}'),
-    ('\u{ffef}', '\u{fff8}'),
-    ('\u{fffe}', '\u{ffff}'),
-    ('\u{1000c}', '\u{1000c}'),
-    ('\u{10027}', '\u{10027}'),
-    ('\u{1003b}', '\u{1003b}'),
-    ('\u{1003e}', '\u{1003e}'),
-    ('\u{1004e}', '\u{1004f}'),
-    ('\u{1005e}', '\u{1007f}'),
-    ('\u{100fb}', '\u{100ff}'),
-    ('\u{10103}', '\u{10106}'),
-    ('\u{10134}', '\u{10136}'),
-    ('\u{1018f}', '\u{1018f}'),
-    ('\u{1019d}', '\u{1019f}'),
-    ('\u{101a1}', '\u{101cf}'),
-    ('\u{101fe}', '\u{1027f}'),
-    ('\u{1029d}', '\u{1029f}'),
-    ('\u{102d1}', '\u{102df}'),
-    ('\u{102fc}', '\u{102ff}'),
-    ('\u{10324}', '\u{1032c}'),
-    ('\u{1034b}', '\u{1034f}'),
-    ('\u{1037b}', '\u{1037f}'),
-    ('\u{1039e}', '\u{1039e}'),
-    ('\u{103c4}', '\u{103c7}'),
-    ('\u{103d6}', '\u{103ff}'),
-    ('\u{1049e}', '\u{1049f}'),
-    ('\u{104aa}', '\u{104af}'),
-    ('\u{104d4}', '\u{104d7}'),
-    ('\u{104fc}', '\u{104ff}'),
-    ('\u{10528}', '\u{1052f}'),
-    ('\u{10564}', '\u{1056e}'),
-    ('\u{1057b}', '\u{1057b}'),
-    ('\u{1058b}', '\u{1058b}'),
-    ('\u{10593}', '\u{10593}'),
-    ('\u{10596}', '\u{10596}'),
-    ('\u{105a2}', '\u{105a2}'),
-    ('\u{105b2}', '\u{105b2}'),
-    ('\u{105ba}', '\u{105ba}'),
-    ('\u{105bd}', '\u{105bf}'),
-    ('\u{105f4}', '\u{105ff}'),
-    ('\u{10737}', '\u{1073f}'),
-    ('\u{10756}', '\u{1075f}'),
-    ('\u{10768}', '\u{1077f}'),
-    ('\u{10786}', '\u{10786}'),
-    ('\u{107b1}', '\u{107b1}'),
-    ('\u{107bb}', '\u{107ff}'),
-    ('\u{10806}', '\u{10807}'),
-    ('\u{10809}', '\u{10809}'),
-    ('\u{10836}', '\u{10836}'),
-    ('\u{10839}', '\u{1083b}'),
-    ('\u{1083d}', '\u{1083e}'),
-    ('\u{10856}', '\u{10856}'),
-    ('\u{1089f}', '\u{108a6}'),
-    ('\u{108b0}', '\u{108df}'),
-    ('\u{108f3}', '\u{108f3}'),
-    ('\u{108f6}', '\u{108fa}'),
-    ('\u{1091c}', '\u{1091e}'),
-    ('\u{1093a}', '\u{1093e}'),
-    ('\u{10940}', '\u{1097f}'),
-    ('\u{109b8}', '\u{109bb}'),
-    ('\u{109d0}', '\u{109d1}'),
-    ('\u{10a04}', '\u{10a04}'),
-    ('\u{10a07}', '\u{10a0b}'),
-    ('\u{10a14}', '\u{10a14}'),
-    ('\u{10a18}', '\u{10a18}'),
-    ('\u{10a36}', '\u{10a37}'),
-    ('\u{10a3b}', '\u{10a3e}'),
-    ('\u{10a49}', '\u{10a4f}'),
-    ('\u{10a59}', '\u{10a5f}'),
-    ('\u{10aa0}', '\u{10abf}'),
-    ('\u{10ae7}', '\u{10aea}'),
-    ('\u{10af7}', '\u{10aff}'),
-    ('\u{10b36}', '\u{10b38}'),
-    ('\u{10b56}', '\u{10b57}'),
-    ('\u{10b73}', '\u{10b77}'),
-    ('\u{10b92}', '\u{10b98}'),
-    ('\u{10b9d}', '\u{10ba8}'),
-    ('\u{10bb0}', '\u{10bff}'),
-    ('\u{10c49}', '\u{10c7f}'),
-    ('\u{10cb3}', '\u{10cbf}'),
-    ('\u{10cf3}', '\u{10cf9}'),
-    ('\u{10d28}', '\u{10d2f}'),
-    ('\u{10d3a}', '\u{10d3f}'),
-    ('\u{10d66}', '\u{10d68}'),
-    ('\u{10d86}', '\u{10d8d}'),
-    ('\u{10d90}', '\u{10e5f}'),
-    ('\u{10e7f}', '\u{10e7f}'),
-    ('\u{10eaa}', '\u{10eaa}'),
-    ('\u{10eae}', '\u{10eaf}'),
-    ('\u{10eb2}', '\u{10ec1}'),
-    ('\u{10ec5}', '\u{10efb}'),
-    ('\u{10f28}', '\u{10f2f}'),
-    ('\u{10f5a}', '\u{10f6f}'),
-    ('\u{10f8a}', '\u{10faf}'),
-    ('\u{10fcc}', '\u{10fdf}'),
-    ('\u{10ff7}', '\u{10fff}'),
-    ('\u{1104e}', '\u{11051}'),
-    ('\u{11076}', '\u{1107e}'),
-    ('\u{110c3}', '\u{110cc}'),
-    ('\u{110ce}', '\u{110cf}'),
-    ('\u{110e9}', '\u{110ef}'),
-    ('\u{110fa}', '\u{110ff}'),
-    ('\u{11135}', '\u{11135}'),
-    ('\u{11148}', '\u{1114f}'),
-    ('\u{11177}', '\u{1117f}'),
-    ('\u{111e0}', '\u{111e0}'),
-    ('\u{111f5}', '\u{111ff}'),
-    ('\u{11212}', '\u{11212}'),
-    ('\u{11242}', '\u{1127f}'),
-    ('\u{11287}', '\u{11287}'),
-    ('\u{11289}', '\u{11289}'),
-    ('\u{1128e}', '\u{1128e}'),
-    ('\u{1129e}', '\u{1129e}'),
-    ('\u{112aa}', '\u{112af}'),
-    ('\u{112eb}', '\u{112ef}'),
-    ('\u{112fa}', '\u{112ff}'),
-    ('\u{11304}', '\u{11304}'),
-    ('\u{1130d}', '\u{1130e}'),
-    ('\u{11311}', '\u{11312}'),
-    ('\u{11329}', '\u{11329}'),
-    ('\u{11331}', '\u{11331}'),
-    ('\u{11334}', '\u{11334}'),
-    ('\u{1133a}', '\u{1133a}'),
-    ('\u{11345}', '\u{11346}'),
-    ('\u{11349}', '\u{1134a}'),
-    ('\u{1134e}', '\u{1134f}'),
-    ('\u{11351}', '\u{11356}'),
-    ('\u{11358}', '\u{1135c}'),
-    ('\u{11364}', '\u{11365}'),
-    ('\u{1136d}', '\u{1136f}'),
-    ('\u{11375}', '\u{1137f}'),
-    ('\u{1138a}', '\u{1138a}'),
-    ('\u{1138c}', '\u{1138d}'),
-    ('\u{1138f}', '\u{1138f}'),
-    ('\u{113b6}', '\u{113b6}'),
-    ('\u{113c1}', '\u{113c1}'),
-    ('\u{113c3}', '\u{113c4}'),
-    ('\u{113c6}', '\u{113c6}'),
-    ('\u{113cb}', '\u{113cb}'),
-    ('\u{113d6}', '\u{113d6}'),
-    ('\u{113d9}', '\u{113e0}'),
-    ('\u{113e3}', '\u{113ff}'),
-    ('\u{1145c}', '\u{1145c}'),
-    ('\u{11462}', '\u{1147f}'),
-    ('\u{114c8}', '\u{114cf}'),
-    ('\u{114da}', '\u{1157f}'),
-    ('\u{115b6}', '\u{115b7}'),
-    ('\u{115de}', '\u{115ff}'),
-    ('\u{11645}', '\u{1164f}'),
-    ('\u{1165a}', '\u{1165f}'),
-    ('\u{1166d}', '\u{1167f}'),
-    ('\u{116ba}', '\u{116bf}'),
-    ('\u{116ca}', '\u{116cf}'),
-    ('\u{116e4}', '\u{116ff}'),
-    ('\u{1171b}', '\u{1171c}'),
-    ('\u{1172c}', '\u{1172f}'),
-    ('\u{11747}', '\u{117ff}'),
-    ('\u{1183c}', '\u{1189f}'),
-    ('\u{118f3}', '\u{118fe}'),
-    ('\u{11907}', '\u{11908}'),
-    ('\u{1190a}', '\u{1190b}'),
-    ('\u{11914}', '\u{11914}'),
-    ('\u{11917}', '\u{11917}'),
-    ('\u{11936}', '\u{11936}'),
-    ('\u{11939}', '\u{1193a}'),
-    ('\u{11947}', '\u{1194f}'),
-    ('\u{1195a}', '\u{1199f}'),
-    ('\u{119a8}', '\u{119a9}'),
-    ('\u{119d8}', '\u{119d9}'),
-    ('\u{119e5}', '\u{119ff}'),
-    ('\u{11a48}', '\u{11a4f}'),
-    ('\u{11aa3}', '\u{11aaf}'),
-    ('\u{11af9}', '\u{11aff}'),
-    ('\u{11b0a}', '\u{11bbf}'),
-    ('\u{11be2}', '\u{11bef}'),
-    ('\u{11bfa}', '\u{11bff}'),
-    ('\u{11c09}', '\u{11c09}'),
-    ('\u{11c37}', '\u{11c37}'),
-    ('\u{11c46}', '\u{11c4f}'),
-    ('\u{11c6d}', '\u{11c6f}'),
-    ('\u{11c90}', '\u{11c91}'),
-    ('\u{11ca8}', '\u{11ca8}'),
-    ('\u{11cb7}', '\u{11cff}'),
-    ('\u{11d07}', '\u{11d07}'),
-    ('\u{11d0a}', '\u{11d0a}'),
-    ('\u{11d37}', '\u{11d39}'),
-    ('\u{11d3b}', '\u{11d3b}'),
-    ('\u{11d3e}', '\u{11d3e}'),
-    ('\u{11d48}', '\u{11d4f}'),
-    ('\u{11d5a}', '\u{11d5f}'),
-    ('\u{11d66}', '\u{11d66}'),
-    ('\u{11d69}', '\u{11d69}'),
-    ('\u{11d8f}', '\u{11d8f}'),
-    ('\u{11d92}', '\u{11d92}'),
-    ('\u{11d99}', '\u{11d9f}'),
-    ('\u{11daa}', '\u{11edf}'),
-    ('\u{11ef9}', '\u{11eff}'),
-    ('\u{11f11}', '\u{11f11}'),
-    ('\u{11f3b}', '\u{11f3d}'),
-    ('\u{11f5b}', '\u{11faf}'),
-    ('\u{11fb1}', '\u{11fbf}'),
-    ('\u{11ff2}', '\u{11ffe}'),
-    ('\u{1239a}', '\u{123ff}'),
-    ('\u{1246f}', '\u{1246f}'),
-    ('\u{12475}', '\u{1247f}'),
-    ('\u{12544}', '\u{12f8f}'),
-    ('\u{12ff3}', '\u{12fff}'),
-    ('\u{13456}', '\u{1345f}'),
-    ('\u{143fb}', '\u{143ff}'),
-    ('\u{14647}', '\u{160ff}'),
-    ('\u{1613a}', '\u{167ff}'),
-    ('\u{16a39}', '\u{16a3f}'),
-    ('\u{16a5f}', '\u{16a5f}'),
-    ('\u{16a6a}', '\u{16a6d}'),
-    ('\u{16abf}', '\u{16abf}'),
-    ('\u{16aca}', '\u{16acf}'),
-    ('\u{16aee}', '\u{16aef}'),
-    ('\u{16af6}', '\u{16aff}'),
-    ('\u{16b46}', '\u{16b4f}'),
-    ('\u{16b5a}', '\u{16b5a}'),
-    ('\u{16b62}', '\u{16b62}'),
-    ('\u{16b78}', '\u{16b7c}'),
-    ('\u{16b90}', '\u{16d3f}'),
-    ('\u{16d7a}', '\u{16e3f}'),
-    ('\u{16e9b}', '\u{16eff}'),
-    ('\u{16f4b}', '\u{16f4e}'),
-    ('\u{16f88}', '\u{16f8e}'),
-    ('\u{16fa0}', '\u{16fdf}'),
-    ('\u{16fe5}', '\u{16fef}'),
-    ('\u{16ff2}', '\u{16fff}'),
-    ('\u{187f8}', '\u{187ff}'),
-    ('\u{18cd6}', '\u{18cfe}'),
-    ('\u{18d09}', '\u{1afef}'),
-    ('\u{1aff4}', '\u{1aff4}'),
-    ('\u{1affc}', '\u{1affc}'),
-    ('\u{1afff}', '\u{1afff}'),
-    ('\u{1b123}', '\u{1b131}'),
-    ('\u{1b133}', '\u{1b14f}'),
-    ('\u{1b153}', '\u{1b154}'),
-    ('\u{1b156}', '\u{1b163}'),
-    ('\u{1b168}', '\u{1b16f}'),
-    ('\u{1b2fc}', '\u{1bbff}'),
-    ('\u{1bc6b}', '\u{1bc6f}'),
-    ('\u{1bc7d}', '\u{1bc7f}'),
-    ('\u{1bc89}', '\u{1bc8f}'),
-    ('\u{1bc9a}', '\u{1bc9b}'),
-    ('\u{1bca4}', '\u{1cbff}'),
-    ('\u{1ccfa}', '\u{1ccff}'),
-    ('\u{1ceb4}', '\u{1ceff}'),
-    ('\u{1cf2e}', '\u{1cf2f}'),
-    ('\u{1cf47}', '\u{1cf4f}'),
-    ('\u{1cfc4}', '\u{1cfff}'),
-    ('\u{1d0f6}', '\u{1d0ff}'),
-    ('\u{1d127}', '\u{1d128}'),
-    ('\u{1d1eb}', '\u{1d1ff}'),
-    ('\u{1d246}', '\u{1d2bf}'),
-    ('\u{1d2d4}', '\u{1d2df}'),
-    ('\u{1d2f4}', '\u{1d2ff}'),
-    ('\u{1d357}', '\u{1d35f}'),
-    ('\u{1d379}', '\u{1d3ff}'),
-    ('\u{1d455}', '\u{1d455}'),
-    ('\u{1d49d}', '\u{1d49d}'),
-    ('\u{1d4a0}', '\u{1d4a1}'),
-    ('\u{1d4a3}', '\u{1d4a4}'),
-    ('\u{1d4a7}', '\u{1d4a8}'),
-    ('\u{1d4ad}', '\u{1d4ad}'),
-    ('\u{1d4ba}', '\u{1d4ba}'),
-    ('\u{1d4bc}', '\u{1d4bc}'),
-    ('\u{1d4c4}', '\u{1d4c4}'),
-    ('\u{1d506}', '\u{1d506}'),
-    ('\u{1d50b}', '\u{1d50c}'),
-    ('\u{1d515}', '\u{1d515}'),
-    ('\u{1d51d}', '\u{1d51d}'),
-    ('\u{1d53a}', '\u{1d53a}'),
-    ('\u{1d53f}', '\u{1d53f}'),
-    ('\u{1d545}', '\u{1d545}'),
-    ('\u{1d547}', '\u{1d549}'),
-    ('\u{1d551}', '\u{1d551}'),
-    ('\u{1d6a6}', '\u{1d6a7}'),
-    ('\u{1d7cc}', '\u{1d7cd}'),
-    ('\u{1da8c}', '\u{1da9a}'),
-    ('\u{1daa0}', '\u{1daa0}'),
-    ('\u{1dab0}', '\u{1deff}'),
-    ('\u{1df1f}', '\u{1df24}'),
-    ('\u{1df2b}', '\u{1dfff}'),
-    ('\u{1e007}', '\u{1e007}'),
-    ('\u{1e019}', '\u{1e01a}'),
-    ('\u{1e022}', '\u{1e022}'),
-    ('\u{1e025}', '\u{1e025}'),
-    ('\u{1e02b}', '\u{1e02f}'),
-    ('\u{1e06e}', '\u{1e08e}'),
-    ('\u{1e090}', '\u{1e0ff}'),
-    ('\u{1e12d}', '\u{1e12f}'),
-    ('\u{1e13e}', '\u{1e13f}'),
-    ('\u{1e14a}', '\u{1e14d}'),
-    ('\u{1e150}', '\u{1e28f}'),
-    ('\u{1e2af}', '\u{1e2bf}'),
-    ('\u{1e2fa}', '\u{1e2fe}'),
-    ('\u{1e300}', '\u{1e4cf}'),
-    ('\u{1e4fa}', '\u{1e5cf}'),
-    ('\u{1e5fb}', '\u{1e5fe}'),
-    ('\u{1e600}', '\u{1e7df}'),
-    ('\u{1e7e7}', '\u{1e7e7}'),
-    ('\u{1e7ec}', '\u{1e7ec}'),
-    ('\u{1e7ef}', '\u{1e7ef}'),
-    ('\u{1e7ff}', '\u{1e7ff}'),
-    ('\u{1e8c5}', '\u{1e8c6}'),
-    ('\u{1e8d7}', '\u{1e8ff}'),
-    ('\u{1e94c}', '\u{1e94f}'),
-    ('\u{1e95a}', '\u{1e95d}'),
-    ('\u{1e960}', '\u{1ec70}'),
-    ('\u{1ecb5}', '\u{1ed00}'),
-    ('\u{1ed3e}', '\u{1edff}'),
-    ('\u{1ee04}', '\u{1ee04}'),
-    ('\u{1ee20}', '\u{1ee20}'),
-    ('\u{1ee23}', '\u{1ee23}'),
-    ('\u{1ee25}', '\u{1ee26}'),
-    ('\u{1ee28}', '\u{1ee28}'),
-    ('\u{1ee33}', '\u{1ee33}'),
-    ('\u{1ee38}', '\u{1ee38}'),
-    ('\u{1ee3a}', '\u{1ee3a}'),
-    ('\u{1ee3c}', '\u{1ee41}'),
-    ('\u{1ee43}', '\u{1ee46}'),
-    ('\u{1ee48}', '\u{1ee48}'),
-    ('\u{1ee4a}', '\u{1ee4a}'),
-    ('\u{1ee4c}', '\u{1ee4c}'),
-    ('\u{1ee50}', '\u{1ee50}'),
-    ('\u{1ee53}', '\u{1ee53}'),
-    ('\u{1ee55}', '\u{1ee56}'),
-    ('\u{1ee58}', '\u{1ee58}'),
-    ('\u{1ee5a}', '\u{1ee5a}'),
-    ('\u{1ee5c}', '\u{1ee5c}'),
-    ('\u{1ee5e}', '\u{1ee5e}'),
-    ('\u{1ee60}', '\u{1ee60}'),
-    ('\u{1ee63}', '\u{1ee63}'),
-    ('\u{1ee65}', '\u{1ee66}'),
-    ('\u{1ee6b}', '\u{1ee6b}'),
-    ('\u{1ee73}', '\u{1ee73}'),
-    ('\u{1ee78}', '\u{1ee78}'),
-    ('\u{1ee7d}', '\u{1ee7d}'),
-    ('\u{1ee7f}', '\u{1ee7f}'),
-    ('\u{1ee8a}', '\u{1ee8a}'),
-    ('\u{1ee9c}', '\u{1eea0}'),
-    ('\u{1eea4}', '\u{1eea4}'),
-    ('\u{1eeaa}', '\u{1eeaa}'),
-    ('\u{1eebc}', '\u{1eeef}'),
-    ('\u{1eef2}', '\u{1efff}'),
-    ('\u{1f02c}', '\u{1f02f}'),
-    ('\u{1f094}', '\u{1f09f}'),
-    ('\u{1f0af}', '\u{1f0b0}'),
-    ('\u{1f0c0}', '\u{1f0c0}'),
-    ('\u{1f0d0}', '\u{1f0d0}'),
-    ('\u{1f0f6}', '\u{1f0ff}'),
-    ('\u{1f1ae}', '\u{1f1e5}'),
-    ('\u{1f203}', '\u{1f20f}'),
-    ('\u{1f23c}', '\u{1f23f}'),
-    ('\u{1f249}', '\u{1f24f}'),
-    ('\u{1f252}', '\u{1f25f}'),
-    ('\u{1f266}', '\u{1f2ff}'),
-    ('\u{1f6d8}', '\u{1f6db}'),
-    ('\u{1f6ed}', '\u{1f6ef}'),
-    ('\u{1f6fd}', '\u{1f6ff}'),
-    ('\u{1f777}', '\u{1f77a}'),
-    ('\u{1f7da}', '\u{1f7df}'),
-    ('\u{1f7ec}', '\u{1f7ef}'),
-    ('\u{1f7f1}', '\u{1f7ff}'),
-    ('\u{1f80c}', '\u{1f80f}'),
-    ('\u{1f848}', '\u{1f84f}'),
-    ('\u{1f85a}', '\u{1f85f}'),
-    ('\u{1f888}', '\u{1f88f}'),
-    ('\u{1f8ae}', '\u{1f8af}'),
-    ('\u{1f8bc}', '\u{1f8bf}'),
-    ('\u{1f8c2}', '\u{1f8ff}'),
-    ('\u{1fa54}', '\u{1fa5f}'),
-    ('\u{1fa6e}', '\u{1fa6f}'),
-    ('\u{1fa7d}', '\u{1fa7f}'),
-    ('\u{1fa8a}', '\u{1fa8e}'),
-    ('\u{1fac7}', '\u{1facd}'),
-    ('\u{1fadd}', '\u{1fade}'),
-    ('\u{1faea}', '\u{1faef}'),
-    ('\u{1faf9}', '\u{1faff}'),
-    ('\u{1fb93}', '\u{1fb93}'),
-    ('\u{1fbfa}', '\u{1ffff}'),
-    ('\u{2a6e0}', '\u{2a6ff}'),
-    ('\u{2b73a}', '\u{2b73f}'),
-    ('\u{2b81e}', '\u{2b81f}'),
-    ('\u{2cea2}', '\u{2ceaf}'),
-    ('\u{2ebe1}', '\u{2ebef}'),
-    ('\u{2ee5e}', '\u{2f7ff}'),
-    ('\u{2fa1e}', '\u{2ffff}'),
-    ('\u{3134b}', '\u{3134f}'),
-    ('\u{323b0}', '\u{e0000}'),
-    ('\u{e0002}', '\u{e001f}'),
-    ('\u{e0080}', '\u{e00ff}'),
-    ('\u{e01f0}', '\u{effff}'),
-    ('\u{ffffe}', '\u{fffff}'),
-    ('\u{10fffe}', '\u{10ffff}'),
-];
-
-pub const UPPERCASE_LETTER: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('À', 'Ö'),
-    ('Ø', 'Þ'),
-    ('Ā', 'Ā'),
-    ('Ă', 'Ă'),
-    ('Ą', 'Ą'),
-    ('Ć', 'Ć'),
-    ('Ĉ', 'Ĉ'),
-    ('Ċ', 'Ċ'),
-    ('Č', 'Č'),
-    ('Ď', 'Ď'),
-    ('Đ', 'Đ'),
-    ('Ē', 'Ē'),
-    ('Ĕ', 'Ĕ'),
-    ('Ė', 'Ė'),
-    ('Ę', 'Ę'),
-    ('Ě', 'Ě'),
-    ('Ĝ', 'Ĝ'),
-    ('Ğ', 'Ğ'),
-    ('Ä ', 'Ä '),
-    ('Äą', 'Äą'),
-    ('Ä€', 'Ä€'),
-    ('ÄŠ', 'ÄŠ'),
-    ('Äš', 'Äš'),
-    ('ÄȘ', 'ÄȘ'),
-    ('ÄŹ', 'ÄŹ'),
-    ('Äź', 'Äź'),
-    ('İ', 'İ'),
-    ('ÄČ', 'ÄČ'),
-    ('ÄŽ', 'ÄŽ'),
-    ('Ķ', 'Ķ'),
-    ('Äč', 'Äč'),
-    ('Ä»', 'Ä»'),
-    ('Ĝ', 'Ĝ'),
-    ('Äż', 'Äż'),
-    ('Ɓ', 'Ɓ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('Ƈ', 'Ƈ'),
-    ('Ê', 'Ê'),
-    ('Ì', 'Ì'),
-    ('Ǝ', 'Ǝ'),
-    ('Ɛ', 'Ɛ'),
-    ('Œ', 'Œ'),
-    ('Ɣ', 'Ɣ'),
-    ('Ɩ', 'Ɩ'),
-    ('Ƙ', 'Ƙ'),
-    ('Ú', 'Ú'),
-    ('Ü', 'Ü'),
-    ('ƞ', 'ƞ'),
-    ('Š', 'Š'),
-    ('Ćą', 'Ćą'),
-    ('Ć€', 'Ć€'),
-    ('ĆŠ', 'ĆŠ'),
-    ('Ćš', 'Ćš'),
-    ('ĆȘ', 'ĆȘ'),
-    ('ĆŹ', 'ĆŹ'),
-    ('Ćź', 'Ćź'),
-    ('ư', 'ư'),
-    ('ĆČ', 'ĆČ'),
-    ('ĆŽ', 'ĆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Ÿ', 'Ćč'),
-    ('Ć»', 'Ć»'),
-    ('Ćœ', 'Ćœ'),
-    ('Ɓ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ƈ'),
-    ('Ɖ', 'Ƌ'),
-    ('Ǝ', 'Ƒ'),
-    ('Ɠ', 'Ɣ'),
-    ('Ɩ', 'Ƙ'),
-    ('Ɯ', 'Ɲ'),
-    ('Ɵ', 'Ơ'),
-    ('Æą', 'Æą'),
-    ('Æ€', 'Æ€'),
-    ('Ɗ', 'Ƨ'),
-    ('Æ©', 'Æ©'),
-    ('ÆŹ', 'ÆŹ'),
-    ('Æź', 'ÆŻ'),
-    ('Ʊ', 'Æł'),
-    ('Æ”', 'Æ”'),
-    ('Æ·', 'Æž'),
-    ('ƌ', 'ƌ'),
-    ('DŽ', 'DŽ'),
-    ('LJ', 'LJ'),
-    ('NJ', 'NJ'),
-    ('Ǎ', 'Ǎ'),
-    ('Ǐ', 'Ǐ'),
-    ('Ǒ', 'Ǒ'),
-    ('Ǔ', 'Ǔ'),
-    ('Ǖ', 'Ǖ'),
-    ('Ǘ', 'Ǘ'),
-    ('Ǚ', 'Ǚ'),
-    ('Ǜ', 'Ǜ'),
-    ('Ǟ', 'Ǟ'),
-    ('Ç ', 'Ç '),
-    ('Çą', 'Çą'),
-    ('Ç€', 'Ç€'),
-    ('ÇŠ', 'ÇŠ'),
-    ('Çš', 'Çš'),
-    ('ÇȘ', 'ÇȘ'),
-    ('ÇŹ', 'ÇŹ'),
-    ('Çź', 'Çź'),
-    ('DZ', 'DZ'),
-    ('ÇŽ', 'ÇŽ'),
-    ('Ƕ', 'Ǟ'),
-    ('Çș', 'Çș'),
-    ('nj', 'nj'),
-    ('ÇŸ', 'ÇŸ'),
-    ('Ȁ', 'Ȁ'),
-    ('Ȃ', 'Ȃ'),
-    ('Ȅ', 'Ȅ'),
-    ('Ȇ', 'Ȇ'),
-    ('Ȉ', 'Ȉ'),
-    ('Ȋ', 'Ȋ'),
-    ('Ȍ', 'Ȍ'),
-    ('Ȏ', 'Ȏ'),
-    ('Ȑ', 'Ȑ'),
-    ('Ȓ', 'Ȓ'),
-    ('Ȕ', 'Ȕ'),
-    ('Ȗ', 'Ȗ'),
-    ('Ș', 'Ș'),
-    ('Ț', 'Ț'),
-    ('Ȝ', 'Ȝ'),
-    ('Ȟ', 'Ȟ'),
-    ('È ', 'È '),
-    ('Èą', 'Èą'),
-    ('È€', 'È€'),
-    ('ÈŠ', 'ÈŠ'),
-    ('Èš', 'Èš'),
-    ('ÈȘ', 'ÈȘ'),
-    ('ÈŹ', 'ÈŹ'),
-    ('Èź', 'Èź'),
-    ('Ȱ', 'Ȱ'),
-    ('ÈČ', 'ÈČ'),
-    ('Èș', 'È»'),
-    ('Ȝ', 'ȟ'),
-    ('Ɂ', 'Ɂ'),
-    ('Ƀ', 'Ɇ'),
-    ('Ɉ', 'Ɉ'),
-    ('Ɋ', 'Ɋ'),
-    ('Ɍ', 'Ɍ'),
-    ('Ɏ', 'Ɏ'),
-    ('Ͱ', 'Ͱ'),
-    ('ÍČ', 'ÍČ'),
-    ('Ͷ', 'Ͷ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ώ'),
-    ('Α', 'Ρ'),
-    ('Σ', 'Ϋ'),
-    ('Ϗ', 'Ϗ'),
-    ('ϒ', 'ϔ'),
-    ('Ϙ', 'Ϙ'),
-    ('Ϛ', 'Ϛ'),
-    ('Ϝ', 'Ϝ'),
-    ('Ϟ', 'Ϟ'),
-    ('Ï ', 'Ï '),
-    ('Ïą', 'Ïą'),
-    ('Ï€', 'Ï€'),
-    ('ÏŠ', 'ÏŠ'),
-    ('Ïš', 'Ïš'),
-    ('ÏȘ', 'ÏȘ'),
-    ('ÏŹ', 'ÏŹ'),
-    ('Ïź', 'Ïź'),
-    ('ÏŽ', 'ÏŽ'),
-    ('Ï·', 'Ï·'),
-    ('Ïč', 'Ïș'),
-    ('Ïœ', 'ĐŻ'),
-    ('Ń ', 'Ń '),
-    ('Ńą', 'Ńą'),
-    ('Ń€', 'Ń€'),
-    ('ŃŠ', 'ŃŠ'),
-    ('Ńš', 'Ńš'),
-    ('ŃȘ', 'ŃȘ'),
-    ('ŃŹ', 'ŃŹ'),
-    ('Ńź', 'Ńź'),
-    ('Ѱ', 'Ѱ'),
-    ('ŃČ', 'ŃČ'),
-    ('ŃŽ', 'ŃŽ'),
-    ('Ѷ', 'Ѷ'),
-    ('Ńž', 'Ńž'),
-    ('Ńș', 'Ńș'),
-    ('ŃŒ', 'ŃŒ'),
-    ('ŃŸ', 'ŃŸ'),
-    ('Ҁ', 'Ҁ'),
-    ('Ҋ', 'Ҋ'),
-    ('Ҍ', 'Ҍ'),
-    ('Ҏ', 'Ҏ'),
-    ('Ґ', 'Ґ'),
-    ('Ғ', 'Ғ'),
-    ('Ҕ', 'Ҕ'),
-    ('Җ', 'Җ'),
-    ('Ҙ', 'Ҙ'),
-    ('Қ', 'Қ'),
-    ('Ҝ', 'Ҝ'),
-    ('Ҟ', 'Ҟ'),
-    ('Ò ', 'Ò '),
-    ('Òą', 'Òą'),
-    ('Ò€', 'Ò€'),
-    ('ÒŠ', 'ÒŠ'),
-    ('Òš', 'Òš'),
-    ('ÒȘ', 'ÒȘ'),
-    ('ÒŹ', 'ÒŹ'),
-    ('Òź', 'Òź'),
-    ('Ò°', 'Ò°'),
-    ('ÒČ', 'ÒČ'),
-    ('ÒŽ', 'ÒŽ'),
-    ('Ò¶', 'Ò¶'),
-    ('Òž', 'Òž'),
-    ('Òș', 'Òș'),
-    ('Ҍ', 'Ҍ'),
-    ('ÒŸ', 'ÒŸ'),
-    ('Ӏ', 'Ӂ'),
-    ('Ӄ', 'Ӄ'),
-    ('Ӆ', 'Ӆ'),
-    ('Ӈ', 'Ӈ'),
-    ('Ӊ', 'Ӊ'),
-    ('Ӌ', 'Ӌ'),
-    ('Ӎ', 'Ӎ'),
-    ('Ӑ', 'Ӑ'),
-    ('Ӓ', 'Ӓ'),
-    ('Ӕ', 'Ӕ'),
-    ('Ӗ', 'Ӗ'),
-    ('Ә', 'Ә'),
-    ('Ӛ', 'Ӛ'),
-    ('Ӝ', 'Ӝ'),
-    ('Ӟ', 'Ӟ'),
-    ('Ó ', 'Ó '),
-    ('Óą', 'Óą'),
-    ('Ó€', 'Ó€'),
-    ('ÓŠ', 'ÓŠ'),
-    ('Óš', 'Óš'),
-    ('ÓȘ', 'ÓȘ'),
-    ('ÓŹ', 'ÓŹ'),
-    ('Óź', 'Óź'),
-    ('Ó°', 'Ó°'),
-    ('ÓČ', 'ÓČ'),
-    ('ÓŽ', 'ÓŽ'),
-    ('Ó¶', 'Ó¶'),
-    ('Óž', 'Óž'),
-    ('Óș', 'Óș'),
-    ('ӌ', 'ӌ'),
-    ('ÓŸ', 'ÓŸ'),
-    ('Ԁ', 'Ԁ'),
-    ('Ԃ', 'Ԃ'),
-    ('Ԅ', 'Ԅ'),
-    ('Ԇ', 'Ԇ'),
-    ('Ԉ', 'Ԉ'),
-    ('Ԋ', 'Ԋ'),
-    ('Ԍ', 'Ԍ'),
-    ('Ԏ', 'Ԏ'),
-    ('Ԑ', 'Ԑ'),
-    ('Ԓ', 'Ԓ'),
-    ('Ԕ', 'Ԕ'),
-    ('Ԗ', 'Ԗ'),
-    ('Ԙ', 'Ԙ'),
-    ('Ԛ', 'Ԛ'),
-    ('Ԝ', 'Ԝ'),
-    ('Ԟ', 'Ԟ'),
-    ('Ô ', 'Ô '),
-    ('Ôą', 'Ôą'),
-    ('Ô€', 'Ô€'),
-    ('ÔŠ', 'ÔŠ'),
-    ('Ôš', 'Ôš'),
-    ('ÔȘ', 'ÔȘ'),
-    ('ÔŹ', 'ÔŹ'),
-    ('Ôź', 'Ôź'),
-    ('Ô±', 'Ֆ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('áȉ', 'áȉ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᾀ', 'ᾀ'),
-    ('ᾂ', 'ᾂ'),
-    ('ᾄ', 'ᾄ'),
-    ('ᾆ', 'ᾆ'),
-    ('ឈ', 'ឈ'),
-    ('ᾊ', 'ᾊ'),
-    ('ᾌ', 'ᾌ'),
-    ('ᾎ', 'ᾎ'),
-    ('ថ', 'ថ'),
-    ('ᾒ', 'ᾒ'),
-    ('ᾔ', 'ᾔ'),
-    ('ᾖ', 'ᾖ'),
-    ('ម', 'ម'),
-    ('ᾚ', 'ᾚ'),
-    ('ᾜ', 'ᾜ'),
-    ('ᾞ', 'ᾞ'),
-    ('áž ', 'áž '),
-    ('ážą', 'ážą'),
-    ('ក', 'ក'),
-    ('ដ', 'ដ'),
-    ('ážš', 'ážš'),
-    ('ážȘ', 'ážȘ'),
-    ('ត', 'ត'),
-    ('ážź', 'ážź'),
-    ('áž°', 'áž°'),
-    ('ážČ', 'ážČ'),
-    ('ណ', 'ណ'),
-    ('áž¶', 'áž¶'),
-    ('ážž', 'ážž'),
-    ('ážș', 'ážș'),
-    ('ឌ', 'ឌ'),
-    ('ស', 'ស'),
-    ('áč€', 'áč€'),
-    ('áč‚', 'áč‚'),
-    ('áč„', 'áč„'),
-    ('áč†', 'áč†'),
-    ('áčˆ', 'áčˆ'),
-    ('áčŠ', 'áčŠ'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč', 'áč'),
-    ('áč’', 'áč’'),
-    ('áč”', 'áč”'),
-    ('áč–', 'áč–'),
-    ('áč˜', 'áč˜'),
-    ('áčš', 'áčš'),
-    ('áčœ', 'áčœ'),
-    ('áčž', 'áčž'),
-    ('áč ', 'áč '),
-    ('áčą', 'áčą'),
-    ('áč€', 'áč€'),
-    ('áčŠ', 'áčŠ'),
-    ('áčš', 'áčš'),
-    ('áčȘ', 'áčȘ'),
-    ('áčŹ', 'áčŹ'),
-    ('áčź', 'áčź'),
-    ('áč°', 'áč°'),
-    ('áčČ', 'áčČ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč¶', 'áč¶'),
-    ('áčž', 'áčž'),
-    ('áčș', 'áčș'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŸ', 'áčŸ'),
-    ('áș€', 'áș€'),
-    ('áș‚', 'áș‚'),
-    ('áș„', 'áș„'),
-    ('áș†', 'áș†'),
-    ('áșˆ', 'áșˆ'),
-    ('áșŠ', 'áșŠ'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș', 'áș'),
-    ('áș’', 'áș’'),
-    ('áș”', 'áș”'),
-    ('áșž', 'áșž'),
-    ('áș ', 'áș '),
-    ('áșą', 'áșą'),
-    ('áș€', 'áș€'),
-    ('áșŠ', 'áșŠ'),
-    ('áșš', 'áșš'),
-    ('áșȘ', 'áșȘ'),
-    ('áșŹ', 'áșŹ'),
-    ('áșź', 'áșź'),
-    ('áș°', 'áș°'),
-    ('áșČ', 'áșČ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș¶', 'áș¶'),
-    ('áșž', 'áșž'),
-    ('áșș', 'áșș'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŸ', 'áșŸ'),
-    ('Ề', 'Ề'),
-    ('Ể', 'Ể'),
-    ('Ễ', 'Ễ'),
-    ('Ệ', 'Ệ'),
-    ('Ỉ', 'Ỉ'),
-    ('Ị', 'Ị'),
-    ('Ọ', 'Ọ'),
-    ('Ỏ', 'Ỏ'),
-    ('Ố', 'Ố'),
-    ('Ồ', 'Ồ'),
-    ('Ổ', 'Ổ'),
-    ('Ỗ', 'Ỗ'),
-    ('Ộ', 'Ộ'),
-    ('Ớ', 'Ớ'),
-    ('Ờ', 'Ờ'),
-    ('Ở', 'Ở'),
-    ('á» ', 'á» '),
-    ('ỹ', 'ỹ'),
-    ('Ề', 'Ề'),
-    ('Ị', 'Ị'),
-    ('Ớ', 'Ớ'),
-    ('á»Ș', 'á»Ș'),
-    ('ỏ', 'ỏ'),
-    ('ở', 'ở'),
-    ('á»°', 'á»°'),
-    ('á»Č', 'á»Č'),
-    ('Ỏ', 'Ỏ'),
-    ('á»¶', 'á»¶'),
-    ('Ở', 'Ở'),
-    ('á»ș', 'á»ș'),
-    ('Ọ', 'Ọ'),
-    ('ở', 'ở'),
-    ('ገ', 'ጏ'),
-    ('ጘ', 'ጝ'),
-    ('ጚ', 'áŒŻ'),
-    ('ጞ', 'áŒż'),
-    ('ᜈ', 'ᜍ'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', 'ᜟ'),
-    ('᜚', 'áœŻ'),
-    ('៞', '៻'),
-    ('Ὲ', 'Ή'),
-    ('Ῐ', 'Ί'),
-    ('Ὶ', '῏'),
-    ('áżž', 'áż»'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℋ', 'ℍ'),
-    ('ℐ', 'ℒ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℰ', 'ℳ'),
-    ('℟', 'ℿ'),
-    ('ⅅ', 'ⅅ'),
-    ('Ↄ', 'Ↄ'),
-    ('Ⰰ', 'Ⱟ'),
-    ('â± ', 'â± '),
-    ('ⱹ', 'ⱀ'),
-    ('â±§', 'â±§'),
-    ('Ⱪ', 'Ⱪ'),
-    ('Ⱬ', 'Ⱬ'),
-    ('â±­', 'â±°'),
-    ('â±Č', 'â±Č'),
-    ('â±”', 'â±”'),
-    ('ⱟ', 'âȀ'),
-    ('âȂ', 'âȂ'),
-    ('âȄ', 'âȄ'),
-    ('âȆ', 'âȆ'),
-    ('âȈ', 'âȈ'),
-    ('âȊ', 'âȊ'),
-    ('âȌ', 'âȌ'),
-    ('âȎ', 'âȎ'),
-    ('âȐ', 'âȐ'),
-    ('âȒ', 'âȒ'),
-    ('âȔ', 'âȔ'),
-    ('âȖ', 'âȖ'),
-    ('âȘ', 'âȘ'),
-    ('âȚ', 'âȚ'),
-    ('âȜ', 'âȜ'),
-    ('âȞ', 'âȞ'),
-    ('âČ ', 'âČ '),
-    ('âČą', 'âČą'),
-    ('âČ€', 'âČ€'),
-    ('âČŠ', 'âČŠ'),
-    ('âČš', 'âČš'),
-    ('âČȘ', 'âČȘ'),
-    ('âČŹ', 'âČŹ'),
-    ('âČź', 'âČź'),
-    ('âȰ', 'âȰ'),
-    ('âČČ', 'âČČ'),
-    ('âČŽ', 'âČŽ'),
-    ('âȶ', 'âȶ'),
-    ('âČž', 'âČž'),
-    ('âČș', 'âČș'),
-    ('âČŒ', 'âČŒ'),
-    ('âČŸ', 'âČŸ'),
-    ('Ⳁ', 'Ⳁ'),
-    ('Ⳃ', 'Ⳃ'),
-    ('Ⳅ', 'Ⳅ'),
-    ('Ⳇ', 'Ⳇ'),
-    ('Ⳉ', 'Ⳉ'),
-    ('Ⳋ', 'Ⳋ'),
-    ('Ⳍ', 'Ⳍ'),
-    ('Ⳏ', 'Ⳏ'),
-    ('Ⳑ', 'Ⳑ'),
-    ('Ⳓ', 'Ⳓ'),
-    ('Ⳕ', 'Ⳕ'),
-    ('Ⳗ', 'Ⳗ'),
-    ('Ⳙ', 'Ⳙ'),
-    ('Ⳛ', 'Ⳛ'),
-    ('Ⳝ', 'Ⳝ'),
-    ('Ⳟ', 'Ⳟ'),
-    ('âł ', 'âł '),
-    ('âłą', 'âłą'),
-    ('âł«', 'âł«'),
-    ('âł­', 'âł­'),
-    ('âłČ', 'âłČ'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙃ', 'Ꙃ'),
-    ('Ꙅ', 'Ꙅ'),
-    ('Ꙇ', 'Ꙇ'),
-    ('Ꙉ', 'Ꙉ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙍ', 'Ꙍ'),
-    ('Ꙏ', 'Ꙏ'),
-    ('Ꙑ', 'Ꙑ'),
-    ('Ꙓ', 'Ꙓ'),
-    ('Ꙕ', 'Ꙕ'),
-    ('Ꙗ', 'Ꙗ'),
-    ('Ꙙ', 'Ꙙ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('Ꙝ', 'Ꙝ'),
-    ('Ꙟ', 'Ꙟ'),
-    ('Ꙡ', 'Ꙡ'),
-    ('ê™ą', 'ê™ą'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('ê™Ș', 'ê™Ș'),
-    ('ê™Ź', 'ê™Ź'),
-    ('Ꚁ', 'Ꚁ'),
-    ('Ꚃ', 'Ꚃ'),
-    ('Ꚅ', 'Ꚅ'),
-    ('Ꚇ', 'Ꚇ'),
-    ('Ꚉ', 'Ꚉ'),
-    ('Ꚋ', 'Ꚋ'),
-    ('Ꚍ', 'Ꚍ'),
-    ('Ꚏ', 'Ꚏ'),
-    ('Ꚑ', 'Ꚑ'),
-    ('Ꚓ', 'Ꚓ'),
-    ('Ꚕ', 'Ꚕ'),
-    ('Ꚗ', 'Ꚗ'),
-    ('Ꚙ', 'Ꚙ'),
-    ('Ꚛ', 'Ꚛ'),
-    ('êœą', 'êœą'),
-    ('꜀', '꜀'),
-    ('꜊', '꜊'),
-    ('ꜚ', 'ꜚ'),
-    ('êœȘ', 'êœȘ'),
-    ('êœŹ', 'êœŹ'),
-    ('êœź', 'êœź'),
-    ('êœČ', 'êœČ'),
-    ('꜎', '꜎'),
-    ('Ꜷ', 'Ꜷ'),
-    ('ꜞ', 'ꜞ'),
-    ('êœș', 'êœș'),
-    ('꜌', '꜌'),
-    ('ꜟ', 'ꜟ'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝃ', 'Ꝃ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('Ꝇ', 'Ꝇ'),
-    ('Ꝉ', 'Ꝉ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝍ', 'Ꝍ'),
-    ('Ꝏ', 'Ꝏ'),
-    ('Ꝑ', 'Ꝑ'),
-    ('Ꝓ', 'Ꝓ'),
-    ('Ꝕ', 'Ꝕ'),
-    ('Ꝗ', 'Ꝗ'),
-    ('Ꝙ', 'Ꝙ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('Ꝝ', 'Ꝝ'),
-    ('Ꝟ', 'Ꝟ'),
-    ('Ꝡ', 'Ꝡ'),
-    ('êą', 'êą'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('êȘ', 'êȘ'),
-    ('êŹ', 'êŹ'),
-    ('êź', 'êź'),
-    ('êč', 'êč'),
-    ('Ꝼ', 'Ꝼ'),
-    ('Ꝝ', 'ꝟ'),
-    ('Ꞁ', 'Ꞁ'),
-    ('Ꞃ', 'Ꞃ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('Ꞇ', 'Ꞇ'),
-    ('Ꞌ', 'Ꞌ'),
-    ('Ɥ', 'Ɥ'),
-    ('Ꞑ', 'Ꞑ'),
-    ('Ꞓ', 'Ꞓ'),
-    ('Ꞗ', 'Ꞗ'),
-    ('Ꞙ', 'Ꞙ'),
-    ('Ꞛ', 'Ꞛ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('Ꞟ', 'Ꞟ'),
-    ('Ꞡ', 'Ꞡ'),
-    ('êžą', 'êžą'),
-    ('Ꞁ', 'Ꞁ'),
-    ('꞊', '꞊'),
-    ('Ꞛ', 'Ꞛ'),
-    ('êžȘ', 'êžź'),
-    ('Ʞ', 'ꞎ'),
-    ('Ꞷ', 'Ꞷ'),
-    ('êžž', 'êžž'),
-    ('êžș', 'êžș'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞟ', 'ꞟ'),
-    ('Ꟁ', 'Ꟁ'),
-    ('Ꟃ', 'Ꟃ'),
-    ('Ꞔ', 'Ꟈ'),
-    ('Ꟊ', 'Ꟊ'),
-    ('Ɤ', 'Ꟍ'),
-    ('Ꟑ', 'Ꟑ'),
-    ('Ꟗ', 'Ꟗ'),
-    ('Ꟙ', 'Ꟙ'),
-    ('Ꟛ', 'Ꟛ'),
-    ('Ƛ', 'Ƛ'),
-    ('꟔', '꟔'),
-    ('ïŒĄ', 'ïŒș'),
-    ('𐐀', '𐐧'),
-    ('𐒰', '𐓓'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('đČ€', 'đČČ'),
-    ('𐔐', '𐔄'),
-    ('𑱠', '𑱿'),
-    ('đ–č€', 'đ–čŸ'),
-    ('𝐀', '𝐙'),
-    ('𝐮', '𝑍'),
-    ('𝑹', '𝒁'),
-    ('𝒜', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’”'),
-    ('𝓐', 'đ“©'),
-    ('𝔄', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔾', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕬', '𝖅'),
-    ('𝖠', 'đ–č'),
-    ('𝗔', '𝗭'),
-    ('𝘈', '𝘡'),
-    ('đ˜Œ', '𝙕'),
-    ('𝙰', '𝚉'),
-    ('𝚹', '𝛀'),
-    ('𝛱', 'đ›ș'),
-    ('𝜜', '𝜮'),
-    ('𝝖', '𝝼'),
-    ('𝞐', '𝞹'),
-    ('𝟊', '𝟊'),
-    ('𞀀', '𞀥'),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/grapheme_cluster_break.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/grapheme_cluster_break.rs
deleted file mode 100644
index 6a6ec2a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/grapheme_cluster_break.rs
+++ /dev/null
@@ -1,1420 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate grapheme-cluster-break ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("CR", CR),
-    ("Control", CONTROL),
-    ("Extend", EXTEND),
-    ("L", L),
-    ("LF", LF),
-    ("LV", LV),
-    ("LVT", LVT),
-    ("Prepend", PREPEND),
-    ("Regional_Indicator", REGIONAL_INDICATOR),
-    ("SpacingMark", SPACINGMARK),
-    ("T", T),
-    ("V", V),
-    ("ZWJ", ZWJ),
-];
-
-pub const CR: &'static [(char, char)] = &[('\r', '\r')];
-
-pub const CONTROL: &'static [(char, char)] = &[
-    ('\0', '\t'),
-    ('\u{b}', '\u{c}'),
-    ('\u{e}', '\u{1f}'),
-    ('\u{7f}', '\u{9f}'),
-    ('\u{ad}', '\u{ad}'),
-    ('\u{61c}', '\u{61c}'),
-    ('\u{180e}', '\u{180e}'),
-    ('\u{200b}', '\u{200b}'),
-    ('\u{200e}', '\u{200f}'),
-    ('\u{2028}', '\u{202e}'),
-    ('\u{2060}', '\u{206f}'),
-    ('\u{feff}', '\u{feff}'),
-    ('\u{fff0}', '\u{fffb}'),
-    ('\u{13430}', '\u{1343f}'),
-    ('\u{1bca0}', '\u{1bca3}'),
-    ('\u{1d173}', '\u{1d17a}'),
-    ('\u{e0000}', '\u{e001f}'),
-    ('\u{e0080}', '\u{e00ff}'),
-    ('\u{e01f0}', '\u{e0fff}'),
-];
-
-pub const EXTEND: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{483}', '\u{489}'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6df}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', '\u{7f3}'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{819}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('\u{897}', '\u{89f}'),
-    ('\u{8ca}', '\u{8e1}'),
-    ('\u{8e3}', '\u{902}'),
-    ('\u{93a}', '\u{93a}'),
-    ('\u{93c}', '\u{93c}'),
-    ('\u{941}', '\u{948}'),
-    ('\u{94d}', '\u{94d}'),
-    ('\u{951}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('\u{981}', '\u{981}'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9be}', '\u{9be}'),
-    ('\u{9c1}', '\u{9c4}'),
-    ('\u{9cd}', '\u{9cd}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', '\u{a02}'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('\u{a41}', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', '\u{a82}'),
-    ('\u{abc}', '\u{abc}'),
-    ('\u{ac1}', '\u{ac5}'),
-    ('\u{ac7}', '\u{ac8}'),
-    ('\u{acd}', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{b01}', '\u{b01}'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3e}', '\u{b3f}'),
-    ('\u{b41}', '\u{b44}'),
-    ('\u{b4d}', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bbe}', '\u{bbe}'),
-    ('\u{bc0}', '\u{bc0}'),
-    ('\u{bcd}', '\u{bcd}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', '\u{c00}'),
-    ('\u{c04}', '\u{c04}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', '\u{c40}'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', '\u{c81}'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('\u{cbf}', '\u{cc0}'),
-    ('\u{cc2}', '\u{cc2}'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('\u{d00}', '\u{d01}'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d3e}', '\u{d3e}'),
-    ('\u{d41}', '\u{d44}'),
-    ('\u{d4d}', '\u{d4d}'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', '\u{d81}'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dcf}'),
-    ('\u{dd2}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('\u{ddf}', '\u{ddf}'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('\u{f71}', '\u{f7e}'),
-    ('\u{f80}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('\u{102d}', '\u{1030}'),
-    ('\u{1032}', '\u{1037}'),
-    ('\u{1039}', '\u{103a}'),
-    ('\u{103d}', '\u{103e}'),
-    ('\u{1058}', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{1082}'),
-    ('\u{1085}', '\u{1086}'),
-    ('\u{108d}', '\u{108d}'),
-    ('\u{109d}', '\u{109d}'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1715}'),
-    ('\u{1732}', '\u{1734}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17b5}'),
-    ('\u{17b7}', '\u{17bd}'),
-    ('\u{17c6}', '\u{17c6}'),
-    ('\u{17c9}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', '\u{1922}'),
-    ('\u{1927}', '\u{1928}'),
-    ('\u{1932}', '\u{1932}'),
-    ('\u{1939}', '\u{193b}'),
-    ('\u{1a17}', '\u{1a18}'),
-    ('\u{1a1b}', '\u{1a1b}'),
-    ('\u{1a56}', '\u{1a56}'),
-    ('\u{1a58}', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a60}'),
-    ('\u{1a62}', '\u{1a62}'),
-    ('\u{1a65}', '\u{1a6c}'),
-    ('\u{1a73}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', '\u{1b03}'),
-    ('\u{1b34}', '\u{1b3d}'),
-    ('\u{1b42}', '\u{1b44}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1b81}'),
-    ('\u{1ba2}', '\u{1ba5}'),
-    ('\u{1ba8}', '\u{1bad}'),
-    ('\u{1be6}', '\u{1be6}'),
-    ('\u{1be8}', '\u{1be9}'),
-    ('\u{1bed}', '\u{1bed}'),
-    ('\u{1bef}', '\u{1bf3}'),
-    ('\u{1c2c}', '\u{1c33}'),
-    ('\u{1c36}', '\u{1c37}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce0}'),
-    ('\u{1ce2}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{200c}', '\u{200c}'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{302a}', '\u{302f}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{a66f}', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('\u{a825}', '\u{a826}'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('\u{a8c4}', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a951}'),
-    ('\u{a953}', '\u{a953}'),
-    ('\u{a980}', '\u{a982}'),
-    ('\u{a9b3}', '\u{a9b3}'),
-    ('\u{a9b6}', '\u{a9b9}'),
-    ('\u{a9bc}', '\u{a9bd}'),
-    ('\u{a9c0}', '\u{a9c0}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa2e}'),
-    ('\u{aa31}', '\u{aa32}'),
-    ('\u{aa35}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', '\u{aa4c}'),
-    ('\u{aa7c}', '\u{aa7c}'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('\u{aaec}', '\u{aaed}'),
-    ('\u{aaf6}', '\u{aaf6}'),
-    ('\u{abe5}', '\u{abe5}'),
-    ('\u{abe8}', '\u{abe8}'),
-    ('\u{abed}', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('\u{11001}', '\u{11001}'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '\u{11081}'),
-    ('\u{110b3}', '\u{110b6}'),
-    ('\u{110b9}', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{1112b}'),
-    ('\u{1112d}', '\u{11134}'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '\u{11181}'),
-    ('\u{111b6}', '\u{111be}'),
-    ('\u{111c0}', '\u{111c0}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('\u{111cf}', '\u{111cf}'),
-    ('\u{1122f}', '\u{11231}'),
-    ('\u{11234}', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112df}'),
-    ('\u{112e3}', '\u{112ea}'),
-    ('\u{11300}', '\u{11301}'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{1133e}', '\u{1133e}'),
-    ('\u{11340}', '\u{11340}'),
-    ('\u{1134d}', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113b8}', '\u{113b8}'),
-    ('\u{113bb}', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '\u{113c9}'),
-    ('\u{113ce}', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('\u{11438}', '\u{1143f}'),
-    ('\u{11442}', '\u{11444}'),
-    ('\u{11446}', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b0}', '\u{114b0}'),
-    ('\u{114b3}', '\u{114b8}'),
-    ('\u{114ba}', '\u{114ba}'),
-    ('\u{114bd}', '\u{114bd}'),
-    ('\u{114bf}', '\u{114c0}'),
-    ('\u{114c2}', '\u{114c3}'),
-    ('\u{115af}', '\u{115af}'),
-    ('\u{115b2}', '\u{115b5}'),
-    ('\u{115bc}', '\u{115bd}'),
-    ('\u{115bf}', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('\u{11633}', '\u{1163a}'),
-    ('\u{1163d}', '\u{1163d}'),
-    ('\u{1163f}', '\u{11640}'),
-    ('\u{116ab}', '\u{116ab}'),
-    ('\u{116ad}', '\u{116ad}'),
-    ('\u{116b0}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1171d}'),
-    ('\u{1171f}', '\u{1171f}'),
-    ('\u{11722}', '\u{11725}'),
-    ('\u{11727}', '\u{1172b}'),
-    ('\u{1182f}', '\u{11837}'),
-    ('\u{11839}', '\u{1183a}'),
-    ('\u{11930}', '\u{11930}'),
-    ('\u{1193b}', '\u{1193e}'),
-    ('\u{11943}', '\u{11943}'),
-    ('\u{119d4}', '\u{119d7}'),
-    ('\u{119da}', '\u{119db}'),
-    ('\u{119e0}', '\u{119e0}'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', '\u{11a38}'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a56}'),
-    ('\u{11a59}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a96}'),
-    ('\u{11a98}', '\u{11a99}'),
-    ('\u{11c30}', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3d}'),
-    ('\u{11c3f}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('\u{11caa}', '\u{11cb0}'),
-    ('\u{11cb2}', '\u{11cb3}'),
-    ('\u{11cb5}', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('\u{11d95}', '\u{11d95}'),
-    ('\u{11d97}', '\u{11d97}'),
-    ('\u{11ef3}', '\u{11ef4}'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('\u{11f36}', '\u{11f3a}'),
-    ('\u{11f40}', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13440}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{16129}'),
-    ('\u{1612d}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e4ec}', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e94a}'),
-    ('đŸ»', '🏿'),
-    ('\u{e0020}', '\u{e007f}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const L: &'static [(char, char)] = &[('ᄀ', 'ᅟ'), ('ê„ ', 'ꄌ')];
-
-pub const LF: &'static [(char, char)] = &[('\n', '\n')];
-
-pub const LV: &'static [(char, char)] = &[
-    ('가', '가'),
-    ('개', '개'),
-    ('ê°ž', 'ê°ž'),
-    ('걔', '걔'),
-    ('ê±°', 'ê±°'),
-    ('êȌ', 'êȌ'),
-    ('êČš', 'êČš'),
-    ('êł„', 'êł„'),
-    ('êł ', 'êł '),
-    ('êłŒ', 'êłŒ'),
-    ('ꎘ', 'ꎘ'),
-    ('ꎎ', 'ꎎ'),
-    ('ꔐ', 'ꔐ'),
-    ('ê”Ź', 'ê”Ź'),
-    ('궈', '궈'),
-    ('ê¶€', 'ê¶€'),
-    ('귀', '귀'),
-    ('규', '규'),
-    ('ê·ž', 'ê·ž'),
-    ('êž”', 'êž”'),
-    ('êž°', 'êž°'),
-    ('êčŒ', 'êčŒ'),
-    ('êčš', 'êčš'),
-    ('êș„', 'êș„'),
-    ('êș ', 'êș '),
-    ('êșŒ', 'êșŒ'),
-    ('께', '께'),
-    ('껎', '껎'),
-    ('ꌐ', 'ꌐ'),
-    ('êŒŹ', 'êŒŹ'),
-    ('꜈', '꜈'),
-    ('꜀', '꜀'),
-    ('Ꟁ', 'Ꟁ'),
-    ('Ƛ', 'Ƛ'),
-    ('꟞', '꟞'),
-    ('êż”', 'êż”'),
-    ('êż°', 'êż°'),
-    ('뀌', '뀌'),
-    ('뀹', '뀹'),
-    ('끄', '끄'),
-    ('끠', '끠'),
-    ('끌', '끌'),
-    ('나', '나'),
-    ('낮', '낮'),
-    ('냐', '냐'),
-    ('냏', '냏'),
-    ('너', '너'),
-    ('ë„€', 'ë„€'),
-    ('녀', '녀'),
-    ('녜', '녜'),
-    ('녾', '녾'),
-    ('놔', '놔'),
-    ('놰', '놰'),
-    ('뇌', '뇌'),
-    ('뇹', '뇹'),
-    ('누', '누'),
-    ('눠', '눠'),
-    ('눌', '눌'),
-    ('뉘', '뉘'),
-    ('뉮', '뉮'),
-    ('느', '느'),
-    ('늬', '늬'),
-    ('니', '니'),
-    ('ë‹€', 'ë‹€'),
-    ('대', '대'),
-    ('댜', '댜'),
-    ('댾', '댾'),
-    ('더', '더'),
-    ('데', '데'),
-    ('뎌', '뎌'),
-    ('뎚', '뎚'),
-    ('도', '도'),
-    ('돠', '돠'),
-    ('돌', '돌'),
-    ('되', '되'),
-    ('됎', '됎'),
-    ('두', '두'),
-    ('둬', '둬'),
-    ('뒈', '뒈'),
-    ('ë’€', 'ë’€'),
-    ('듀', '듀'),
-    ('드', '드'),
-    ('듾', '듾'),
-    ('디', '디'),
-    ('따', '따'),
-    ('때', '때'),
-    ('땹', '땹'),
-    ('떄', '떄'),
-    ('떠', '떠'),
-    ('떌', '떌'),
-    ('뗘', '뗘'),
-    ('뗮', '뗮'),
-    ('또', '또'),
-    ('똏', '똏'),
-    ('뙈', '뙈'),
-    ('뙀', '뙀'),
-    ('뚀', '뚀'),
-    ('뚜', '뚜'),
-    ('뚾', '뚾'),
-    ('뛔', '뛔'),
-    ('뛰', '뛰'),
-    ('뜌', '뜌'),
-    ('뜹', '뜹'),
-    ('띄', '띄'),
-    ('띠', '띠'),
-    ('띌', '띌'),
-    ('래', '래'),
-    ('랮', '랮'),
-    ('럐', '럐'),
-    ('러', '러'),
-    ('레', '레'),
-    ('ë €', 'ë €'),
-    ('례', '례'),
-    ('로', '로'),
-    ('륞', '륞'),
-    ('뱔', '뱔'),
-    ('ëą°', 'ëą°'),
-    ('료', '료'),
-    ('룚', '룚'),
-    ('뀄', '뀄'),
-    ('뀠', '뀠'),
-    ('뀌', '뀌'),
-    ('넘', '넘'),
-    ('넎', '넎'),
-    ('느', '느'),
-    ('늏', '늏'),
-    ('마', '마'),
-    ('ë§€', 'ë§€'),
-    ('빀', '빀'),
-    ('빜', '빜'),
-    ('ëšž', 'ëšž'),
-    ('메', '메'),
-    ('ë©°', 'ë©°'),
-    ('ëȘŒ', 'ëȘŒ'),
-    ('ëȘš', 'ëȘš'),
-    ('뫄', '뫄'),
-    ('ë« ', 'ë« '),
-    ('뫌', '뫌'),
-    ('돘', '돘'),
-    ('돎', '돎'),
-    ('뭐', '뭐'),
-    ('ë­Ź', 'ë­Ź'),
-    ('럈', '럈'),
-    ('럀', '럀'),
-    ('므', '므'),
-    ('믜', '믜'),
-    ('믞', '믞'),
-    ('바', '바'),
-    ('ë°°', 'ë°°'),
-    ('뱌', '뱌'),
-    ('뱚', '뱚'),
-    ('ëȄ', 'ëȄ'),
-    ('ëČ ', 'ëČ '),
-    ('ëČŒ', 'ëČŒ'),
-    ('볘', '볘'),
-    ('볎', '볎'),
-    ('뎐', '뎐'),
-    ('뎏', '뎏'),
-    ('딈', '딈'),
-    ('딀', '딀'),
-    ('부', '부'),
-    ('붜', '붜'),
-    ('ë¶ž', 'ë¶ž'),
-    ('뷔', '뷔'),
-    ('ë·°', 'ë·°'),
-    ('뾌', '뾌'),
-    ('ëžš', 'ëžš'),
-    ('ëč„', 'ëč„'),
-    ('ëč ', 'ëč '),
-    ('ëčŒ', 'ëčŒ'),
-    ('ëș˜', 'ëș˜'),
-    ('ëșŽ', 'ëșŽ'),
-    ('뻐', '뻐'),
-    ('뻏', '뻏'),
-    ('댈', '댈'),
-    ('대', '대'),
-    ('뜀', '뜀'),
-    ('뜜', '뜜'),
-    ('뜞', '뜞'),
-    ('럔', '럔'),
-    ('런', '런'),
-    ('뿌', '뿌'),
-    ('ëżš', 'ëżš'),
-    ('쀄', '쀄'),
-    ('쀠', '쀠'),
-    ('쀌', '쀌'),
-    ('쁘', '쁘'),
-    ('쁎', '쁎'),
-    ('삐', '삐'),
-    ('ì‚Ź', 'ì‚Ź'),
-    ('새', '새'),
-    ('샀', '샀'),
-    ('섀', '섀'),
-    ('서', '서'),
-    ('섞', '섞'),
-    ('셔', '셔'),
-    ('셰', '셰'),
-    ('소', '소'),
-    ('솚', '솚'),
-    ('쇄', '쇄'),
-    ('쇠', '쇠'),
-    ('쇌', '쇌'),
-    ('수', '수'),
-    ('숎', '숎'),
-    ('쉐', '쉐'),
-    ('ì‰Ź', 'ì‰Ź'),
-    ('슈', '슈'),
-    ('슀', '슀'),
-    ('싀', '싀'),
-    ('시', '시'),
-    ('싞', '싞'),
-    ('쌔', '쌔'),
-    ('쌰', '쌰'),
-    ('썌', '썌'),
-    ('썚', '썚'),
-    ('쎄', '쎄'),
-    ('쎠', '쎠'),
-    ('쎌', '쎌'),
-    ('쏘', '쏘'),
-    ('쏎', '쏎'),
-    ('쐐', '쐐'),
-    ('ìŹ', 'ìŹ'),
-    ('쑈', '쑈'),
-    ('ì‘€', 'ì‘€'),
-    ('쒀', '쒀'),
-    ('쒜', '쒜'),
-    ('ì’ž', 'ì’ž'),
-    ('쓔', '쓔'),
-    ('쓰', '쓰'),
-    ('씌', '씌'),
-    ('씚', '씚'),
-    ('아', '아'),
-    ('애', '애'),
-    ('알', '알'),
-    ('얘', '얘'),
-    ('ì–Ž', 'ì–Ž'),
-    ('에', '에'),
-    ('ì—Ź', 'ì—Ź'),
-    ('예', '예'),
-    ('였', '였'),
-    ('와', '와'),
-    ('왜', '왜'),
-    ('왞', '왞'),
-    ('요', '요'),
-    ('우', '우'),
-    ('워', '워'),
-    ('웚', '웚'),
-    ('위', '위'),
-    ('유', '유'),
-    ('윌', '윌'),
-    ('의', '의'),
-    ('읎', '읎'),
-    ('자', '자'),
-    ('ìžŹ', 'ìžŹ'),
-    ('쟈', '쟈'),
-    ('쟀', '쟀'),
-    ('저', '저'),
-    ('제', '제'),
-    ('ì ž', 'ì ž'),
-    ('ìĄ”', 'ìĄ”'),
-    ('ìĄ°', 'ìĄ°'),
-    ('ìąŒ', 'ìąŒ'),
-    ('ìąš', 'ìąš'),
-    ('ìŁ„', 'ìŁ„'),
-    ('ìŁ ', 'ìŁ '),
-    ('ìŁŒ', 'ìŁŒ'),
-    ('쀘', '쀘'),
-    ('쀎', '쀎'),
-    ('섐', '섐'),
-    ('ì„Ź', 'ì„Ź'),
-    ('슈', '슈'),
-    ('슀', '슀'),
-    ('지', '지'),
-    ('짜', '짜'),
-    ('ì§ž', 'ì§ž'),
-    ('ìš”', 'ìš”'),
-    ('ìš°', 'ìš°'),
-    ('쩌', '쩌'),
-    ('쩚', '쩚'),
-    ('ìȘ„', 'ìȘ„'),
-    ('ìȘ ', 'ìȘ '),
-    ('ìȘŒ', 'ìȘŒ'),
-    ('쫘', '쫘'),
-    ('쫎', '쫎'),
-    ('ìŹ', 'ìŹ'),
-    ('ìŹŹ', 'ìŹŹ'),
-    ('쭈', '쭈'),
-    ('ì­€', 'ì­€'),
-    ('ìź€', 'ìź€'),
-    ('ìźœ', 'ìźœ'),
-    ('ìźž', 'ìźž'),
-    ('ìŻ”', 'ìŻ”'),
-    ('ìŻ°', 'ìŻ°'),
-    ('찌', '찌'),
-    ('ì°š', 'ì°š'),
-    ('채', '채'),
-    ('ì± ', 'ì± '),
-    ('챌', '챌'),
-    ('ìȘ', 'ìȘ'),
-    ('ìČŽ', 'ìČŽ'),
-    ('ìł', 'ìł'),
-    ('ìłŹ', 'ìłŹ'),
-    ('쎈', '쎈'),
-    ('쎀', '쎀'),
-    ('씀', '씀'),
-    ('씜', '씜'),
-    ('씞', '씞'),
-    ('추', '추'),
-    ('ì¶°', 'ì¶°'),
-    ('췌', '췌'),
-    ('ì·š', 'ì·š'),
-    ('ìž„', 'ìž„'),
-    ('ìž ', 'ìž '),
-    ('잌', '잌'),
-    ('ìč˜', 'ìč˜'),
-    ('ìčŽ', 'ìčŽ'),
-    ('ìș', 'ìș'),
-    ('ìșŹ', 'ìșŹ'),
-    ('컈', '컈'),
-    ('컀', '컀'),
-    ('쌀', '쌀'),
-    ('쌜', '쌜'),
-    ('쌞', '쌞'),
-    ('윔', '윔'),
-    ('윰', '윰'),
-    ('쟌', '쟌'),
-    ('쟚', '쟚'),
-    ('ìż„', 'ìż„'),
-    ('ìż ', 'ìż '),
-    ('ìżŒ', 'ìżŒ'),
-    ('퀘', '퀘'),
-    ('퀮', '퀮'),
-    ('큐', '큐'),
-    ('큏', '큏'),
-    ('킈', '킈'),
-    ('í‚€', 'í‚€'),
-    ('타', '타'),
-    ('태', '태'),
-    ('탞', '탞'),
-    ('턔', '턔'),
-    ('터', '터'),
-    ('테', '테'),
-    ('텹', '텹'),
-    ('톄', '톄'),
-    ('토', '토'),
-    ('톌', '톌'),
-    ('퇘', '퇘'),
-    ('퇮', '퇮'),
-    ('툐', '툐'),
-    ('툏', '툏'),
-    ('퉈', '퉈'),
-    ('퉀', '퉀'),
-    ('튀', '튀'),
-    ('튜', '튜'),
-    ('튾', '튾'),
-    ('틔', '틔'),
-    ('티', '티'),
-    ('파', '파'),
-    ('팹', '팹'),
-    ('퍄', '퍄'),
-    ('퍠', '퍠'),
-    ('퍌', '퍌'),
-    ('페', '페'),
-    ('펮', '펮'),
-    ('폐', '폐'),
-    ('포', '포'),
-    ('퐈', '퐈'),
-    ('퐀', '퐀'),
-    ('푀', '푀'),
-    ('표', '표'),
-    ('푾', '푾'),
-    ('풔', '풔'),
-    ('풰', '풰'),
-    ('퓌', '퓌'),
-    ('퓹', '퓹'),
-    ('프', '프'),
-    ('픠', '픠'),
-    ('플', '플'),
-    ('하', '하'),
-    ('핮', '핮'),
-    ('햐', '햐'),
-    ('햬', '햬'),
-    ('허', '허'),
-    ('í—€', 'í—€'),
-    ('혀', '혀'),
-    ('혜', '혜'),
-    ('혞', '혞'),
-    ('화', '화'),
-    ('홰', '홰'),
-    ('회', '회'),
-    ('횹', '횹'),
-    ('후', '후'),
-    ('훠', '훠'),
-    ('훌', '훌'),
-    ('휘', '휘'),
-    ('휮', '휮'),
-    ('흐', '흐'),
-    ('희', '희'),
-    ('히', '히'),
-];
-
-pub const LVT: &'static [(char, char)] = &[
-    ('각', '갛'),
-    ('객', '갷'),
-    ('ê°č', '걓'),
-    ('걕', 'ê±Ż'),
-    ('ê±±', 'êȋ'),
-    ('êȍ', 'êȧ'),
-    ('êČ©', 'êłƒ'),
-    ('êł…', 'êłŸ'),
-    ('êłĄ', 'êł»'),
-    ('êłœ', 'ꎗ'),
-    ('ꎙ', 'êŽł'),
-    ('ꎔ', 'ꔏ'),
-    ('ꔑ', 'ꔫ'),
-    ('ê”­', '궇'),
-    ('궉', 'ê¶Ł'),
-    ('ê¶„', 'ê¶ż'),
-    ('귁', '귛'),
-    ('귝', '귷'),
-    ('ê·č', 'êž“'),
-    ('êž•', 'êžŻ'),
-    ('êž±', 'êč‹'),
-    ('êč', 'êč§'),
-    ('êč©', 'êșƒ'),
-    ('êș…', 'êșŸ'),
-    ('êșĄ', 'êș»'),
-    ('êșœ', '껗'),
-    ('껙', 'ê»ł'),
-    ('껔', 'ꌏ'),
-    ('ꌑ', 'ꌫ'),
-    ('ꌭ', '꜇'),
-    ('꜉', 'êœŁ'),
-    ('꜄', 'êœż'),
-    ('ꟁ', 'ꟛ'),
-    ('꟝', 'ꟷ'),
-    ('êŸč', 'êż“'),
-    ('êż•', 'êżŻ'),
-    ('êż±', '뀋'),
-    ('뀍', '뀧'),
-    ('뀩', '끃'),
-    ('끅', '끟'),
-    ('끥', '끻'),
-    ('끜', '낗'),
-    ('낙', '낳'),
-    ('ë‚”', '냏'),
-    ('냑', '냫'),
-    ('냭', '넇'),
-    ('넉', '넣'),
-    ('ë„„', '넿'),
-    ('녁', '녛'),
-    ('녝', '녷'),
-    ('ë…č', '놓'),
-    ('놕', '놯'),
-    ('놱', '뇋'),
-    ('뇍', '뇧'),
-    ('뇩', '눃'),
-    ('눅', '눟'),
-    ('눥', '눻'),
-    ('눜', '뉗'),
-    ('뉙', '뉳'),
-    ('뉔', '늏'),
-    ('늑', '늫'),
-    ('늭', '닇'),
-    ('닉', '닣'),
-    ('ë‹„', '닿'),
-    ('댁', '댛'),
-    ('댝', '댷'),
-    ('ëŒč', '덓'),
-    ('덕', '덯'),
-    ('덱', '뎋'),
-    ('뎍', '뎧'),
-    ('뎩', '돃'),
-    ('독', '돟'),
-    ('돡', '돻'),
-    ('돜', '됗'),
-    ('됙', '됳'),
-    ('됔', '둏'),
-    ('둑', '둫'),
-    ('둭', '뒇'),
-    ('뒉', '뒣'),
-    ('ë’„', '뒿'),
-    ('듁', '듛'),
-    ('득', '듷'),
-    ('ë“č', '딓'),
-    ('딕', '딯'),
-    ('딱', '땋'),
-    ('땍', '땧'),
-    ('땩', '떃'),
-    ('떅', '떟'),
-    ('떡', '떻'),
-    ('ë–œ', '뗗'),
-    ('뗙', '뗳'),
-    ('ë—”', '똏'),
-    ('똑', '똫'),
-    ('똭', '뙇'),
-    ('뙉', '뙣'),
-    ('뙄', '뙿'),
-    ('뚁', '뚛'),
-    ('뚝', '뚷'),
-    ('ëšč', '뛓'),
-    ('뛕', '뛯'),
-    ('뛱', '뜋'),
-    ('뜍', '뜧'),
-    ('뜩', '띃'),
-    ('띅', '띟'),
-    ('띡', '띻'),
-    ('띜', '랗'),
-    ('랙', '랳'),
-    ('랔', '럏'),
-    ('럑', '럫'),
-    ('럭', '렇'),
-    ('렉', '렣'),
-    ('ë „', 'ë ż'),
-    ('롁', '롛'),
-    ('록', '륷'),
-    ('ëĄč', '뱓'),
-    ('뱕', '뱯'),
-    ('ëą±', '룋'),
-    ('룍', '룧'),
-    ('룩', '뀃'),
-    ('뀅', '뀟'),
-    ('뀥', '뀻'),
-    ('뀜', 'ë„—'),
-    ('ë„™', '넳'),
-    ('넔', '늏'),
-    ('멑', '늫'),
-    ('멭', '맇'),
-    ('막', '맣'),
-    ('ë§„', 'ë§ż'),
-    ('빁', '빛'),
-    ('뚝', '뚷'),
-    ('ëšč', '멓'),
-    ('멕', '멯'),
-    ('멱', 'ëȘ‹'),
-    ('ëȘ', 'ëȘ§'),
-    ('ëȘ©', '뫃'),
-    ('뫅', '뫟'),
-    ('뫥', '뫻'),
-    ('뫜', '묗'),
-    ('묙', '묳'),
-    ('돔', '뭏'),
-    ('뭑', '뭫'),
-    ('뭭', '뼇'),
-    ('뼉', '뼣'),
-    ('ëź„', 'ëźż'),
-    ('믁', '믛'),
-    ('믝', '믷'),
-    ('ëŻč', '밓'),
-    ('박', '밯'),
-    ('백', '뱋'),
-    ('뱍', '뱧'),
-    ('뱩', 'ëȃ'),
-    ('ëȅ', 'ëȟ'),
-    ('ëČĄ', 'ëČ»'),
-    ('ëČœ', '볗'),
-    ('볙', '볳'),
-    ('ëł”', '뎏'),
-    ('뮑', '뎫'),
-    ('뎭', '딇'),
-    ('딉', '딣'),
-    ('딄', '딿'),
-    ('북', '붛'),
-    ('붝', '붷'),
-    ('ë¶č', '뷓'),
-    ('뷕', 'ë·Ż'),
-    ('ë·±', '뾋'),
-    ('랍', '랧'),
-    ('ëž©', 'ëčƒ'),
-    ('ëč…', 'ëčŸ'),
-    ('ëčĄ', 'ëč»'),
-    ('ëčœ', 'ëș—'),
-    ('ëș™', 'ëșł'),
-    ('ëș”', '뻏'),
-    ('뻑', '뻫'),
-    ('ë»­', '댇'),
-    ('댉', 'ëŒŁ'),
-    ('댄', 'ëŒż'),
-    ('뜁', '뜛'),
-    ('뜝', '뜷'),
-    ('ëœč', '럓'),
-    ('럕', 'ëŸŻ'),
-    ('럱', '뿋'),
-    ('뿍', '뿧'),
-    ('ëż©', '쀃'),
-    ('쀅', '쀟'),
-    ('ì€Ą', '쀻'),
-    ('쀜', '쁗'),
-    ('쁙', 'ìł'),
-    ('쁔', '삏'),
-    ('삑', '삫'),
-    ('삭', '샇'),
-    ('색', 'ìƒŁ'),
-    ('샄', 'ìƒż'),
-    ('섁', '섛'),
-    ('석', '섷'),
-    ('ì„č', '셓'),
-    ('셕', 'ì…Ż'),
-    ('셱', '솋'),
-    ('속', '솧'),
-    ('솩', '쇃'),
-    ('쇅', '쇟'),
-    ('ì‡Ą', '쇻'),
-    ('쇜', '숗'),
-    ('숙', 'ìˆł'),
-    ('숔', '쉏'),
-    ('쉑', '쉫'),
-    ('쉭', '슇'),
-    ('슉', 'ìŠŁ'),
-    ('슄', 'ìŠż'),
-    ('싁', '싛'),
-    ('식', '싷'),
-    ('ì‹č', '쌓'),
-    ('쌕', 'ìŒŻ'),
-    ('쌱', '썋'),
-    ('썍', '썧'),
-    ('썩', '쎃'),
-    ('쎅', '쎟'),
-    ('ìŽĄ', '쎻'),
-    ('쎜', '쏗'),
-    ('쏙', 'ìł'),
-    ('쏔', '쐏'),
-    ('쐑', '쐫'),
-    ('쐭', '쑇'),
-    ('쑉', 'ì‘Ł'),
-    ('ì‘„', 'ì‘ż'),
-    ('쒁', '쒛'),
-    ('쒝', '쒷'),
-    ('ì’č', '쓓'),
-    ('쓕', 'ì“Ż'),
-    ('쓱', '씋'),
-    ('씍', '씧'),
-    ('씩', '앃'),
-    ('악', '앟'),
-    ('ì•Ą', '앻'),
-    ('앜', '얗'),
-    ('얙', 'ì–ł'),
-    ('ì–”', '엏'),
-    ('엑', '엫'),
-    ('역', '옇'),
-    ('옉', 'ì˜Ł'),
-    ('옄', 'ì˜ż'),
-    ('왁', '왛'),
-    ('왝', '왷'),
-    ('ì™č', '욓'),
-    ('욕', 'ìšŻ'),
-    ('욱', '웋'),
-    ('웍', '웧'),
-    ('웩', '윃'),
-    ('윅', '윟'),
-    ('ìœĄ', '윻'),
-    ('윜', '읗'),
-    ('읙', 'ìł'),
-    ('읔', '잏'),
-    ('작', '잫'),
-    ('잭', '쟇'),
-    ('쟉', 'ìŸŁ'),
-    ('쟄', 'ìŸż'),
-    ('적', '젛'),
-    ('젝', '젷'),
-    ('ì č', 'ìĄ“'),
-    ('ìĄ•', 'ìĄŻ'),
-    ('ìĄ±', 'ìą‹'),
-    ('ìą', 'ìą§'),
-    ('ìą©', 'ìŁƒ'),
-    ('ìŁ…', 'ìŁŸ'),
-    ('ìŁĄ', 'ìŁ»'),
-    ('ìŁœ', '쀗'),
-    ('쀙', 'ì€ł'),
-    ('쀔', '섏'),
-    ('ì„‘', 'ì„«'),
-    ('ì„­', '슇'),
-    ('슉', 'ìŠŁ'),
-    ('슄', 'ìŠż'),
-    ('직', '짛'),
-    ('짝', '짷'),
-    ('ì§č', 'ìš“'),
-    ('ìš•', 'ìšŻ'),
-    ('ìš±', '쩋'),
-    ('쩍', '쩧'),
-    ('ì©©', 'ìȘƒ'),
-    ('ìȘ…', 'ìȘŸ'),
-    ('ìȘĄ', 'ìȘ»'),
-    ('ìȘœ', '쫗'),
-    ('쫙', 'ì«ł'),
-    ('ì«”', 'ìŹ'),
-    ('ìŹ‘', 'ìŹ«'),
-    ('ìŹ­', '쭇'),
-    ('쭉', 'ì­Ł'),
-    ('ì­„', 'ì­ż'),
-    ('ìź', 'ìź›'),
-    ('ìź', 'ìź·'),
-    ('ìźč', 'ìŻ“'),
-    ('ìŻ•', 'ìŻŻ'),
-    ('ìŻ±', '찋'),
-    ('찍', '찧'),
-    ('착', '챃'),
-    ('책', '챟'),
-    ('ì±Ą', 'ì±»'),
-    ('챜', 'ìȗ'),
-    ('ìș', 'ìČł'),
-    ('ìČ”', 'ìł'),
-    ('ìł‘', 'ìł«'),
-    ('ìł­', '쎇'),
-    ('쎉', 'ìŽŁ'),
-    ('쎄', 'ìŽż'),
-    ('씁', 'ì”›'),
-    ('씝', '씷'),
-    ('ì”č', '춓'),
-    ('축', 'ì¶Ż'),
-    ('춱', '췋'),
-    ('췍', '췧'),
-    ('ì·©', '잃'),
-    ('ìž…', '잟'),
-    ('ìžĄ', 'ìž»'),
-    ('ìžœ', 'ìč—'),
-    ('ìč™', 'ìčł'),
-    ('ìč”', 'ìș'),
-    ('ìș‘', 'ìș«'),
-    ('ìș­', '컇'),
-    ('컉', 'ì»Ł'),
-    ('컄', 'ì»ż'),
-    ('쌁', '쌛'),
-    ('쌝', '쌷'),
-    ('ìŒč', '윓'),
-    ('윕', 'ìœŻ'),
-    ('윱', '쟋'),
-    ('쟍', '쟧'),
-    ('쟩', 'ìżƒ'),
-    ('ìż…', 'ìżŸ'),
-    ('ìżĄ', 'ìż»'),
-    ('ìżœ', '퀗'),
-    ('퀙', '퀳'),
-    ('퀔', '큏'),
-    ('큑', '큫'),
-    ('큭', '킇'),
-    ('킉', '킣'),
-    ('í‚„', '킿'),
-    ('탁', '탛'),
-    ('택', '탷'),
-    ('íƒč', '턓'),
-    ('턕', '턯'),
-    ('턱', '텋'),
-    ('텍', '텧'),
-    ('텩', '톃'),
-    ('톅', '톟'),
-    ('톡', '톻'),
-    ('톜', '퇗'),
-    ('퇙', '퇳'),
-    ('퇔', '툏'),
-    ('툑', '툫'),
-    ('툭', '퉇'),
-    ('퉉', '퉣'),
-    ('퉄', '퉿'),
-    ('튁', '튛'),
-    ('튝', '튷'),
-    ('íŠč', '틓'),
-    ('틕', '틯'),
-    ('틱', '팋'),
-    ('팍', '팧'),
-    ('팩', '퍃'),
-    ('퍅', '퍟'),
-    ('퍡', '퍻'),
-    ('퍜', '펗'),
-    ('펙', '펳'),
-    ('펔', '폏'),
-    ('폑', '폫'),
-    ('폭', '퐇'),
-    ('퐉', '퐣'),
-    ('퐄', '퐿'),
-    ('푁', '푛'),
-    ('푝', '푷'),
-    ('í‘č', '풓'),
-    ('풕', '풯'),
-    ('풱', '퓋'),
-    ('퓍', '퓧'),
-    ('퓩', '픃'),
-    ('픅', '픟'),
-    ('픡', '픻'),
-    ('픜', '핗'),
-    ('학', '핳'),
-    ('í•”', '햏'),
-    ('햑', '햫'),
-    ('햭', '헇'),
-    ('헉', '헣'),
-    ('í—„', '헿'),
-    ('혁', '혛'),
-    ('혝', '혷'),
-    ('í˜č', '홓'),
-    ('확', '홯'),
-    ('홱', '횋'),
-    ('획', '횧'),
-    ('횩', '훃'),
-    ('훅', '훟'),
-    ('훡', '훻'),
-    ('훜', '휗'),
-    ('휙', '휳'),
-    ('휔', '흏'),
-    ('흑', '흫'),
-    ('흭', '힇'),
-    ('힉', '힣'),
-];
-
-pub const PREPEND: &'static [(char, char)] = &[
-    ('\u{600}', '\u{605}'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('\u{70f}', '\u{70f}'),
-    ('\u{890}', '\u{891}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('à”Ž', 'à”Ž'),
-    ('\u{110bd}', '\u{110bd}'),
-    ('\u{110cd}', '\u{110cd}'),
-    ('𑇂', '𑇃'),
-    ('𑏑', '𑏑'),
-    ('𑀿', '𑀿'),
-    ('𑄁', '𑄁'),
-    ('đ‘šș', 'đ‘šș'),
-    ('đ‘Ș„', 'đ‘Ș‰'),
-    ('𑔆', '𑔆'),
-    ('đ‘Œ‚', 'đ‘Œ‚'),
-];
-
-pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇩', '🇿')];
-
-pub const SPACINGMARK: &'static [(char, char)] = &[
-    ('à€ƒ', 'à€ƒ'),
-    ('à€»', 'à€»'),
-    ('à€Ÿ', 'à„€'),
-    ('à„‰', 'à„Œ'),
-    ('à„Ž', 'à„'),
-    ('àŠ‚', 'àŠƒ'),
-    ('àŠż', 'ী'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৌ'),
-    ('àšƒ', 'àšƒ'),
-    ('àšŸ', 'ੀ'),
-    ('àȘƒ', 'àȘƒ'),
-    ('àȘŸ', 'ી'),
-    ('ૉ', 'ૉ'),
-    ('ો', 'ૌ'),
-    ('àŹ‚', 'àŹƒ'),
-    ('ୀ', 'ୀ'),
-    ('େ', 'ୈ'),
-    ('ୋ', 'ୌ'),
-    ('àźż', 'àźż'),
-    ('àŻ', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', 'àŻŒ'),
-    ('ఁ', 'ః'),
-    ('ు', 'ౄ'),
-    ('àȂ', 'àȃ'),
-    ('àČŸ', 'àČŸ'),
-    ('àł', 'àł'),
-    ('àłƒ', 'àł„'),
-    ('àłł', 'àłł'),
-    ('àŽ‚', 'àŽƒ'),
-    ('àŽż', 'à”€'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Œ'),
-    ('ං', 'ඃ'),
-    ('ැ', 'ෑ'),
-    ('ෘ', 'ෞ'),
-    ('à·Č', 'à·ł'),
-    ('àžł', 'àžł'),
-    ('àșł', 'àșł'),
-    ('àŒŸ', 'àŒż'),
-    ('àœż', 'àœż'),
-    ('ေ', 'ေ'),
-    ('ျ', 'ဌ'),
-    ('ၖ', 'ၗ'),
-    ('ႄ', 'ႄ'),
-    ('ា', 'ា'),
-    ('ស', 'ៅ'),
-    ('ះ', 'ៈ'),
-    ('ဣ', 'ည'),
-    ('ဩ', 'ါ'),
-    ('ူ', 'ေ'),
-    ('ဳ', 'သ'),
-    ('ṙ', 'Ṛ'),
-    ('ᩕ', 'ᩕ'),
-    ('ᩗ', 'ᩗ'),
-    ('á©­', 'á©Č'),
-    ('ᬄ', 'ᬄ'),
-    ('áŹŸ', 'ᭁ'),
-    ('ἂ', 'ἂ'),
-    ('៥', '៥'),
-    ('៊', '៧'),
-    ('ᯧ', 'ᯧ'),
-    ('áŻȘ', 'ᯏ'),
-    ('ᯟ', 'ᯟ'),
-    ('á°€', 'á°«'),
-    ('á°Ž', 'á°”'),
-    ('᳥', '᳥'),
-    ('áł·', 'áł·'),
-    ('ê Ł', 'ê €'),
-    ('ê §', 'ê §'),
-    ('êą€', 'êą'),
-    ('êąŽ', 'êŁƒ'),
-    ('ê„’', 'ê„’'),
-    ('ꊃ', 'ꊃ'),
-    ('ꊎ', 'ꊔ'),
-    ('êŠș', 'ꊻ'),
-    ('ꊟ', 'êŠż'),
-    ('êšŻ', 'êš°'),
-    ('êšł', 'Ꚏ'),
-    ('ꩍ', 'ꩍ'),
-    ('ê««', 'ê««'),
-    ('ê«ź', 'ê«Ż'),
-    ('ê«”', 'ê«”'),
-    ('êŻŁ', 'êŻ€'),
-    ('êŻŠ', 'êŻ§'),
-    ('êŻ©', 'êŻȘ'),
-    ('êŻŹ', 'êŻŹ'),
-    ('𑀀', '𑀀'),
-    ('𑀂', '𑀂'),
-    ('𑂂', '𑂂'),
-    ('𑂰', 'đ‘‚Č'),
-    ('đ‘‚·', '𑂾'),
-    ('𑄬', '𑄬'),
-    ('𑅅', '𑅆'),
-    ('𑆂', '𑆂'),
-    ('𑆳', '𑆔'),
-    ('𑆿', '𑆿'),
-    ('𑇎', '𑇎'),
-    ('𑈬', '𑈼'),
-    ('đ‘ˆČ', '𑈳'),
-    ('𑋠', '𑋱'),
-    ('𑌂', '𑌃'),
-    ('𑌿', '𑌿'),
-    ('𑍁', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '𑍌'),
-    ('𑍱', '𑍣'),
-    ('đ‘Žč', 'đ‘Žș'),
-    ('𑏊', '𑏊'),
-    ('𑏌', '𑏍'),
-    ('𑐔', '𑐷'),
-    ('𑑀', '𑑁'),
-    ('𑑅', '𑑅'),
-    ('đ‘’±', 'đ‘’Č'),
-    ('đ‘’č', 'đ‘’č'),
-    ('đ‘’»', 'đ‘’Œ'),
-    ('đ‘’Ÿ', 'đ‘’Ÿ'),
-    ('𑓁', '𑓁'),
-    ('𑖰', 'đ‘–±'),
-    ('𑖾', 'đ‘–»'),
-    ('đ‘–Ÿ', 'đ‘–Ÿ'),
-    ('𑘰', 'đ‘˜Č'),
-    ('đ‘˜»', 'đ‘˜Œ'),
-    ('đ‘˜Ÿ', 'đ‘˜Ÿ'),
-    ('𑚬', '𑚬'),
-    ('𑚼', '𑚯'),
-    ('𑜞', '𑜞'),
-    ('𑜩', '𑜩'),
-    ('𑠬', '𑠼'),
-    ('𑠾', '𑠾'),
-    ('đ‘€±', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('đ‘„€', 'đ‘„€'),
-    ('đ‘„‚', 'đ‘„‚'),
-    ('𑧑', '𑧓'),
-    ('𑧜', '𑧟'),
-    ('đ‘§€', 'đ‘§€'),
-    ('đ‘šč', 'đ‘šč'),
-    ('đ‘©—', 'đ‘©˜'),
-    ('đ‘Ș—', 'đ‘Ș—'),
-    ('𑰯', '𑰯'),
-    ('đ‘°Ÿ', 'đ‘°Ÿ'),
-    ('đ‘Č©', 'đ‘Č©'),
-    ('đ‘ȱ', 'đ‘ȱ'),
-    ('đ‘ČŽ', 'đ‘ČŽ'),
-    ('đ‘¶Š', 'đ‘¶Ž'),
-    ('đ‘¶“', 'đ‘¶”'),
-    ('đ‘¶–', 'đ‘¶–'),
-    ('đ‘»”', 'đ‘»¶'),
-    ('đ‘Œƒ', 'đ‘Œƒ'),
-    ('đ‘ŒŽ', 'đ‘Œ”'),
-    ('đ‘ŒŸ', 'đ‘Œż'),
-    ('đ–„Ș', '𖄬'),
-    ('đ–œ‘', 'đ–Ÿ‡'),
-];
-
-pub const T: &'static [(char, char)] = &[('ᆹ', 'ᇿ'), ('ퟋ', 'ퟻ')];
-
-pub const V: &'static [(char, char)] =
-    &[('ᅠ', 'ᆧ'), ('ힰ', 'ퟆ'), ('𖔣', '𖔣'), ('đ–”§', 'đ–”Ș')];
-
-pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/mod.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/mod.rs
deleted file mode 100644
index 20736c7a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/mod.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-#[cfg(feature = "unicode-age")]
-pub mod age;
-
-#[cfg(feature = "unicode-case")]
-pub mod case_folding_simple;
-
-#[cfg(feature = "unicode-gencat")]
-pub mod general_category;
-
-#[cfg(feature = "unicode-segment")]
-pub mod grapheme_cluster_break;
-
-#[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))]
-#[allow(dead_code)]
-pub mod perl_decimal;
-
-#[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))]
-#[allow(dead_code)]
-pub mod perl_space;
-
-#[cfg(feature = "unicode-perl")]
-pub mod perl_word;
-
-#[cfg(feature = "unicode-bool")]
-pub mod property_bool;
-
-#[cfg(any(
-    feature = "unicode-age",
-    feature = "unicode-bool",
-    feature = "unicode-gencat",
-    feature = "unicode-perl",
-    feature = "unicode-script",
-    feature = "unicode-segment",
-))]
-pub mod property_names;
-
-#[cfg(any(
-    feature = "unicode-age",
-    feature = "unicode-bool",
-    feature = "unicode-gencat",
-    feature = "unicode-perl",
-    feature = "unicode-script",
-    feature = "unicode-segment",
-))]
-pub mod property_values;
-
-#[cfg(feature = "unicode-script")]
-pub mod script;
-
-#[cfg(feature = "unicode-script")]
-pub mod script_extension;
-
-#[cfg(feature = "unicode-segment")]
-pub mod sentence_break;
-
-#[cfg(feature = "unicode-segment")]
-pub mod word_break;
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_decimal.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_decimal.rs
deleted file mode 100644
index 18996c2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_decimal.rs
+++ /dev/null
@@ -1,84 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate general-category ucd-16.0.0 --chars --include decimalnumber
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] =
-    &[("Decimal_Number", DECIMAL_NUMBER)];
-
-pub const DECIMAL_NUMBER: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('Ù ', 'Ù©'),
-    ('Û°', 'Ûč'),
-    ('߀', '߉'),
-    ('à„Š', 'à„Ż'),
-    ('à§Š', 'à§Ż'),
-    ('੊', 'à©Ż'),
-    ('૊', 'à«Ż'),
-    ('à­Š', 'à­Ż'),
-    ('àŻŠ', 'àŻŻ'),
-    ('ొ', 'à±Ż'),
-    ('àłŠ', 'àłŻ'),
-    ('à”Š', 'à”Ż'),
-    ('à·Š', 'à·Ż'),
-    ('àč', 'àč™'),
-    ('໐', '໙'),
-    ('àŒ ', 'àŒ©'),
-    ('၀', '၉'),
-    ('႐', '႙'),
-    ('០', '៩'),
-    ('᠐', '᠙'),
-    ('ᄆ', 'ᄏ'),
-    ('᧐', '᧙'),
-    ('áȘ€', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('᭐', '᭙'),
-    ('áź°', 'áźč'),
-    ('᱀', '᱉'),
-    ('᱐', '᱙'),
-    ('꘠', '꘩'),
-    ('êŁ', 'êŁ™'),
-    ('ꀀ', 'ꀉ'),
-    ('꧐', '꧙'),
-    ('ê§°', 'ê§č'),
-    ('꩐', '꩙'),
-    ('êŻ°', 'êŻč'),
-    ('', ''),
-    ('𐒠', '𐒩'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔉'),
-    ('𑁩', '𑁯'),
-    ('𑃰', 'đ‘ƒč'),
-    ('đ‘„¶', '𑄿'),
-    ('𑇐', '𑇙'),
-    ('𑋰', 'đ‘‹č'),
-    ('𑑐', '𑑙'),
-    ('𑓐', '𑓙'),
-    ('𑙐', '𑙙'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜰', 'đ‘œč'),
-    ('𑣠', '𑣩'),
-    ('𑄐', 'đ‘„™'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑱐', '𑱙'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘œ', 'đ‘œ™'),
-    ('𖄰', 'đ–„č'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–«€', '𖫉'),
-    ('𖭐', '𖭙'),
-    ('đ–”°', 'đ–”č'),
-    ('𜳰', 'đœłč'),
-    ('𝟎', '𝟿'),
-    ('𞅀', '𞅉'),
-    ('𞋰', 'đž‹č'),
-    ('𞓰', 'đž“č'),
-    ('đž—±', 'đž—ș'),
-    ('𞄐', 'đž„™'),
-    ('🯰', 'đŸŻč'),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_space.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_space.rs
deleted file mode 100644
index c969e37..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_space.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate property-bool ucd-16.0.0 --chars --include whitespace
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] =
-    &[("White_Space", WHITE_SPACE)];
-
-pub const WHITE_SPACE: &'static [(char, char)] = &[
-    ('\t', '\r'),
-    (' ', ' '),
-    ('\u{85}', '\u{85}'),
-    ('\u{a0}', '\u{a0}'),
-    ('\u{1680}', '\u{1680}'),
-    ('\u{2000}', '\u{200a}'),
-    ('\u{2028}', '\u{2029}'),
-    ('\u{202f}', '\u{202f}'),
-    ('\u{205f}', '\u{205f}'),
-    ('\u{3000}', '\u{3000}'),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_word.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_word.rs
deleted file mode 100644
index 21c8c0f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_word.rs
+++ /dev/null
@@ -1,806 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate perl-word ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const PERL_WORD: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('A', 'Z'),
-    ('_', '_'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('\u{300}', 'ÍŽ'),
-    ('Ͷ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('\u{483}', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', 'ՙ'),
-    ('ՠ', 'ֈ'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('\u{610}', '\u{61a}'),
-    ('Ű ', 'Ù©'),
-    ('Ùź', 'ۓ'),
-    ('ە', '\u{6dc}'),
-    ('\u{6df}', '\u{6e8}'),
-    ('\u{6ea}', 'ی'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', '\u{74a}'),
-    ('ʍ', 'Ț±'),
-    ('߀', 'ß”'),
-    ('ßș', 'ßș'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('ࠀ', '\u{82d}'),
-    ('àĄ€', '\u{85b}'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('\u{897}', '\u{8e1}'),
-    ('\u{8e3}', '\u{963}'),
-    ('à„Š', 'à„Ż'),
-    ('à„±', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('\u{9bc}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৎ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', '\u{9e3}'),
-    ('à§Š', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', 'àšƒ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('੊', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('\u{abc}', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('ૐ', 'ૐ'),
-    ('à« ', '\u{ae3}'),
-    ('૊', 'à«Ż'),
-    ('à«č', '\u{aff}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('\u{b3c}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', '\u{b63}'),
-    ('à­Š', 'à­Ż'),
-    ('à­±', 'à­±'),
-    ('\u{b82}', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('àŻ', 'àŻ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('àŻŠ', 'àŻŻ'),
-    ('\u{c00}', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('\u{c3c}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', '\u{c63}'),
-    ('ొ', 'à±Ż'),
-    ('àȀ', 'àȃ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('\u{cbc}', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àł', 'àłž'),
-    ('àł ', '\u{ce3}'),
-    ('àłŠ', 'àłŻ'),
-    ('àł±', 'àłł'),
-    ('\u{d00}', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Ž'),
-    ('à””', '\u{d57}'),
-    ('à”Ÿ', '\u{d63}'),
-    ('à”Š', 'à”Ż'),
-    ('à”ș', 'à”ż'),
-    ('\u{d81}', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Š', 'à·Ż'),
-    ('à·Č', 'à·ł'),
-    ('àž', '\u{e3a}'),
-    ('àč€', '\u{e4e}'),
-    ('àč', 'àč™'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('\u{ec8}', '\u{ece}'),
-    ('໐', '໙'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('\u{f18}', '\u{f19}'),
-    ('àŒ ', 'àŒ©'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('àŒŸ', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('\u{f71}', '\u{f84}'),
-    ('\u{f86}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('က', '၉'),
-    ('ၐ', '\u{109d}'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('\u{135d}', '\u{135f}'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', '\u{1715}'),
-    ('ᜟ', '\u{1734}'),
-    ('ᝀ', '\u{1753}'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('\u{1772}', '\u{1773}'),
-    ('ក', '\u{17d3}'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', '\u{17dd}'),
-    ('០', '៩'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '᠙'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('ᄆ', 'á„­'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('᧐', '᧙'),
-    ('Ṁ', '\u{1a1b}'),
-    ('áš ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('áȘ§', 'áȘ§'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', 'ᭌ'),
-    ('᭐', '᭙'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1bf3}'),
-    ('ᰀ', '\u{1c37}'),
-    ('᱀', '᱉'),
-    ('ᱍ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', 'áłș'),
-    ('ᮀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('\u{200c}', '\u{200d}'),
-    ('‿', '⁀'),
-    ('⁔', '⁔'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℯ', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⓐ', 'ⓩ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('\u{2d7f}', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('➯', '➯'),
-    ('々', '〇'),
-    ('〡', '\u{302f}'),
-    ('〱', '〔'),
-    ('〾', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('\u{3099}', '\u{309a}'),
-    ('ゝ', 'ゟ'),
-    ('ァ', 'ăƒș'),
-    ('ăƒŒ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘫ'),
-    ('Ꙁ', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('ê™ż', '\u{a6f1}'),
-    ('ꜗ', 'ꜟ'),
-    ('êœą', 'ꞈ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ê §'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('êĄ€', 'êĄł'),
-    ('êą€', '\u{a8c5}'),
-    ('êŁ', 'êŁ™'),
-    ('\u{a8e0}', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', '\u{a92d}'),
-    ('ꀰ', '\u{a953}'),
-    ('ꄠ', 'ꄌ'),
-    ('\u{a980}', '\u{a9c0}'),
-    ('ꧏ', '꧙'),
-    ('ê§ ', 'ê§Ÿ'),
-    ('Ꚁ', '\u{aa36}'),
-    ('ꩀ', 'ꩍ'),
-    ('꩐', '꩙'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ż'),
-    ('ê«Č', '\u{aaf6}'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êŻȘ'),
-    ('êŻŹ', '\u{abed}'),
-    ('êŻ°', 'êŻč'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('ïžł', ''),
-    ('ïč', 'ïč'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('', ''),
-    ('ïŒĄ', 'ïŒș'),
-    ('ïŒż', 'ïŒż'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', '\u{1037a}'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒠', '𐒩'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '\u{10ae6}'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '\u{10d27}'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔄'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('\u{10efc}', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', '\u{10f50}'),
-    ('đœ°', '\u{10f85}'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀀', '\u{11046}'),
-    ('𑁩', '𑁔'),
-    ('\u{1107f}', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('𑃐', '𑃹'),
-    ('𑃰', 'đ‘ƒč'),
-    ('\u{11100}', '\u{11134}'),
-    ('đ‘„¶', '𑄿'),
-    ('𑅄', '𑅇'),
-    ('𑅐', '\u{11173}'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('\u{11180}', '𑇄'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('𑇎', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', '\u{11237}'),
-    ('\u{1123e}', '\u{11241}'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '\u{112ea}'),
-    ('𑋰', 'đ‘‹č'),
-    ('\u{11300}', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('\u{1133b}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('𑍐', '𑍐'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍝', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏓'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('𑐀', '𑑊'),
-    ('𑑐', '𑑙'),
-    ('\u{1145e}', '𑑡'),
-    ('𑒀', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑓐', '𑓙'),
-    ('𑖀', '\u{115b5}'),
-    ('𑖾', '\u{115c0}'),
-    ('𑗘', '\u{115dd}'),
-    ('𑘀', '\u{11640}'),
-    ('𑙄', '𑙄'),
-    ('𑙐', '𑙙'),
-    ('𑚀', '𑚾'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜀', '𑜚'),
-    ('\u{1171d}', '\u{1172b}'),
-    ('𑜰', 'đ‘œč'),
-    ('𑝀', '𑝆'),
-    ('𑠀', '\u{1183a}'),
-    ('𑱠', '𑣩'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{11943}'),
-    ('𑄐', 'đ‘„™'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '\u{119d7}'),
-    ('\u{119da}', '𑧡'),
-    ('𑧣', 'đ‘§€'),
-    ('𑹀', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('𑩐', '\u{11a99}'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '\u{11c36}'),
-    ('\u{11c38}', '𑱀'),
-    ('𑱐', '𑱙'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d47}'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶˜'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘» ', 'đ‘»¶'),
-    ('\u{11f00}', 'đ‘Œ'),
-    ('đ‘Œ’', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f42}'),
-    ('đ‘œ', '\u{11f5a}'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('\u{13440}', '\u{13455}'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', 'đ–„č'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('đ–«€', '𖫉'),
-    ('𖫐', 'đ–«­'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('𖬀', '\u{16b36}'),
-    ('𖭀', '𖭃'),
-    ('𖭐', '𖭙'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–”°', 'đ–”č'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('\u{16f4f}', 'đ–Ÿ‡'),
-    ('\u{16f8f}', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('𜳰', 'đœłč'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('𝟎', '𝟿'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('𞄀', '𞄬'),
-    ('\u{1e130}', 'đž„œ'),
-    ('𞅀', '𞅉'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '\u{1e2ae}'),
-    ('𞋀', 'đž‹č'),
-    ('𞓐', 'đž“č'),
-    ('𞗐', 'đž—ș'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('𞀀', 'đž„‹'),
-    ('𞄐', 'đž„™'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('🄰', '🅉'),
-    ('🅐', 'đŸ…©'),
-    ('🅰', '🆉'),
-    ('🯰', 'đŸŻč'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_bool.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_bool.rs
deleted file mode 100644
index 3d62edc..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_bool.rs
+++ /dev/null
@@ -1,12095 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate property-bool ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("ASCII_Hex_Digit", ASCII_HEX_DIGIT),
-    ("Alphabetic", ALPHABETIC),
-    ("Bidi_Control", BIDI_CONTROL),
-    ("Bidi_Mirrored", BIDI_MIRRORED),
-    ("Case_Ignorable", CASE_IGNORABLE),
-    ("Cased", CASED),
-    ("Changes_When_Casefolded", CHANGES_WHEN_CASEFOLDED),
-    ("Changes_When_Casemapped", CHANGES_WHEN_CASEMAPPED),
-    ("Changes_When_Lowercased", CHANGES_WHEN_LOWERCASED),
-    ("Changes_When_Titlecased", CHANGES_WHEN_TITLECASED),
-    ("Changes_When_Uppercased", CHANGES_WHEN_UPPERCASED),
-    ("Dash", DASH),
-    ("Default_Ignorable_Code_Point", DEFAULT_IGNORABLE_CODE_POINT),
-    ("Deprecated", DEPRECATED),
-    ("Diacritic", DIACRITIC),
-    ("Emoji", EMOJI),
-    ("Emoji_Component", EMOJI_COMPONENT),
-    ("Emoji_Modifier", EMOJI_MODIFIER),
-    ("Emoji_Modifier_Base", EMOJI_MODIFIER_BASE),
-    ("Emoji_Presentation", EMOJI_PRESENTATION),
-    ("Extended_Pictographic", EXTENDED_PICTOGRAPHIC),
-    ("Extender", EXTENDER),
-    ("Grapheme_Base", GRAPHEME_BASE),
-    ("Grapheme_Extend", GRAPHEME_EXTEND),
-    ("Grapheme_Link", GRAPHEME_LINK),
-    ("Hex_Digit", HEX_DIGIT),
-    ("Hyphen", HYPHEN),
-    ("IDS_Binary_Operator", IDS_BINARY_OPERATOR),
-    ("IDS_Trinary_Operator", IDS_TRINARY_OPERATOR),
-    ("IDS_Unary_Operator", IDS_UNARY_OPERATOR),
-    ("ID_Compat_Math_Continue", ID_COMPAT_MATH_CONTINUE),
-    ("ID_Compat_Math_Start", ID_COMPAT_MATH_START),
-    ("ID_Continue", ID_CONTINUE),
-    ("ID_Start", ID_START),
-    ("Ideographic", IDEOGRAPHIC),
-    ("InCB", INCB),
-    ("Join_Control", JOIN_CONTROL),
-    ("Logical_Order_Exception", LOGICAL_ORDER_EXCEPTION),
-    ("Lowercase", LOWERCASE),
-    ("Math", MATH),
-    ("Modifier_Combining_Mark", MODIFIER_COMBINING_MARK),
-    ("Noncharacter_Code_Point", NONCHARACTER_CODE_POINT),
-    ("Other_Alphabetic", OTHER_ALPHABETIC),
-    ("Other_Default_Ignorable_Code_Point", OTHER_DEFAULT_IGNORABLE_CODE_POINT),
-    ("Other_Grapheme_Extend", OTHER_GRAPHEME_EXTEND),
-    ("Other_ID_Continue", OTHER_ID_CONTINUE),
-    ("Other_ID_Start", OTHER_ID_START),
-    ("Other_Lowercase", OTHER_LOWERCASE),
-    ("Other_Math", OTHER_MATH),
-    ("Other_Uppercase", OTHER_UPPERCASE),
-    ("Pattern_Syntax", PATTERN_SYNTAX),
-    ("Pattern_White_Space", PATTERN_WHITE_SPACE),
-    ("Prepended_Concatenation_Mark", PREPENDED_CONCATENATION_MARK),
-    ("Quotation_Mark", QUOTATION_MARK),
-    ("Radical", RADICAL),
-    ("Regional_Indicator", REGIONAL_INDICATOR),
-    ("Sentence_Terminal", SENTENCE_TERMINAL),
-    ("Soft_Dotted", SOFT_DOTTED),
-    ("Terminal_Punctuation", TERMINAL_PUNCTUATION),
-    ("Unified_Ideograph", UNIFIED_IDEOGRAPH),
-    ("Uppercase", UPPERCASE),
-    ("Variation_Selector", VARIATION_SELECTOR),
-    ("White_Space", WHITE_SPACE),
-    ("XID_Continue", XID_CONTINUE),
-    ("XID_Start", XID_START),
-];
-
-pub const ASCII_HEX_DIGIT: &'static [(char, char)] =
-    &[('0', '9'), ('A', 'F'), ('a', 'f')];
-
-pub const ALPHABETIC: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('\u{345}', '\u{345}'),
-    ('\u{363}', 'ÍŽ'),
-    ('Ͷ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', 'ՙ'),
-    ('ՠ', 'ֈ'),
-    ('\u{5b0}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('\u{610}', '\u{61a}'),
-    ('Ű ', '\u{657}'),
-    ('\u{659}', '\u{65f}'),
-    ('Ùź', 'ۓ'),
-    ('ە', '\u{6dc}'),
-    ('\u{6e1}', '\u{6e8}'),
-    ('\u{6ed}', 'ÛŻ'),
-    ('Ûș', 'ÛŒ'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', '\u{73f}'),
-    ('ʍ', 'Ț±'),
-    ('ߊ', 'ßȘ'),
-    ('ߎ', 'ߔ'),
-    ('ßș', 'ßș'),
-    ('ࠀ', '\u{817}'),
-    ('ࠚ', '\u{82c}'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('\u{897}', '\u{897}'),
-    ('àą ', 'àŁ‰'),
-    ('\u{8d4}', '\u{8df}'),
-    ('\u{8e3}', '\u{8e9}'),
-    ('\u{8f0}', 'à€»'),
-    ('à€œ', 'à„Œ'),
-    ('à„Ž', 'à„'),
-    ('\u{955}', '\u{963}'),
-    ('à„±', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৌ'),
-    ('ৎ', 'ৎ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', '\u{9e3}'),
-    ('à§°', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('\u{a01}', 'àšƒ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4c}'),
-    ('\u{a51}', '\u{a51}'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('\u{a70}', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', 'ૌ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', '\u{ae3}'),
-    ('à«č', '\u{afc}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', 'ୌ'),
-    ('\u{b56}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', '\u{b63}'),
-    ('à­±', 'à­±'),
-    ('\u{b82}', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', 'àŻŒ'),
-    ('àŻ', 'àŻ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('à°œ', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4c}'),
-    ('\u{c55}', '\u{c56}'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', '\u{c63}'),
-    ('àȀ', 'àȃ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccc}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àł', 'àłž'),
-    ('àł ', '\u{ce3}'),
-    ('àł±', 'àłł'),
-    ('\u{d00}', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Œ'),
-    ('à”Ž', 'à”Ž'),
-    ('à””', '\u{d57}'),
-    ('à”Ÿ', '\u{d63}'),
-    ('à”ș', 'à”ż'),
-    ('\u{d81}', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Č', 'à·ł'),
-    ('àž', '\u{e3a}'),
-    ('àč€', 'àč†'),
-    ('\u{e4d}', '\u{e4d}'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', '\u{eb9}'),
-    ('\u{ebb}', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('\u{ecd}', '\u{ecd}'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('àœ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('\u{f71}', '\u{f83}'),
-    ('àŸˆ', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('က', '\u{1036}'),
-    ('ှ', 'ှ'),
-    ('ျ', 'ဿ'),
-    ('ၐ', 'ႏ'),
-    ('ႚ', '\u{109d}'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', '\u{1713}'),
-    ('ᜟ', '\u{1733}'),
-    ('ᝀ', '\u{1753}'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('\u{1772}', '\u{1773}'),
-    ('ក', 'ឳ'),
-    ('ា', 'ៈ'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', 'ៜ'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', 'သ'),
-    ('ᄐ', 'ᄭ'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('Ṁ', '\u{1a1b}'),
-    ('áš ', '\u{1a5e}'),
-    ('ᩥ', '\u{1a74}'),
-    ('áȘ§', 'áȘ§'),
-    ('\u{1abf}', '\u{1ac0}'),
-    ('\u{1acc}', '\u{1ace}'),
-    ('\u{1b00}', 'Ᏻ'),
-    ('\u{1b35}', '\u{1b43}'),
-    ('ᭅ', 'ᭌ'),
-    ('\u{1b80}', '\u{1ba9}'),
-    ('\u{1bac}', '៯'),
-    ('áźș', 'ᯄ'),
-    ('ᯧ', '\u{1bf1}'),
-    ('ᰀ', '\u{1c36}'),
-    ('ᱍ', 'ᱏ'),
-    ('ᱚ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł¶'),
-    ('áłș', 'áłș'),
-    ('ᮀ', 'á¶ż'),
-    ('\u{1dd3}', '\u{1df4}'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℯ', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⓐ', 'ⓩ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('➯', '➯'),
-    ('々', '〇'),
-    ('〡', '〩'),
-    ('〱', '〔'),
-    ('〾', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('ゝ', 'ゟ'),
-    ('ァ', 'ăƒș'),
-    ('ăƒŒ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘟ'),
-    ('ê˜Ș', 'ꘫ'),
-    ('Ꙁ', 'ê™ź'),
-    ('\u{a674}', '\u{a67b}'),
-    ('ê™ż', 'ê›Ż'),
-    ('ꜗ', 'ꜟ'),
-    ('êœą', 'ꞈ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ꠅ'),
-    ('ꠇ', 'ꠧ'),
-    ('êĄ€', 'êĄł'),
-    ('êą€', 'êŁƒ'),
-    ('\u{a8c5}', '\u{a8c5}'),
-    ('êŁČ', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', '\u{a8ff}'),
-    ('ꀊ', '\u{a92a}'),
-    ('ꀰ', 'ê„’'),
-    ('ꄠ', 'ꄌ'),
-    ('\u{a980}', 'êŠČ'),
-    ('ꊎ', 'êŠż'),
-    ('ꧏ', 'ꧏ'),
-    ('ê§ ', 'ê§Ż'),
-    ('ê§ș', 'ê§Ÿ'),
-    ('Ꚁ', '\u{aa36}'),
-    ('ꩀ', 'ꩍ'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', '\u{aabe}'),
-    ('ꫀ', 'ꫀ'),
-    ('ꫂ', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ż'),
-    ('ê«Č', 'ê«”'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êŻȘ'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', '\u{1037a}'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '𐫀'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '\u{10d27}'),
-    ('𐔊', '𐔄'),
-    ('\u{10d69}', '\u{10d69}'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('\u{10efc}', '\u{10efc}'),
-    ('đŒ€', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ°', 'đŸ'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀀', '\u{11045}'),
-    ('𑁱', '𑁔'),
-    ('\u{11080}', '𑂾'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('𑃐', '𑃹'),
-    ('\u{11100}', '\u{11132}'),
-    ('𑅄', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('\u{11180}', '𑆿'),
-    ('𑇁', '𑇄'),
-    ('𑇎', '\u{111cf}'),
-    ('𑇚', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', '\u{11234}'),
-    ('\u{11237}', '\u{11237}'),
-    ('\u{1123e}', '\u{11241}'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '\u{112e8}'),
-    ('\u{11300}', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '𑍌'),
-    ('𑍐', '𑍐'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍝', '𑍣'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏍'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏓'),
-    ('𑐀', '𑑁'),
-    ('\u{11443}', '𑑅'),
-    ('𑑇', '𑑊'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑓁'),
-    ('𑓄', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑖀', '\u{115b5}'),
-    ('𑖾', 'đ‘–Ÿ'),
-    ('𑗘', '\u{115dd}'),
-    ('𑘀', 'đ‘˜Ÿ'),
-    ('\u{11640}', '\u{11640}'),
-    ('𑙄', '𑙄'),
-    ('𑚀', '\u{116b5}'),
-    ('𑚾', '𑚾'),
-    ('𑜀', '𑜚'),
-    ('\u{1171d}', '\u{1172a}'),
-    ('𑝀', '𑝆'),
-    ('𑠀', '𑠾'),
-    ('𑱠', '𑣟'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{1193c}'),
-    ('𑀿', 'đ‘„‚'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '\u{119d7}'),
-    ('\u{119da}', '𑧟'),
-    ('𑧡', '𑧡'),
-    ('𑧣', 'đ‘§€'),
-    ('𑹀', 'đ‘šČ'),
-    ('\u{11a35}', '\u{11a3e}'),
-    ('𑩐', 'đ‘Ș—'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '\u{11c36}'),
-    ('\u{11c38}', 'đ‘°Ÿ'),
-    ('𑱀', '𑱀'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d41}'),
-    ('\u{11d43}', '\u{11d43}'),
-    ('𑔆', '\u{11d47}'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶–'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘» ', 'đ‘»¶'),
-    ('\u{11f00}', 'đ‘Œ'),
-    ('đ‘Œ’', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f40}'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '\u{1612e}'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('𖫐', 'đ–«­'),
-    ('𖬀', '𖬯'),
-    ('𖭀', '𖭃'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('\u{16f4f}', 'đ–Ÿ‡'),
-    ('\u{16f8f}', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('\u{1bc9e}', '\u{1bc9e}'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('𞄀', '𞄬'),
-    ('đž„·', 'đž„œ'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞓐', 'đž“«'),
-    ('𞗐', '𞗭'),
-    ('𞗰', '𞗰'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('𞀀', 'đž„ƒ'),
-    ('\u{1e947}', '\u{1e947}'),
-    ('đž„‹', 'đž„‹'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('🄰', '🅉'),
-    ('🅐', 'đŸ…©'),
-    ('🅰', '🆉'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const BIDI_CONTROL: &'static [(char, char)] = &[
-    ('\u{61c}', '\u{61c}'),
-    ('\u{200e}', '\u{200f}'),
-    ('\u{202a}', '\u{202e}'),
-    ('\u{2066}', '\u{2069}'),
-];
-
-pub const BIDI_MIRRORED: &'static [(char, char)] = &[
-    ('(', ')'),
-    ('<', '<'),
-    ('>', '>'),
-    ('[', '['),
-    (']', ']'),
-    ('{', '{'),
-    ('}', '}'),
-    ('«', '«'),
-    ('»', '»'),
-    ('àŒș', 'àŒœ'),
-    ('᚛', '᚜'),
-    ('‹', '›'),
-    ('⁅', '⁆'),
-    ('⁜', ' '),
-    ('₍', '₎'),
-    ('⅀', '⅀'),
-    ('∁', '∄'),
-    ('∈', '∍'),
-    ('∑', '∑'),
-    ('∕', '∖'),
-    ('√', '∝'),
-    ('∟', '∱'),
-    ('∀', '∀'),
-    ('∊', '∊'),
-    ('∫', '∳'),
-    ('âˆč', 'âˆč'),
-    ('∻', '≌'),
-    ('≒', '≕'),
-    ('≟', '≠'),
-    ('≱', '≱'),
-    ('≤', '≫'),
-    ('≭', '⊌'),
-    ('⊏', '⊒'),
-    ('⊘', '⊘'),
-    ('⊱', '⊣'),
-    ('⊩', '⊾'),
-    ('⊟', '⊿'),
-    ('⋉', '⋍'),
-    ('⋐', '⋑'),
-    ('⋖', '⋭'),
-    ('⋰', '⋿'),
-    ('⌈', '⌋'),
-    ('⌠', '⌡'),
-    ('⟨', '⟩'),
-    ('❚', '❔'),
-    ('⟀', '⟀'),
-    ('⟃', '⟆'),
-    ('⟈', '⟉'),
-    ('⟋', '⟍'),
-    ('⟓', '⟖'),
-    ('⟜', '⟞'),
-    ('⟱', '⟯'),
-    ('⊃', '⊘'),
-    ('⩛', '⩠'),
-    ('⊹', '⊯'),
-    ('⊞', '⊞'),
-    ('⧀', '⧅'),
-    ('⧉', '⧉'),
-    ('⧎', '⧒'),
-    ('⧔', '⧕'),
-    ('⧘', '⧜'),
-    ('â§Ą', 'â§Ą'),
-    ('â§Ł', 'â§„'),
-    ('â§š', 'â§©'),
-    ('â§Ž', 'â§č'),
-    ('⧌', '⧜'),
-    ('⹊', '⹜'),
-    ('⹞', '⹡'),
-    ('⚀', '⚀'),
-    ('⚊', '⚊'),
-    ('âš©', 'âš©'),
-    ('âš«', 'âšź'),
-    ('⚎', '⚔'),
-    ('⚌', '⚟'),
-    ('⩗', '⩘'),
-    ('â©€', 'â©„'),
-    ('â©Ș', 'â©­'),
-    ('⩯', '⩰'),
-    ('⩳', '⩎'),
-    ('â©č', 'âȘŁ'),
-    ('âȘŠ', 'âȘ­'),
-    ('âȘŻ', '⫖'),
-    ('⫝̸', '⫝̸'),
-    ('⫞', '⫞'),
-    ('⫹', '⫊'),
-    ('⫏', '⫟'),
-    ('⫳', '⫳'),
-    ('â«·', 'â«»'),
-    ('⫝̸', '⫝̸'),
-    ('âŻŸ', 'âŻŸ'),
-    ('⾂', '⾅'),
-    ('⾉', '⾊'),
-    ('⾌', '⾍'),
-    ('⾜', '⾝'),
-    ('âž ', 'âž©'),
-    ('âč•', 'âčœ'),
-    ('〈', '】'),
-    ('〔', '〛'),
-    ('ïč™', 'ïčž'),
-    ('ïč€', 'ïč„'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïœą', 'ïœŁ'),
-    ('𝛛', '𝛛'),
-    ('𝜕', '𝜕'),
-    ('𝝏', '𝝏'),
-    ('𝞉', '𝞉'),
-    ('𝟃', '𝟃'),
-];
-
-pub const CASE_IGNORABLE: &'static [(char, char)] = &[
-    ('\'', '\''),
-    ('.', '.'),
-    (':', ':'),
-    ('^', '^'),
-    ('`', '`'),
-    ('¨', '¨'),
-    ('\u{ad}', '\u{ad}'),
-    ('¯', '¯'),
-    ('´', '´'),
-    ('·', '¸'),
-    ('ʰ', '\u{36f}'),
-    ('ÍŽ', 'Í”'),
-    ('Íș', 'Íș'),
-    ('΄', '΅'),
-    ('·', '·'),
-    ('\u{483}', '\u{489}'),
-    ('ՙ', 'ՙ'),
-    ('՟', '՟'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('ŚŽ', 'ŚŽ'),
-    ('\u{600}', '\u{605}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{61c}', '\u{61c}'),
-    ('ـ', 'ـ'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dd}'),
-    ('\u{6df}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{70f}', '\u{70f}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', 'ß”'),
-    ('ßș', 'ßș'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('àąˆ', 'àąˆ'),
-    ('\u{890}', '\u{891}'),
-    ('\u{897}', '\u{89f}'),
-    ('àŁ‰', '\u{902}'),
-    ('\u{93a}', '\u{93a}'),
-    ('\u{93c}', '\u{93c}'),
-    ('\u{941}', '\u{948}'),
-    ('\u{94d}', '\u{94d}'),
-    ('\u{951}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('à„±', 'à„±'),
-    ('\u{981}', '\u{981}'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9c1}', '\u{9c4}'),
-    ('\u{9cd}', '\u{9cd}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', '\u{a02}'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('\u{a41}', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', '\u{a82}'),
-    ('\u{abc}', '\u{abc}'),
-    ('\u{ac1}', '\u{ac5}'),
-    ('\u{ac7}', '\u{ac8}'),
-    ('\u{acd}', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{b01}', '\u{b01}'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3f}', '\u{b3f}'),
-    ('\u{b41}', '\u{b44}'),
-    ('\u{b4d}', '\u{b4d}'),
-    ('\u{b55}', '\u{b56}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bc0}', '\u{bc0}'),
-    ('\u{bcd}', '\u{bcd}'),
-    ('\u{c00}', '\u{c00}'),
-    ('\u{c04}', '\u{c04}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', '\u{c40}'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', '\u{c81}'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('\u{cbf}', '\u{cbf}'),
-    ('\u{cc6}', '\u{cc6}'),
-    ('\u{ccc}', '\u{ccd}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('\u{d00}', '\u{d01}'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d41}', '\u{d44}'),
-    ('\u{d4d}', '\u{d4d}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', '\u{d81}'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dd2}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('àč†', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('ໆ', 'ໆ'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('\u{f71}', '\u{f7e}'),
-    ('\u{f80}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('\u{102d}', '\u{1030}'),
-    ('\u{1032}', '\u{1037}'),
-    ('\u{1039}', '\u{103a}'),
-    ('\u{103d}', '\u{103e}'),
-    ('\u{1058}', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{1082}'),
-    ('\u{1085}', '\u{1086}'),
-    ('\u{108d}', '\u{108d}'),
-    ('\u{109d}', '\u{109d}'),
-    ('჌', '჌'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1714}'),
-    ('\u{1732}', '\u{1733}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17b5}'),
-    ('\u{17b7}', '\u{17bd}'),
-    ('\u{17c6}', '\u{17c6}'),
-    ('\u{17c9}', '\u{17d3}'),
-    ('ៗ', 'ៗ'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180f}'),
-    ('᥃', '᥃'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', '\u{1922}'),
-    ('\u{1927}', '\u{1928}'),
-    ('\u{1932}', '\u{1932}'),
-    ('\u{1939}', '\u{193b}'),
-    ('\u{1a17}', '\u{1a18}'),
-    ('\u{1a1b}', '\u{1a1b}'),
-    ('\u{1a56}', '\u{1a56}'),
-    ('\u{1a58}', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a60}'),
-    ('\u{1a62}', '\u{1a62}'),
-    ('\u{1a65}', '\u{1a6c}'),
-    ('\u{1a73}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('áȘ§', 'áȘ§'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', '\u{1b03}'),
-    ('\u{1b34}', '\u{1b34}'),
-    ('\u{1b36}', '\u{1b3a}'),
-    ('\u{1b3c}', '\u{1b3c}'),
-    ('\u{1b42}', '\u{1b42}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1b81}'),
-    ('\u{1ba2}', '\u{1ba5}'),
-    ('\u{1ba8}', '\u{1ba9}'),
-    ('\u{1bab}', '\u{1bad}'),
-    ('\u{1be6}', '\u{1be6}'),
-    ('\u{1be8}', '\u{1be9}'),
-    ('\u{1bed}', '\u{1bed}'),
-    ('\u{1bef}', '\u{1bf1}'),
-    ('\u{1c2c}', '\u{1c33}'),
-    ('\u{1c36}', '\u{1c37}'),
-    ('ᱞ', 'ᱜ'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce0}'),
-    ('\u{1ce2}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('ᎏ', 'á”Ș'),
-    ('ᔞ', 'ᔞ'),
-    ('ᶛ', '\u{1dff}'),
-    ('ៜ', 'ៜ'),
-    ('áŸż', '῁'),
-    ('῍', '῏'),
-    ('῝', '῟'),
-    ('῭', '`'),
-    ('áżœ', 'áżŸ'),
-    ('\u{200b}', '\u{200f}'),
-    ('‘', '’'),
-    (' ', ' '),
-    ('‧', '‧'),
-    ('\u{202a}', '\u{202e}'),
-    ('\u{2060}', '\u{2064}'),
-    ('\u{2066}', '\u{206f}'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('ⱌ', 'ⱜ'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('┯', '┯'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('➯', '➯'),
-    ('々', '々'),
-    ('\u{302a}', '\u{302d}'),
-    ('〱', '〔'),
-    ('〻', '〻'),
-    ('\u{3099}', 'ゞ'),
-    ('ăƒŒ', 'ăƒŸ'),
-    ('ꀕ', 'ꀕ'),
-    ('ꓞ', 'ꓜ'),
-    ('ꘌ', 'ꘌ'),
-    ('\u{a66f}', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('ê™ż', 'ê™ż'),
-    ('ꚜ', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('꜀', 'êœĄ'),
-    ('ꝰ', 'ꝰ'),
-    ('ꞈ', '꞊'),
-    ('êŸČ', '꟎'),
-    ('꟞', 'êŸč'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('\u{a825}', '\u{a826}'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('\u{a8c4}', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a951}'),
-    ('\u{a980}', '\u{a982}'),
-    ('\u{a9b3}', '\u{a9b3}'),
-    ('\u{a9b6}', '\u{a9b9}'),
-    ('\u{a9bc}', '\u{a9bd}'),
-    ('ꧏ', 'ꧏ'),
-    ('\u{a9e5}', 'ê§Š'),
-    ('\u{aa29}', '\u{aa2e}'),
-    ('\u{aa31}', '\u{aa32}'),
-    ('\u{aa35}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', '\u{aa4c}'),
-    ('ê©°', 'ê©°'),
-    ('\u{aa7c}', '\u{aa7c}'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('ꫝ', 'ꫝ'),
-    ('\u{aaec}', '\u{aaed}'),
-    ('ê«ł', '꫎'),
-    ('\u{aaf6}', '\u{aaf6}'),
-    ('꭛', 'ꭟ'),
-    ('ê­©', 'ê­«'),
-    ('\u{abe5}', '\u{abe5}'),
-    ('\u{abe8}', '\u{abe8}'),
-    ('\u{abed}', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('ïźČ', 'ïŻ‚'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('ïž“', 'ïž“'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('ïč’', 'ïč’'),
-    ('ïč•', 'ïč•'),
-    ('\u{feff}', '\u{feff}'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('ïżŁ', 'ïżŁ'),
-    ('\u{fff9}', '\u{fffb}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('𐔎', '𐔎'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('𐔯', '𐔯'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('\u{11001}', '\u{11001}'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '\u{11081}'),
-    ('\u{110b3}', '\u{110b6}'),
-    ('\u{110b9}', '\u{110ba}'),
-    ('\u{110bd}', '\u{110bd}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{110cd}', '\u{110cd}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{1112b}'),
-    ('\u{1112d}', '\u{11134}'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '\u{11181}'),
-    ('\u{111b6}', '\u{111be}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('\u{111cf}', '\u{111cf}'),
-    ('\u{1122f}', '\u{11231}'),
-    ('\u{11234}', '\u{11234}'),
-    ('\u{11236}', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112df}'),
-    ('\u{112e3}', '\u{112ea}'),
-    ('\u{11300}', '\u{11301}'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{11340}', '\u{11340}'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113bb}', '\u{113c0}'),
-    ('\u{113ce}', '\u{113ce}'),
-    ('\u{113d0}', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('\u{11438}', '\u{1143f}'),
-    ('\u{11442}', '\u{11444}'),
-    ('\u{11446}', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b3}', '\u{114b8}'),
-    ('\u{114ba}', '\u{114ba}'),
-    ('\u{114bf}', '\u{114c0}'),
-    ('\u{114c2}', '\u{114c3}'),
-    ('\u{115b2}', '\u{115b5}'),
-    ('\u{115bc}', '\u{115bd}'),
-    ('\u{115bf}', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('\u{11633}', '\u{1163a}'),
-    ('\u{1163d}', '\u{1163d}'),
-    ('\u{1163f}', '\u{11640}'),
-    ('\u{116ab}', '\u{116ab}'),
-    ('\u{116ad}', '\u{116ad}'),
-    ('\u{116b0}', '\u{116b5}'),
-    ('\u{116b7}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1171d}'),
-    ('\u{1171f}', '\u{1171f}'),
-    ('\u{11722}', '\u{11725}'),
-    ('\u{11727}', '\u{1172b}'),
-    ('\u{1182f}', '\u{11837}'),
-    ('\u{11839}', '\u{1183a}'),
-    ('\u{1193b}', '\u{1193c}'),
-    ('\u{1193e}', '\u{1193e}'),
-    ('\u{11943}', '\u{11943}'),
-    ('\u{119d4}', '\u{119d7}'),
-    ('\u{119da}', '\u{119db}'),
-    ('\u{119e0}', '\u{119e0}'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', '\u{11a38}'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a56}'),
-    ('\u{11a59}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a96}'),
-    ('\u{11a98}', '\u{11a99}'),
-    ('\u{11c30}', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3d}'),
-    ('\u{11c3f}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('\u{11caa}', '\u{11cb0}'),
-    ('\u{11cb2}', '\u{11cb3}'),
-    ('\u{11cb5}', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('\u{11d95}', '\u{11d95}'),
-    ('\u{11d97}', '\u{11d97}'),
-    ('\u{11ef3}', '\u{11ef4}'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('\u{11f36}', '\u{11f3a}'),
-    ('\u{11f40}', '\u{11f40}'),
-    ('\u{11f42}', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13430}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{16129}'),
-    ('\u{1612d}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('𖭀', '𖭃'),
-    ('𖔀', 'đ–”‚'),
-    ('đ–”«', '𖔏'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('\u{16f8f}', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '\u{16fe4}'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1bca0}', '\u{1bca3}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d167}', '\u{1d169}'),
-    ('\u{1d173}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', 'đž„œ'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('đž“«', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', 'đž„‹'),
-    ('đŸ»', '🏿'),
-    ('\u{e0001}', '\u{e0001}'),
-    ('\u{e0020}', '\u{e007f}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const CASED: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'Æș'),
-    ('ÆŒ', 'Æż'),
-    ('DŽ', 'ʓ'),
-    ('ʕ', 'Êž'),
-    ('ˀ', 'ˁ'),
-    ('Ë ', 'Ë€'),
-    ('\u{345}', '\u{345}'),
-    ('Ͱ', 'ͳ'),
-    ('Ͷ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՠ', 'ֈ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ჿ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᮀ', 'á¶ż'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℯ', '℮'),
-    ('â„č', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ⅿ'),
-    ('Ↄ', 'ↄ'),
-    ('Ⓐ', 'ⓩ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('Ꙁ', 'ꙭ'),
-    ('Ꚁ', 'ꚝ'),
-    ('êœą', 'ꞇ'),
-    ('Ꞌ', 'ꞎ'),
-    ('Ꞑ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ꟶ'),
-    ('꟞', 'êŸș'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('𐐀', '𐑏'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐞀', '𐞀'),
-    ('𐞃', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐔐', '𐔄'),
-    ('𐔰', '𐶅'),
-    ('𑱠', '𑣟'),
-    ('đ–č€', 'đ–čż'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('đŒ€', 'đŒ‰'),
-    ('đŒ‹', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞀀', 'đž„ƒ'),
-    ('🄰', '🅉'),
-    ('🅐', 'đŸ…©'),
-    ('🅰', '🆉'),
-];
-
-pub const CHANGES_WHEN_CASEFOLDED: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('µ', 'µ'),
-    ('À', 'Ö'),
-    ('Ø', 'ß'),
-    ('Ā', 'Ā'),
-    ('Ă', 'Ă'),
-    ('Ą', 'Ą'),
-    ('Ć', 'Ć'),
-    ('Ĉ', 'Ĉ'),
-    ('Ċ', 'Ċ'),
-    ('Č', 'Č'),
-    ('Ď', 'Ď'),
-    ('Đ', 'Đ'),
-    ('Ē', 'Ē'),
-    ('Ĕ', 'Ĕ'),
-    ('Ė', 'Ė'),
-    ('Ę', 'Ę'),
-    ('Ě', 'Ě'),
-    ('Ĝ', 'Ĝ'),
-    ('Ğ', 'Ğ'),
-    ('Ä ', 'Ä '),
-    ('Äą', 'Äą'),
-    ('Ä€', 'Ä€'),
-    ('ÄŠ', 'ÄŠ'),
-    ('Äš', 'Äš'),
-    ('ÄȘ', 'ÄȘ'),
-    ('ÄŹ', 'ÄŹ'),
-    ('Äź', 'Äź'),
-    ('İ', 'İ'),
-    ('ÄČ', 'ÄČ'),
-    ('ÄŽ', 'ÄŽ'),
-    ('Ķ', 'Ķ'),
-    ('Äč', 'Äč'),
-    ('Ä»', 'Ä»'),
-    ('Ĝ', 'Ĝ'),
-    ('Äż', 'Äż'),
-    ('Ɓ', 'Ɓ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('Ƈ', 'Ƈ'),
-    ('Ɖ', 'Ɗ'),
-    ('Ì', 'Ì'),
-    ('Ǝ', 'Ǝ'),
-    ('Ɛ', 'Ɛ'),
-    ('Œ', 'Œ'),
-    ('Ɣ', 'Ɣ'),
-    ('Ɩ', 'Ɩ'),
-    ('Ƙ', 'Ƙ'),
-    ('Ú', 'Ú'),
-    ('Ü', 'Ü'),
-    ('ƞ', 'ƞ'),
-    ('Š', 'Š'),
-    ('Ćą', 'Ćą'),
-    ('Ć€', 'Ć€'),
-    ('ĆŠ', 'ĆŠ'),
-    ('Ćš', 'Ćš'),
-    ('ĆȘ', 'ĆȘ'),
-    ('ĆŹ', 'ĆŹ'),
-    ('Ćź', 'Ćź'),
-    ('ư', 'ư'),
-    ('ĆČ', 'ĆČ'),
-    ('ĆŽ', 'ĆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Ÿ', 'Ćč'),
-    ('Ć»', 'Ć»'),
-    ('Ćœ', 'Ćœ'),
-    ('Ćż', 'Ćż'),
-    ('Ɓ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ƈ'),
-    ('Ɖ', 'Ƌ'),
-    ('Ǝ', 'Ƒ'),
-    ('Ɠ', 'Ɣ'),
-    ('Ɩ', 'Ƙ'),
-    ('Ɯ', 'Ɲ'),
-    ('Ɵ', 'Ơ'),
-    ('Æą', 'Æą'),
-    ('Æ€', 'Æ€'),
-    ('Ɗ', 'Ƨ'),
-    ('Æ©', 'Æ©'),
-    ('ÆŹ', 'ÆŹ'),
-    ('Æź', 'ÆŻ'),
-    ('Ʊ', 'Æł'),
-    ('Æ”', 'Æ”'),
-    ('Æ·', 'Æž'),
-    ('ƌ', 'ƌ'),
-    ('DŽ', 'Dž'),
-    ('LJ', 'Lj'),
-    ('NJ', 'Nj'),
-    ('Ǎ', 'Ǎ'),
-    ('Ǐ', 'Ǐ'),
-    ('Ǒ', 'Ǒ'),
-    ('Ǔ', 'Ǔ'),
-    ('Ǖ', 'Ǖ'),
-    ('Ǘ', 'Ǘ'),
-    ('Ǚ', 'Ǚ'),
-    ('Ǜ', 'Ǜ'),
-    ('Ǟ', 'Ǟ'),
-    ('Ç ', 'Ç '),
-    ('Çą', 'Çą'),
-    ('Ç€', 'Ç€'),
-    ('ÇŠ', 'ÇŠ'),
-    ('Çš', 'Çš'),
-    ('ÇȘ', 'ÇȘ'),
-    ('ÇŹ', 'ÇŹ'),
-    ('Çź', 'Çź'),
-    ('DZ', 'ÇČ'),
-    ('ÇŽ', 'ÇŽ'),
-    ('Ƕ', 'Ǟ'),
-    ('Çș', 'Çș'),
-    ('nj', 'nj'),
-    ('ÇŸ', 'ÇŸ'),
-    ('Ȁ', 'Ȁ'),
-    ('Ȃ', 'Ȃ'),
-    ('Ȅ', 'Ȅ'),
-    ('Ȇ', 'Ȇ'),
-    ('Ȉ', 'Ȉ'),
-    ('Ȋ', 'Ȋ'),
-    ('Ȍ', 'Ȍ'),
-    ('Ȏ', 'Ȏ'),
-    ('Ȑ', 'Ȑ'),
-    ('Ȓ', 'Ȓ'),
-    ('Ȕ', 'Ȕ'),
-    ('Ȗ', 'Ȗ'),
-    ('Ș', 'Ș'),
-    ('Ț', 'Ț'),
-    ('Ȝ', 'Ȝ'),
-    ('Ȟ', 'Ȟ'),
-    ('È ', 'È '),
-    ('Èą', 'Èą'),
-    ('È€', 'È€'),
-    ('ÈŠ', 'ÈŠ'),
-    ('Èš', 'Èš'),
-    ('ÈȘ', 'ÈȘ'),
-    ('ÈŹ', 'ÈŹ'),
-    ('Èź', 'Èź'),
-    ('Ȱ', 'Ȱ'),
-    ('ÈČ', 'ÈČ'),
-    ('Èș', 'È»'),
-    ('Ȝ', 'ȟ'),
-    ('Ɂ', 'Ɂ'),
-    ('Ƀ', 'Ɇ'),
-    ('Ɉ', 'Ɉ'),
-    ('Ɋ', 'Ɋ'),
-    ('Ɍ', 'Ɍ'),
-    ('Ɏ', 'Ɏ'),
-    ('\u{345}', '\u{345}'),
-    ('Ͱ', 'Ͱ'),
-    ('ÍČ', 'ÍČ'),
-    ('Ͷ', 'Ͷ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ώ'),
-    ('Α', 'Ρ'),
-    ('Σ', 'Ϋ'),
-    ('ς', 'ς'),
-    ('Ϗ', 'ϑ'),
-    ('ϕ', 'ϖ'),
-    ('Ϙ', 'Ϙ'),
-    ('Ϛ', 'Ϛ'),
-    ('Ϝ', 'Ϝ'),
-    ('Ϟ', 'Ϟ'),
-    ('Ï ', 'Ï '),
-    ('Ïą', 'Ïą'),
-    ('Ï€', 'Ï€'),
-    ('ÏŠ', 'ÏŠ'),
-    ('Ïš', 'Ïš'),
-    ('ÏȘ', 'ÏȘ'),
-    ('ÏŹ', 'ÏŹ'),
-    ('Ïź', 'Ïź'),
-    ('ϰ', 'ϱ'),
-    ('ÏŽ', 'Ï”'),
-    ('Ï·', 'Ï·'),
-    ('Ïč', 'Ïș'),
-    ('Ïœ', 'ĐŻ'),
-    ('Ń ', 'Ń '),
-    ('Ńą', 'Ńą'),
-    ('Ń€', 'Ń€'),
-    ('ŃŠ', 'ŃŠ'),
-    ('Ńš', 'Ńš'),
-    ('ŃȘ', 'ŃȘ'),
-    ('ŃŹ', 'ŃŹ'),
-    ('Ńź', 'Ńź'),
-    ('Ѱ', 'Ѱ'),
-    ('ŃČ', 'ŃČ'),
-    ('ŃŽ', 'ŃŽ'),
-    ('Ѷ', 'Ѷ'),
-    ('Ńž', 'Ńž'),
-    ('Ńș', 'Ńș'),
-    ('ŃŒ', 'ŃŒ'),
-    ('ŃŸ', 'ŃŸ'),
-    ('Ҁ', 'Ҁ'),
-    ('Ҋ', 'Ҋ'),
-    ('Ҍ', 'Ҍ'),
-    ('Ҏ', 'Ҏ'),
-    ('Ґ', 'Ґ'),
-    ('Ғ', 'Ғ'),
-    ('Ҕ', 'Ҕ'),
-    ('Җ', 'Җ'),
-    ('Ҙ', 'Ҙ'),
-    ('Қ', 'Қ'),
-    ('Ҝ', 'Ҝ'),
-    ('Ҟ', 'Ҟ'),
-    ('Ò ', 'Ò '),
-    ('Òą', 'Òą'),
-    ('Ò€', 'Ò€'),
-    ('ÒŠ', 'ÒŠ'),
-    ('Òš', 'Òš'),
-    ('ÒȘ', 'ÒȘ'),
-    ('ÒŹ', 'ÒŹ'),
-    ('Òź', 'Òź'),
-    ('Ò°', 'Ò°'),
-    ('ÒČ', 'ÒČ'),
-    ('ÒŽ', 'ÒŽ'),
-    ('Ò¶', 'Ò¶'),
-    ('Òž', 'Òž'),
-    ('Òș', 'Òș'),
-    ('Ҍ', 'Ҍ'),
-    ('ÒŸ', 'ÒŸ'),
-    ('Ӏ', 'Ӂ'),
-    ('Ӄ', 'Ӄ'),
-    ('Ӆ', 'Ӆ'),
-    ('Ӈ', 'Ӈ'),
-    ('Ӊ', 'Ӊ'),
-    ('Ӌ', 'Ӌ'),
-    ('Ӎ', 'Ӎ'),
-    ('Ӑ', 'Ӑ'),
-    ('Ӓ', 'Ӓ'),
-    ('Ӕ', 'Ӕ'),
-    ('Ӗ', 'Ӗ'),
-    ('Ә', 'Ә'),
-    ('Ӛ', 'Ӛ'),
-    ('Ӝ', 'Ӝ'),
-    ('Ӟ', 'Ӟ'),
-    ('Ó ', 'Ó '),
-    ('Óą', 'Óą'),
-    ('Ó€', 'Ó€'),
-    ('ÓŠ', 'ÓŠ'),
-    ('Óš', 'Óš'),
-    ('ÓȘ', 'ÓȘ'),
-    ('ÓŹ', 'ÓŹ'),
-    ('Óź', 'Óź'),
-    ('Ó°', 'Ó°'),
-    ('ÓČ', 'ÓČ'),
-    ('ÓŽ', 'ÓŽ'),
-    ('Ó¶', 'Ó¶'),
-    ('Óž', 'Óž'),
-    ('Óș', 'Óș'),
-    ('ӌ', 'ӌ'),
-    ('ÓŸ', 'ÓŸ'),
-    ('Ԁ', 'Ԁ'),
-    ('Ԃ', 'Ԃ'),
-    ('Ԅ', 'Ԅ'),
-    ('Ԇ', 'Ԇ'),
-    ('Ԉ', 'Ԉ'),
-    ('Ԋ', 'Ԋ'),
-    ('Ԍ', 'Ԍ'),
-    ('Ԏ', 'Ԏ'),
-    ('Ԑ', 'Ԑ'),
-    ('Ԓ', 'Ԓ'),
-    ('Ԕ', 'Ԕ'),
-    ('Ԗ', 'Ԗ'),
-    ('Ԙ', 'Ԙ'),
-    ('Ԛ', 'Ԛ'),
-    ('Ԝ', 'Ԝ'),
-    ('Ԟ', 'Ԟ'),
-    ('Ô ', 'Ô '),
-    ('Ôą', 'Ôą'),
-    ('Ô€', 'Ô€'),
-    ('ÔŠ', 'ÔŠ'),
-    ('Ôš', 'Ôš'),
-    ('ÔȘ', 'ÔȘ'),
-    ('ÔŹ', 'ÔŹ'),
-    ('Ôź', 'Ôź'),
-    ('Ô±', 'Ֆ'),
-    ('և', 'և'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȉ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᾀ', 'ᾀ'),
-    ('ᾂ', 'ᾂ'),
-    ('ᾄ', 'ᾄ'),
-    ('ᾆ', 'ᾆ'),
-    ('ឈ', 'ឈ'),
-    ('ᾊ', 'ᾊ'),
-    ('ᾌ', 'ᾌ'),
-    ('ᾎ', 'ᾎ'),
-    ('ថ', 'ថ'),
-    ('ᾒ', 'ᾒ'),
-    ('ᾔ', 'ᾔ'),
-    ('ᾖ', 'ᾖ'),
-    ('ម', 'ម'),
-    ('ᾚ', 'ᾚ'),
-    ('ᾜ', 'ᾜ'),
-    ('ᾞ', 'ᾞ'),
-    ('áž ', 'áž '),
-    ('ážą', 'ážą'),
-    ('ក', 'ក'),
-    ('ដ', 'ដ'),
-    ('ážš', 'ážš'),
-    ('ážȘ', 'ážȘ'),
-    ('ត', 'ត'),
-    ('ážź', 'ážź'),
-    ('áž°', 'áž°'),
-    ('ážČ', 'ážČ'),
-    ('ណ', 'ណ'),
-    ('áž¶', 'áž¶'),
-    ('ážž', 'ážž'),
-    ('ážș', 'ážș'),
-    ('ឌ', 'ឌ'),
-    ('ស', 'ស'),
-    ('áč€', 'áč€'),
-    ('áč‚', 'áč‚'),
-    ('áč„', 'áč„'),
-    ('áč†', 'áč†'),
-    ('áčˆ', 'áčˆ'),
-    ('áčŠ', 'áčŠ'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč', 'áč'),
-    ('áč’', 'áč’'),
-    ('áč”', 'áč”'),
-    ('áč–', 'áč–'),
-    ('áč˜', 'áč˜'),
-    ('áčš', 'áčš'),
-    ('áčœ', 'áčœ'),
-    ('áčž', 'áčž'),
-    ('áč ', 'áč '),
-    ('áčą', 'áčą'),
-    ('áč€', 'áč€'),
-    ('áčŠ', 'áčŠ'),
-    ('áčš', 'áčš'),
-    ('áčȘ', 'áčȘ'),
-    ('áčŹ', 'áčŹ'),
-    ('áčź', 'áčź'),
-    ('áč°', 'áč°'),
-    ('áčČ', 'áčČ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč¶', 'áč¶'),
-    ('áčž', 'áčž'),
-    ('áčș', 'áčș'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŸ', 'áčŸ'),
-    ('áș€', 'áș€'),
-    ('áș‚', 'áș‚'),
-    ('áș„', 'áș„'),
-    ('áș†', 'áș†'),
-    ('áșˆ', 'áșˆ'),
-    ('áșŠ', 'áșŠ'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș', 'áș'),
-    ('áș’', 'áș’'),
-    ('áș”', 'áș”'),
-    ('áșš', 'áș›'),
-    ('áșž', 'áșž'),
-    ('áș ', 'áș '),
-    ('áșą', 'áșą'),
-    ('áș€', 'áș€'),
-    ('áșŠ', 'áșŠ'),
-    ('áșš', 'áșš'),
-    ('áșȘ', 'áșȘ'),
-    ('áșŹ', 'áșŹ'),
-    ('áșź', 'áșź'),
-    ('áș°', 'áș°'),
-    ('áșČ', 'áșČ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș¶', 'áș¶'),
-    ('áșž', 'áșž'),
-    ('áșș', 'áșș'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŸ', 'áșŸ'),
-    ('Ề', 'Ề'),
-    ('Ể', 'Ể'),
-    ('Ễ', 'Ễ'),
-    ('Ệ', 'Ệ'),
-    ('Ỉ', 'Ỉ'),
-    ('Ị', 'Ị'),
-    ('Ọ', 'Ọ'),
-    ('Ỏ', 'Ỏ'),
-    ('Ố', 'Ố'),
-    ('Ồ', 'Ồ'),
-    ('Ổ', 'Ổ'),
-    ('Ỗ', 'Ỗ'),
-    ('Ộ', 'Ộ'),
-    ('Ớ', 'Ớ'),
-    ('Ờ', 'Ờ'),
-    ('Ở', 'Ở'),
-    ('á» ', 'á» '),
-    ('ỹ', 'ỹ'),
-    ('Ề', 'Ề'),
-    ('Ị', 'Ị'),
-    ('Ớ', 'Ớ'),
-    ('á»Ș', 'á»Ș'),
-    ('ỏ', 'ỏ'),
-    ('ở', 'ở'),
-    ('á»°', 'á»°'),
-    ('á»Č', 'á»Č'),
-    ('Ỏ', 'Ỏ'),
-    ('á»¶', 'á»¶'),
-    ('Ở', 'Ở'),
-    ('á»ș', 'á»ș'),
-    ('Ọ', 'Ọ'),
-    ('ở', 'ở'),
-    ('ገ', 'ጏ'),
-    ('ጘ', 'ጝ'),
-    ('ጚ', 'áŒŻ'),
-    ('ጞ', 'áŒż'),
-    ('ᜈ', 'ᜍ'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', 'ᜟ'),
-    ('᜚', 'áœŻ'),
-    ('ៀ', 'áŸŻ'),
-    ('áŸČ', '៎'),
-    ('៷', '៌'),
-    ('ῂ', 'ῄ'),
-    ('ῇ', 'ῌ'),
-    ('Ῐ', 'Ί'),
-    ('Ὶ', '῏'),
-    ('áżČ', '῎'),
-    ('áż·', 'áżŒ'),
-    ('℩', '℩'),
-    ('â„Ș', 'Å'),
-    ('â„Č', 'â„Č'),
-    ('Ⅰ', 'Ⅿ'),
-    ('Ↄ', 'Ↄ'),
-    ('Ⓐ', 'Ⓩ'),
-    ('Ⰰ', 'Ⱟ'),
-    ('â± ', 'â± '),
-    ('ⱹ', 'ⱀ'),
-    ('â±§', 'â±§'),
-    ('Ⱪ', 'Ⱪ'),
-    ('Ⱬ', 'Ⱬ'),
-    ('â±­', 'â±°'),
-    ('â±Č', 'â±Č'),
-    ('â±”', 'â±”'),
-    ('ⱟ', 'âȀ'),
-    ('âȂ', 'âȂ'),
-    ('âȄ', 'âȄ'),
-    ('âȆ', 'âȆ'),
-    ('âȈ', 'âȈ'),
-    ('âȊ', 'âȊ'),
-    ('âȌ', 'âȌ'),
-    ('âȎ', 'âȎ'),
-    ('âȐ', 'âȐ'),
-    ('âȒ', 'âȒ'),
-    ('âȔ', 'âȔ'),
-    ('âȖ', 'âȖ'),
-    ('âȘ', 'âȘ'),
-    ('âȚ', 'âȚ'),
-    ('âȜ', 'âȜ'),
-    ('âȞ', 'âȞ'),
-    ('âČ ', 'âČ '),
-    ('âČą', 'âČą'),
-    ('âČ€', 'âČ€'),
-    ('âČŠ', 'âČŠ'),
-    ('âČš', 'âČš'),
-    ('âČȘ', 'âČȘ'),
-    ('âČŹ', 'âČŹ'),
-    ('âČź', 'âČź'),
-    ('âȰ', 'âȰ'),
-    ('âČČ', 'âČČ'),
-    ('âČŽ', 'âČŽ'),
-    ('âȶ', 'âȶ'),
-    ('âČž', 'âČž'),
-    ('âČș', 'âČș'),
-    ('âČŒ', 'âČŒ'),
-    ('âČŸ', 'âČŸ'),
-    ('Ⳁ', 'Ⳁ'),
-    ('Ⳃ', 'Ⳃ'),
-    ('Ⳅ', 'Ⳅ'),
-    ('Ⳇ', 'Ⳇ'),
-    ('Ⳉ', 'Ⳉ'),
-    ('Ⳋ', 'Ⳋ'),
-    ('Ⳍ', 'Ⳍ'),
-    ('Ⳏ', 'Ⳏ'),
-    ('Ⳑ', 'Ⳑ'),
-    ('Ⳓ', 'Ⳓ'),
-    ('Ⳕ', 'Ⳕ'),
-    ('Ⳗ', 'Ⳗ'),
-    ('Ⳙ', 'Ⳙ'),
-    ('Ⳛ', 'Ⳛ'),
-    ('Ⳝ', 'Ⳝ'),
-    ('Ⳟ', 'Ⳟ'),
-    ('âł ', 'âł '),
-    ('âłą', 'âłą'),
-    ('âł«', 'âł«'),
-    ('âł­', 'âł­'),
-    ('âłČ', 'âłČ'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙃ', 'Ꙃ'),
-    ('Ꙅ', 'Ꙅ'),
-    ('Ꙇ', 'Ꙇ'),
-    ('Ꙉ', 'Ꙉ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙍ', 'Ꙍ'),
-    ('Ꙏ', 'Ꙏ'),
-    ('Ꙑ', 'Ꙑ'),
-    ('Ꙓ', 'Ꙓ'),
-    ('Ꙕ', 'Ꙕ'),
-    ('Ꙗ', 'Ꙗ'),
-    ('Ꙙ', 'Ꙙ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('Ꙝ', 'Ꙝ'),
-    ('Ꙟ', 'Ꙟ'),
-    ('Ꙡ', 'Ꙡ'),
-    ('ê™ą', 'ê™ą'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('ê™Ș', 'ê™Ș'),
-    ('ê™Ź', 'ê™Ź'),
-    ('Ꚁ', 'Ꚁ'),
-    ('Ꚃ', 'Ꚃ'),
-    ('Ꚅ', 'Ꚅ'),
-    ('Ꚇ', 'Ꚇ'),
-    ('Ꚉ', 'Ꚉ'),
-    ('Ꚋ', 'Ꚋ'),
-    ('Ꚍ', 'Ꚍ'),
-    ('Ꚏ', 'Ꚏ'),
-    ('Ꚑ', 'Ꚑ'),
-    ('Ꚓ', 'Ꚓ'),
-    ('Ꚕ', 'Ꚕ'),
-    ('Ꚗ', 'Ꚗ'),
-    ('Ꚙ', 'Ꚙ'),
-    ('Ꚛ', 'Ꚛ'),
-    ('êœą', 'êœą'),
-    ('꜀', '꜀'),
-    ('꜊', '꜊'),
-    ('ꜚ', 'ꜚ'),
-    ('êœȘ', 'êœȘ'),
-    ('êœŹ', 'êœŹ'),
-    ('êœź', 'êœź'),
-    ('êœČ', 'êœČ'),
-    ('꜎', '꜎'),
-    ('Ꜷ', 'Ꜷ'),
-    ('ꜞ', 'ꜞ'),
-    ('êœș', 'êœș'),
-    ('꜌', '꜌'),
-    ('ꜟ', 'ꜟ'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝃ', 'Ꝃ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('Ꝇ', 'Ꝇ'),
-    ('Ꝉ', 'Ꝉ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝍ', 'Ꝍ'),
-    ('Ꝏ', 'Ꝏ'),
-    ('Ꝑ', 'Ꝑ'),
-    ('Ꝓ', 'Ꝓ'),
-    ('Ꝕ', 'Ꝕ'),
-    ('Ꝗ', 'Ꝗ'),
-    ('Ꝙ', 'Ꝙ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('Ꝝ', 'Ꝝ'),
-    ('Ꝟ', 'Ꝟ'),
-    ('Ꝡ', 'Ꝡ'),
-    ('êą', 'êą'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('êȘ', 'êȘ'),
-    ('êŹ', 'êŹ'),
-    ('êź', 'êź'),
-    ('êč', 'êč'),
-    ('Ꝼ', 'Ꝼ'),
-    ('Ꝝ', 'ꝟ'),
-    ('Ꞁ', 'Ꞁ'),
-    ('Ꞃ', 'Ꞃ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('Ꞇ', 'Ꞇ'),
-    ('Ꞌ', 'Ꞌ'),
-    ('Ɥ', 'Ɥ'),
-    ('Ꞑ', 'Ꞑ'),
-    ('Ꞓ', 'Ꞓ'),
-    ('Ꞗ', 'Ꞗ'),
-    ('Ꞙ', 'Ꞙ'),
-    ('Ꞛ', 'Ꞛ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('Ꞟ', 'Ꞟ'),
-    ('Ꞡ', 'Ꞡ'),
-    ('êžą', 'êžą'),
-    ('Ꞁ', 'Ꞁ'),
-    ('꞊', '꞊'),
-    ('Ꞛ', 'Ꞛ'),
-    ('êžȘ', 'êžź'),
-    ('Ʞ', 'ꞎ'),
-    ('Ꞷ', 'Ꞷ'),
-    ('êžž', 'êžž'),
-    ('êžș', 'êžș'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞟ', 'ꞟ'),
-    ('Ꟁ', 'Ꟁ'),
-    ('Ꟃ', 'Ꟃ'),
-    ('Ꞔ', 'Ꟈ'),
-    ('Ꟊ', 'Ꟊ'),
-    ('Ɤ', 'Ꟍ'),
-    ('Ꟑ', 'Ꟑ'),
-    ('Ꟗ', 'Ꟗ'),
-    ('Ꟙ', 'Ꟙ'),
-    ('Ꟛ', 'Ꟛ'),
-    ('Ƛ', 'Ƛ'),
-    ('꟔', '꟔'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŒĄ', 'ïŒș'),
-    ('𐐀', '𐐧'),
-    ('𐒰', '𐓓'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('đČ€', 'đČČ'),
-    ('𐔐', '𐔄'),
-    ('𑱠', '𑱿'),
-    ('đ–č€', 'đ–čŸ'),
-    ('𞀀', '𞀥'),
-];
-
-pub const CHANGES_WHEN_CASEMAPPED: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('µ', 'µ'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'Ä·'),
-    ('Äč', 'ƌ'),
-    ('Ǝ', 'Ʃ'),
-    ('ÆŹ', 'Æč'),
-    ('ƌ', 'Ɯ'),
-    ('Æż', 'Æż'),
-    ('DŽ', 'Ƞ'),
-    ('Èą', 'Èł'),
-    ('Èș', 'ɔ'),
-    ('ɖ', 'ɗ'),
-    ('ə', 'ə'),
-    ('ɛ', 'ɜ'),
-    ('É ', 'ÉĄ'),
-    ('ÉŁ', 'ÉŠ'),
-    ('Éš', 'ÉŹ'),
-    ('ÉŻ', 'ÉŻ'),
-    ('ɱ', 'ÉČ'),
-    ('É”', 'É”'),
-    ('ɜ', 'ɜ'),
-    ('ʀ', 'ʀ'),
-    ('ʂ', 'ʃ'),
-    ('ʇ', 'ʌ'),
-    ('ʒ', 'ʒ'),
-    ('ʝ', 'ʞ'),
-    ('\u{345}', '\u{345}'),
-    ('Ͱ', 'ͳ'),
-    ('Ͷ', 'ͷ'),
-    ('ͻ', '͜'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'ϑ'),
-    ('ϕ', 'Ï”'),
-    ('Ï·', 'Ï»'),
-    ('Ϝ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ա', 'և'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('ნ', 'ჿ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('á”č', 'á”č'),
-    ('ᔜ', 'ᔜ'),
-    ('ᶎ', 'ᶎ'),
-    ('ᾀ', 'áș›'),
-    ('áșž', 'áșž'),
-    ('áș ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('℩', '℩'),
-    ('â„Ș', 'Å'),
-    ('â„Č', 'â„Č'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ⅿ'),
-    ('Ↄ', 'ↄ'),
-    ('Ⓐ', 'ⓩ'),
-    ('Ⰰ', 'Ɒ'),
-    ('â±Č', 'ⱳ'),
-    ('â±”', 'â±¶'),
-    ('ⱟ', 'ⳣ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('Ꙁ', 'ꙭ'),
-    ('Ꚁ', 'ꚛ'),
-    ('êœą', 'êœŻ'),
-    ('êœČ', 'êŻ'),
-    ('êč', 'ꞇ'),
-    ('Ꞌ', 'Ɥ'),
-    ('Ꞑ', 'ꞔ'),
-    ('Ꞗ', 'êžź'),
-    ('Ʞ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('Ꟗ', 'Ƛ'),
-    ('꟔', 'ꟶ'),
-    ('ꭓ', 'ꭓ'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('𐐀', '𐑏'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐔐', '𐔄'),
-    ('𐔰', '𐶅'),
-    ('𑱠', '𑣟'),
-    ('đ–č€', 'đ–čż'),
-    ('𞀀', 'đž„ƒ'),
-];
-
-pub const CHANGES_WHEN_LOWERCASED: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('À', 'Ö'),
-    ('Ø', 'Þ'),
-    ('Ā', 'Ā'),
-    ('Ă', 'Ă'),
-    ('Ą', 'Ą'),
-    ('Ć', 'Ć'),
-    ('Ĉ', 'Ĉ'),
-    ('Ċ', 'Ċ'),
-    ('Č', 'Č'),
-    ('Ď', 'Ď'),
-    ('Đ', 'Đ'),
-    ('Ē', 'Ē'),
-    ('Ĕ', 'Ĕ'),
-    ('Ė', 'Ė'),
-    ('Ę', 'Ę'),
-    ('Ě', 'Ě'),
-    ('Ĝ', 'Ĝ'),
-    ('Ğ', 'Ğ'),
-    ('Ä ', 'Ä '),
-    ('Äą', 'Äą'),
-    ('Ä€', 'Ä€'),
-    ('ÄŠ', 'ÄŠ'),
-    ('Äš', 'Äš'),
-    ('ÄȘ', 'ÄȘ'),
-    ('ÄŹ', 'ÄŹ'),
-    ('Äź', 'Äź'),
-    ('İ', 'İ'),
-    ('ÄČ', 'ÄČ'),
-    ('ÄŽ', 'ÄŽ'),
-    ('Ķ', 'Ķ'),
-    ('Äč', 'Äč'),
-    ('Ä»', 'Ä»'),
-    ('Ĝ', 'Ĝ'),
-    ('Äż', 'Äż'),
-    ('Ɓ', 'Ɓ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('Ƈ', 'Ƈ'),
-    ('Ê', 'Ê'),
-    ('Ì', 'Ì'),
-    ('Ǝ', 'Ǝ'),
-    ('Ɛ', 'Ɛ'),
-    ('Œ', 'Œ'),
-    ('Ɣ', 'Ɣ'),
-    ('Ɩ', 'Ɩ'),
-    ('Ƙ', 'Ƙ'),
-    ('Ú', 'Ú'),
-    ('Ü', 'Ü'),
-    ('ƞ', 'ƞ'),
-    ('Š', 'Š'),
-    ('Ćą', 'Ćą'),
-    ('Ć€', 'Ć€'),
-    ('ĆŠ', 'ĆŠ'),
-    ('Ćš', 'Ćš'),
-    ('ĆȘ', 'ĆȘ'),
-    ('ĆŹ', 'ĆŹ'),
-    ('Ćź', 'Ćź'),
-    ('ư', 'ư'),
-    ('ĆČ', 'ĆČ'),
-    ('ĆŽ', 'ĆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Ÿ', 'Ćč'),
-    ('Ć»', 'Ć»'),
-    ('Ćœ', 'Ćœ'),
-    ('Ɓ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ƈ'),
-    ('Ɖ', 'Ƌ'),
-    ('Ǝ', 'Ƒ'),
-    ('Ɠ', 'Ɣ'),
-    ('Ɩ', 'Ƙ'),
-    ('Ɯ', 'Ɲ'),
-    ('Ɵ', 'Ơ'),
-    ('Æą', 'Æą'),
-    ('Æ€', 'Æ€'),
-    ('Ɗ', 'Ƨ'),
-    ('Æ©', 'Æ©'),
-    ('ÆŹ', 'ÆŹ'),
-    ('Æź', 'ÆŻ'),
-    ('Ʊ', 'Æł'),
-    ('Æ”', 'Æ”'),
-    ('Æ·', 'Æž'),
-    ('ƌ', 'ƌ'),
-    ('DŽ', 'Dž'),
-    ('LJ', 'Lj'),
-    ('NJ', 'Nj'),
-    ('Ǎ', 'Ǎ'),
-    ('Ǐ', 'Ǐ'),
-    ('Ǒ', 'Ǒ'),
-    ('Ǔ', 'Ǔ'),
-    ('Ǖ', 'Ǖ'),
-    ('Ǘ', 'Ǘ'),
-    ('Ǚ', 'Ǚ'),
-    ('Ǜ', 'Ǜ'),
-    ('Ǟ', 'Ǟ'),
-    ('Ç ', 'Ç '),
-    ('Çą', 'Çą'),
-    ('Ç€', 'Ç€'),
-    ('ÇŠ', 'ÇŠ'),
-    ('Çš', 'Çš'),
-    ('ÇȘ', 'ÇȘ'),
-    ('ÇŹ', 'ÇŹ'),
-    ('Çź', 'Çź'),
-    ('DZ', 'ÇČ'),
-    ('ÇŽ', 'ÇŽ'),
-    ('Ƕ', 'Ǟ'),
-    ('Çș', 'Çș'),
-    ('nj', 'nj'),
-    ('ÇŸ', 'ÇŸ'),
-    ('Ȁ', 'Ȁ'),
-    ('Ȃ', 'Ȃ'),
-    ('Ȅ', 'Ȅ'),
-    ('Ȇ', 'Ȇ'),
-    ('Ȉ', 'Ȉ'),
-    ('Ȋ', 'Ȋ'),
-    ('Ȍ', 'Ȍ'),
-    ('Ȏ', 'Ȏ'),
-    ('Ȑ', 'Ȑ'),
-    ('Ȓ', 'Ȓ'),
-    ('Ȕ', 'Ȕ'),
-    ('Ȗ', 'Ȗ'),
-    ('Ș', 'Ș'),
-    ('Ț', 'Ț'),
-    ('Ȝ', 'Ȝ'),
-    ('Ȟ', 'Ȟ'),
-    ('È ', 'È '),
-    ('Èą', 'Èą'),
-    ('È€', 'È€'),
-    ('ÈŠ', 'ÈŠ'),
-    ('Èš', 'Èš'),
-    ('ÈȘ', 'ÈȘ'),
-    ('ÈŹ', 'ÈŹ'),
-    ('Èź', 'Èź'),
-    ('Ȱ', 'Ȱ'),
-    ('ÈČ', 'ÈČ'),
-    ('Èș', 'È»'),
-    ('Ȝ', 'ȟ'),
-    ('Ɂ', 'Ɂ'),
-    ('Ƀ', 'Ɇ'),
-    ('Ɉ', 'Ɉ'),
-    ('Ɋ', 'Ɋ'),
-    ('Ɍ', 'Ɍ'),
-    ('Ɏ', 'Ɏ'),
-    ('Ͱ', 'Ͱ'),
-    ('ÍČ', 'ÍČ'),
-    ('Ͷ', 'Ͷ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ώ'),
-    ('Α', 'Ρ'),
-    ('Σ', 'Ϋ'),
-    ('Ϗ', 'Ϗ'),
-    ('Ϙ', 'Ϙ'),
-    ('Ϛ', 'Ϛ'),
-    ('Ϝ', 'Ϝ'),
-    ('Ϟ', 'Ϟ'),
-    ('Ï ', 'Ï '),
-    ('Ïą', 'Ïą'),
-    ('Ï€', 'Ï€'),
-    ('ÏŠ', 'ÏŠ'),
-    ('Ïš', 'Ïš'),
-    ('ÏȘ', 'ÏȘ'),
-    ('ÏŹ', 'ÏŹ'),
-    ('Ïź', 'Ïź'),
-    ('ÏŽ', 'ÏŽ'),
-    ('Ï·', 'Ï·'),
-    ('Ïč', 'Ïș'),
-    ('Ïœ', 'ĐŻ'),
-    ('Ń ', 'Ń '),
-    ('Ńą', 'Ńą'),
-    ('Ń€', 'Ń€'),
-    ('ŃŠ', 'ŃŠ'),
-    ('Ńš', 'Ńš'),
-    ('ŃȘ', 'ŃȘ'),
-    ('ŃŹ', 'ŃŹ'),
-    ('Ńź', 'Ńź'),
-    ('Ѱ', 'Ѱ'),
-    ('ŃČ', 'ŃČ'),
-    ('ŃŽ', 'ŃŽ'),
-    ('Ѷ', 'Ѷ'),
-    ('Ńž', 'Ńž'),
-    ('Ńș', 'Ńș'),
-    ('ŃŒ', 'ŃŒ'),
-    ('ŃŸ', 'ŃŸ'),
-    ('Ҁ', 'Ҁ'),
-    ('Ҋ', 'Ҋ'),
-    ('Ҍ', 'Ҍ'),
-    ('Ҏ', 'Ҏ'),
-    ('Ґ', 'Ґ'),
-    ('Ғ', 'Ғ'),
-    ('Ҕ', 'Ҕ'),
-    ('Җ', 'Җ'),
-    ('Ҙ', 'Ҙ'),
-    ('Қ', 'Қ'),
-    ('Ҝ', 'Ҝ'),
-    ('Ҟ', 'Ҟ'),
-    ('Ò ', 'Ò '),
-    ('Òą', 'Òą'),
-    ('Ò€', 'Ò€'),
-    ('ÒŠ', 'ÒŠ'),
-    ('Òš', 'Òš'),
-    ('ÒȘ', 'ÒȘ'),
-    ('ÒŹ', 'ÒŹ'),
-    ('Òź', 'Òź'),
-    ('Ò°', 'Ò°'),
-    ('ÒČ', 'ÒČ'),
-    ('ÒŽ', 'ÒŽ'),
-    ('Ò¶', 'Ò¶'),
-    ('Òž', 'Òž'),
-    ('Òș', 'Òș'),
-    ('Ҍ', 'Ҍ'),
-    ('ÒŸ', 'ÒŸ'),
-    ('Ӏ', 'Ӂ'),
-    ('Ӄ', 'Ӄ'),
-    ('Ӆ', 'Ӆ'),
-    ('Ӈ', 'Ӈ'),
-    ('Ӊ', 'Ӊ'),
-    ('Ӌ', 'Ӌ'),
-    ('Ӎ', 'Ӎ'),
-    ('Ӑ', 'Ӑ'),
-    ('Ӓ', 'Ӓ'),
-    ('Ӕ', 'Ӕ'),
-    ('Ӗ', 'Ӗ'),
-    ('Ә', 'Ә'),
-    ('Ӛ', 'Ӛ'),
-    ('Ӝ', 'Ӝ'),
-    ('Ӟ', 'Ӟ'),
-    ('Ó ', 'Ó '),
-    ('Óą', 'Óą'),
-    ('Ó€', 'Ó€'),
-    ('ÓŠ', 'ÓŠ'),
-    ('Óš', 'Óš'),
-    ('ÓȘ', 'ÓȘ'),
-    ('ÓŹ', 'ÓŹ'),
-    ('Óź', 'Óź'),
-    ('Ó°', 'Ó°'),
-    ('ÓČ', 'ÓČ'),
-    ('ÓŽ', 'ÓŽ'),
-    ('Ó¶', 'Ó¶'),
-    ('Óž', 'Óž'),
-    ('Óș', 'Óș'),
-    ('ӌ', 'ӌ'),
-    ('ÓŸ', 'ÓŸ'),
-    ('Ԁ', 'Ԁ'),
-    ('Ԃ', 'Ԃ'),
-    ('Ԅ', 'Ԅ'),
-    ('Ԇ', 'Ԇ'),
-    ('Ԉ', 'Ԉ'),
-    ('Ԋ', 'Ԋ'),
-    ('Ԍ', 'Ԍ'),
-    ('Ԏ', 'Ԏ'),
-    ('Ԑ', 'Ԑ'),
-    ('Ԓ', 'Ԓ'),
-    ('Ԕ', 'Ԕ'),
-    ('Ԗ', 'Ԗ'),
-    ('Ԙ', 'Ԙ'),
-    ('Ԛ', 'Ԛ'),
-    ('Ԝ', 'Ԝ'),
-    ('Ԟ', 'Ԟ'),
-    ('Ô ', 'Ô '),
-    ('Ôą', 'Ôą'),
-    ('Ô€', 'Ô€'),
-    ('ÔŠ', 'ÔŠ'),
-    ('Ôš', 'Ôš'),
-    ('ÔȘ', 'ÔȘ'),
-    ('ÔŹ', 'ÔŹ'),
-    ('Ôź', 'Ôź'),
-    ('Ô±', 'Ֆ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('áȉ', 'áȉ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᾀ', 'ᾀ'),
-    ('ᾂ', 'ᾂ'),
-    ('ᾄ', 'ᾄ'),
-    ('ᾆ', 'ᾆ'),
-    ('ឈ', 'ឈ'),
-    ('ᾊ', 'ᾊ'),
-    ('ᾌ', 'ᾌ'),
-    ('ᾎ', 'ᾎ'),
-    ('ថ', 'ថ'),
-    ('ᾒ', 'ᾒ'),
-    ('ᾔ', 'ᾔ'),
-    ('ᾖ', 'ᾖ'),
-    ('ម', 'ម'),
-    ('ᾚ', 'ᾚ'),
-    ('ᾜ', 'ᾜ'),
-    ('ᾞ', 'ᾞ'),
-    ('áž ', 'áž '),
-    ('ážą', 'ážą'),
-    ('ក', 'ក'),
-    ('ដ', 'ដ'),
-    ('ážš', 'ážš'),
-    ('ážȘ', 'ážȘ'),
-    ('ត', 'ត'),
-    ('ážź', 'ážź'),
-    ('áž°', 'áž°'),
-    ('ážČ', 'ážČ'),
-    ('ណ', 'ណ'),
-    ('áž¶', 'áž¶'),
-    ('ážž', 'ážž'),
-    ('ážș', 'ážș'),
-    ('ឌ', 'ឌ'),
-    ('ស', 'ស'),
-    ('áč€', 'áč€'),
-    ('áč‚', 'áč‚'),
-    ('áč„', 'áč„'),
-    ('áč†', 'áč†'),
-    ('áčˆ', 'áčˆ'),
-    ('áčŠ', 'áčŠ'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč', 'áč'),
-    ('áč’', 'áč’'),
-    ('áč”', 'áč”'),
-    ('áč–', 'áč–'),
-    ('áč˜', 'áč˜'),
-    ('áčš', 'áčš'),
-    ('áčœ', 'áčœ'),
-    ('áčž', 'áčž'),
-    ('áč ', 'áč '),
-    ('áčą', 'áčą'),
-    ('áč€', 'áč€'),
-    ('áčŠ', 'áčŠ'),
-    ('áčš', 'áčš'),
-    ('áčȘ', 'áčȘ'),
-    ('áčŹ', 'áčŹ'),
-    ('áčź', 'áčź'),
-    ('áč°', 'áč°'),
-    ('áčČ', 'áčČ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč¶', 'áč¶'),
-    ('áčž', 'áčž'),
-    ('áčș', 'áčș'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŸ', 'áčŸ'),
-    ('áș€', 'áș€'),
-    ('áș‚', 'áș‚'),
-    ('áș„', 'áș„'),
-    ('áș†', 'áș†'),
-    ('áșˆ', 'áșˆ'),
-    ('áșŠ', 'áșŠ'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș', 'áș'),
-    ('áș’', 'áș’'),
-    ('áș”', 'áș”'),
-    ('áșž', 'áșž'),
-    ('áș ', 'áș '),
-    ('áșą', 'áșą'),
-    ('áș€', 'áș€'),
-    ('áșŠ', 'áșŠ'),
-    ('áșš', 'áșš'),
-    ('áșȘ', 'áșȘ'),
-    ('áșŹ', 'áșŹ'),
-    ('áșź', 'áșź'),
-    ('áș°', 'áș°'),
-    ('áșČ', 'áșČ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș¶', 'áș¶'),
-    ('áșž', 'áșž'),
-    ('áșș', 'áșș'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŸ', 'áșŸ'),
-    ('Ề', 'Ề'),
-    ('Ể', 'Ể'),
-    ('Ễ', 'Ễ'),
-    ('Ệ', 'Ệ'),
-    ('Ỉ', 'Ỉ'),
-    ('Ị', 'Ị'),
-    ('Ọ', 'Ọ'),
-    ('Ỏ', 'Ỏ'),
-    ('Ố', 'Ố'),
-    ('Ồ', 'Ồ'),
-    ('Ổ', 'Ổ'),
-    ('Ỗ', 'Ỗ'),
-    ('Ộ', 'Ộ'),
-    ('Ớ', 'Ớ'),
-    ('Ờ', 'Ờ'),
-    ('Ở', 'Ở'),
-    ('á» ', 'á» '),
-    ('ỹ', 'ỹ'),
-    ('Ề', 'Ề'),
-    ('Ị', 'Ị'),
-    ('Ớ', 'Ớ'),
-    ('á»Ș', 'á»Ș'),
-    ('ỏ', 'ỏ'),
-    ('ở', 'ở'),
-    ('á»°', 'á»°'),
-    ('á»Č', 'á»Č'),
-    ('Ỏ', 'Ỏ'),
-    ('á»¶', 'á»¶'),
-    ('Ở', 'Ở'),
-    ('á»ș', 'á»ș'),
-    ('Ọ', 'Ọ'),
-    ('ở', 'ở'),
-    ('ገ', 'ጏ'),
-    ('ጘ', 'ጝ'),
-    ('ጚ', 'áŒŻ'),
-    ('ጞ', 'áŒż'),
-    ('ᜈ', 'ᜍ'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', 'ᜟ'),
-    ('᜚', 'áœŻ'),
-    ('ៈ', '៏'),
-    ('៘', '៟'),
-    ('៚', 'áŸŻ'),
-    ('៞', '៌'),
-    ('Ὲ', 'ῌ'),
-    ('Ῐ', 'Ί'),
-    ('Ὶ', '῏'),
-    ('áżž', 'áżŒ'),
-    ('℩', '℩'),
-    ('â„Ș', 'Å'),
-    ('â„Č', 'â„Č'),
-    ('Ⅰ', 'Ⅿ'),
-    ('Ↄ', 'Ↄ'),
-    ('Ⓐ', 'Ⓩ'),
-    ('Ⰰ', 'Ⱟ'),
-    ('â± ', 'â± '),
-    ('ⱹ', 'ⱀ'),
-    ('â±§', 'â±§'),
-    ('Ⱪ', 'Ⱪ'),
-    ('Ⱬ', 'Ⱬ'),
-    ('â±­', 'â±°'),
-    ('â±Č', 'â±Č'),
-    ('â±”', 'â±”'),
-    ('ⱟ', 'âȀ'),
-    ('âȂ', 'âȂ'),
-    ('âȄ', 'âȄ'),
-    ('âȆ', 'âȆ'),
-    ('âȈ', 'âȈ'),
-    ('âȊ', 'âȊ'),
-    ('âȌ', 'âȌ'),
-    ('âȎ', 'âȎ'),
-    ('âȐ', 'âȐ'),
-    ('âȒ', 'âȒ'),
-    ('âȔ', 'âȔ'),
-    ('âȖ', 'âȖ'),
-    ('âȘ', 'âȘ'),
-    ('âȚ', 'âȚ'),
-    ('âȜ', 'âȜ'),
-    ('âȞ', 'âȞ'),
-    ('âČ ', 'âČ '),
-    ('âČą', 'âČą'),
-    ('âČ€', 'âČ€'),
-    ('âČŠ', 'âČŠ'),
-    ('âČš', 'âČš'),
-    ('âČȘ', 'âČȘ'),
-    ('âČŹ', 'âČŹ'),
-    ('âČź', 'âČź'),
-    ('âȰ', 'âȰ'),
-    ('âČČ', 'âČČ'),
-    ('âČŽ', 'âČŽ'),
-    ('âȶ', 'âȶ'),
-    ('âČž', 'âČž'),
-    ('âČș', 'âČș'),
-    ('âČŒ', 'âČŒ'),
-    ('âČŸ', 'âČŸ'),
-    ('Ⳁ', 'Ⳁ'),
-    ('Ⳃ', 'Ⳃ'),
-    ('Ⳅ', 'Ⳅ'),
-    ('Ⳇ', 'Ⳇ'),
-    ('Ⳉ', 'Ⳉ'),
-    ('Ⳋ', 'Ⳋ'),
-    ('Ⳍ', 'Ⳍ'),
-    ('Ⳏ', 'Ⳏ'),
-    ('Ⳑ', 'Ⳑ'),
-    ('Ⳓ', 'Ⳓ'),
-    ('Ⳕ', 'Ⳕ'),
-    ('Ⳗ', 'Ⳗ'),
-    ('Ⳙ', 'Ⳙ'),
-    ('Ⳛ', 'Ⳛ'),
-    ('Ⳝ', 'Ⳝ'),
-    ('Ⳟ', 'Ⳟ'),
-    ('âł ', 'âł '),
-    ('âłą', 'âłą'),
-    ('âł«', 'âł«'),
-    ('âł­', 'âł­'),
-    ('âłČ', 'âłČ'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙃ', 'Ꙃ'),
-    ('Ꙅ', 'Ꙅ'),
-    ('Ꙇ', 'Ꙇ'),
-    ('Ꙉ', 'Ꙉ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙍ', 'Ꙍ'),
-    ('Ꙏ', 'Ꙏ'),
-    ('Ꙑ', 'Ꙑ'),
-    ('Ꙓ', 'Ꙓ'),
-    ('Ꙕ', 'Ꙕ'),
-    ('Ꙗ', 'Ꙗ'),
-    ('Ꙙ', 'Ꙙ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('Ꙝ', 'Ꙝ'),
-    ('Ꙟ', 'Ꙟ'),
-    ('Ꙡ', 'Ꙡ'),
-    ('ê™ą', 'ê™ą'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('ê™Ș', 'ê™Ș'),
-    ('ê™Ź', 'ê™Ź'),
-    ('Ꚁ', 'Ꚁ'),
-    ('Ꚃ', 'Ꚃ'),
-    ('Ꚅ', 'Ꚅ'),
-    ('Ꚇ', 'Ꚇ'),
-    ('Ꚉ', 'Ꚉ'),
-    ('Ꚋ', 'Ꚋ'),
-    ('Ꚍ', 'Ꚍ'),
-    ('Ꚏ', 'Ꚏ'),
-    ('Ꚑ', 'Ꚑ'),
-    ('Ꚓ', 'Ꚓ'),
-    ('Ꚕ', 'Ꚕ'),
-    ('Ꚗ', 'Ꚗ'),
-    ('Ꚙ', 'Ꚙ'),
-    ('Ꚛ', 'Ꚛ'),
-    ('êœą', 'êœą'),
-    ('꜀', '꜀'),
-    ('꜊', '꜊'),
-    ('ꜚ', 'ꜚ'),
-    ('êœȘ', 'êœȘ'),
-    ('êœŹ', 'êœŹ'),
-    ('êœź', 'êœź'),
-    ('êœČ', 'êœČ'),
-    ('꜎', '꜎'),
-    ('Ꜷ', 'Ꜷ'),
-    ('ꜞ', 'ꜞ'),
-    ('êœș', 'êœș'),
-    ('꜌', '꜌'),
-    ('ꜟ', 'ꜟ'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝃ', 'Ꝃ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('Ꝇ', 'Ꝇ'),
-    ('Ꝉ', 'Ꝉ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝍ', 'Ꝍ'),
-    ('Ꝏ', 'Ꝏ'),
-    ('Ꝑ', 'Ꝑ'),
-    ('Ꝓ', 'Ꝓ'),
-    ('Ꝕ', 'Ꝕ'),
-    ('Ꝗ', 'Ꝗ'),
-    ('Ꝙ', 'Ꝙ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('Ꝝ', 'Ꝝ'),
-    ('Ꝟ', 'Ꝟ'),
-    ('Ꝡ', 'Ꝡ'),
-    ('êą', 'êą'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('êȘ', 'êȘ'),
-    ('êŹ', 'êŹ'),
-    ('êź', 'êź'),
-    ('êč', 'êč'),
-    ('Ꝼ', 'Ꝼ'),
-    ('Ꝝ', 'ꝟ'),
-    ('Ꞁ', 'Ꞁ'),
-    ('Ꞃ', 'Ꞃ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('Ꞇ', 'Ꞇ'),
-    ('Ꞌ', 'Ꞌ'),
-    ('Ɥ', 'Ɥ'),
-    ('Ꞑ', 'Ꞑ'),
-    ('Ꞓ', 'Ꞓ'),
-    ('Ꞗ', 'Ꞗ'),
-    ('Ꞙ', 'Ꞙ'),
-    ('Ꞛ', 'Ꞛ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('Ꞟ', 'Ꞟ'),
-    ('Ꞡ', 'Ꞡ'),
-    ('êžą', 'êžą'),
-    ('Ꞁ', 'Ꞁ'),
-    ('꞊', '꞊'),
-    ('Ꞛ', 'Ꞛ'),
-    ('êžȘ', 'êžź'),
-    ('Ʞ', 'ꞎ'),
-    ('Ꞷ', 'Ꞷ'),
-    ('êžž', 'êžž'),
-    ('êžș', 'êžș'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞟ', 'ꞟ'),
-    ('Ꟁ', 'Ꟁ'),
-    ('Ꟃ', 'Ꟃ'),
-    ('Ꞔ', 'Ꟈ'),
-    ('Ꟊ', 'Ꟊ'),
-    ('Ɤ', 'Ꟍ'),
-    ('Ꟑ', 'Ꟑ'),
-    ('Ꟗ', 'Ꟗ'),
-    ('Ꟙ', 'Ꟙ'),
-    ('Ꟛ', 'Ꟛ'),
-    ('Ƛ', 'Ƛ'),
-    ('꟔', '꟔'),
-    ('ïŒĄ', 'ïŒș'),
-    ('𐐀', '𐐧'),
-    ('𐒰', '𐓓'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('đČ€', 'đČČ'),
-    ('𐔐', '𐔄'),
-    ('𑱠', '𑱿'),
-    ('đ–č€', 'đ–čŸ'),
-    ('𞀀', '𞀥'),
-];
-
-pub const CHANGES_WHEN_TITLECASED: &'static [(char, char)] = &[
-    ('a', 'z'),
-    ('µ', 'µ'),
-    ('ß', 'ö'),
-    ('ø', 'ÿ'),
-    ('ā', 'ā'),
-    ('ă', 'ă'),
-    ('ą', 'ą'),
-    ('ć', 'ć'),
-    ('ĉ', 'ĉ'),
-    ('ċ', 'ċ'),
-    ('č', 'č'),
-    ('ď', 'ď'),
-    ('đ', 'đ'),
-    ('ē', 'ē'),
-    ('ĕ', 'ĕ'),
-    ('ė', 'ė'),
-    ('ę', 'ę'),
-    ('ě', 'ě'),
-    ('ĝ', 'ĝ'),
-    ('ğ', 'ğ'),
-    ('ÄĄ', 'ÄĄ'),
-    ('ÄŁ', 'ÄŁ'),
-    ('Ä„', 'Ä„'),
-    ('ħ', 'ħ'),
-    ('Ä©', 'Ä©'),
-    ('Ä«', 'Ä«'),
-    ('Ä­', 'Ä­'),
-    ('ÄŻ', 'ÄŻ'),
-    ('ı', 'ı'),
-    ('Äł', 'Äł'),
-    ('Ä”', 'Ä”'),
-    ('Ä·', 'Ä·'),
-    ('Äș', 'Äș'),
-    ('Č', 'Č'),
-    ('ÄŸ', 'ÄŸ'),
-    ('ƀ', 'ƀ'),
-    ('Ƃ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ɔ'),
-    ('ƈ', 'Ɖ'),
-    ('Ƌ', 'Ƌ'),
-    ('ƍ', 'ƍ'),
-    ('Ə', 'Ə'),
-    ('Ƒ', 'Ƒ'),
-    ('œ', 'œ'),
-    ('ƕ', 'ƕ'),
-    ('Ɨ', 'Ɨ'),
-    ('ƙ', 'ƙ'),
-    ('ƛ', 'ƛ'),
-    ('Ɲ', 'Ɲ'),
-    ('ß', 'ß'),
-    ('š', 'š'),
-    ('ĆŁ', 'ĆŁ'),
-    ('Ć„', 'Ć„'),
-    ('Ƨ', 'Ƨ'),
-    ('Ć©', 'Ć©'),
-    ('Ć«', 'Ć«'),
-    ('Ć­', 'Ć­'),
-    ('ĆŻ', 'ĆŻ'),
-    ('Ʊ', 'Ʊ'),
-    ('Ćł', 'Ćł'),
-    ('Ć”', 'Ć”'),
-    ('Ć·', 'Ć·'),
-    ('Ćș', 'Ćș'),
-    ('ĆŒ', 'ĆŒ'),
-    ('ĆŸ', 'ƀ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('ƈ', 'ƈ'),
-    ('ƌ', 'ƌ'),
-    ('ƒ', 'ƒ'),
-    ('ƕ', 'ƕ'),
-    ('ƙ', 'ƛ'),
-    ('ƞ', 'ƞ'),
-    ('ÆĄ', 'ÆĄ'),
-    ('ÆŁ', 'ÆŁ'),
-    ('Æ„', 'Æ„'),
-    ('Æš', 'Æš'),
-    ('Æ­', 'Æ­'),
-    ('ư', 'ư'),
-    ('ÆŽ', 'ÆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Æč', 'Æč'),
-    ('Ɯ', 'Ɯ'),
-    ('Æż', 'Æż'),
-    ('DŽ', 'DŽ'),
-    ('dž', 'LJ'),
-    ('lj', 'NJ'),
-    ('nj', 'nj'),
-    ('ǎ', 'ǎ'),
-    ('ǐ', 'ǐ'),
-    ('ǒ', 'ǒ'),
-    ('ǔ', 'ǔ'),
-    ('ǖ', 'ǖ'),
-    ('ǘ', 'ǘ'),
-    ('ǚ', 'ǚ'),
-    ('ǜ', 'ǝ'),
-    ('ǟ', 'ǟ'),
-    ('ÇĄ', 'ÇĄ'),
-    ('ÇŁ', 'ÇŁ'),
-    ('Ç„', 'Ç„'),
-    ('ǧ', 'ǧ'),
-    ('Ç©', 'Ç©'),
-    ('Ç«', 'Ç«'),
-    ('Ç­', 'Ç­'),
-    ('ǯ', 'DZ'),
-    ('Çł', 'Çł'),
-    ('Ç”', 'Ç”'),
-    ('Çč', 'Çč'),
-    ('Ç»', 'Ç»'),
-    ('ǜ', 'ǜ'),
-    ('Çż', 'Çż'),
-    ('ȁ', 'ȁ'),
-    ('ȃ', 'ȃ'),
-    ('ȅ', 'ȅ'),
-    ('ȇ', 'ȇ'),
-    ('ȉ', 'ȉ'),
-    ('ȋ', 'ȋ'),
-    ('ȍ', 'ȍ'),
-    ('ȏ', 'ȏ'),
-    ('ȑ', 'ȑ'),
-    ('ȓ', 'ȓ'),
-    ('ȕ', 'ȕ'),
-    ('ȗ', 'ȗ'),
-    ('ș', 'ș'),
-    ('ț', 'ț'),
-    ('ȝ', 'ȝ'),
-    ('ȟ', 'ȟ'),
-    ('ÈŁ', 'ÈŁ'),
-    ('È„', 'È„'),
-    ('ȧ', 'ȧ'),
-    ('È©', 'È©'),
-    ('È«', 'È«'),
-    ('È­', 'È­'),
-    ('ÈŻ', 'ÈŻ'),
-    ('ȱ', 'ȱ'),
-    ('Èł', 'Èł'),
-    ('Ȍ', 'Ȍ'),
-    ('Èż', 'ɀ'),
-    ('ɂ', 'ɂ'),
-    ('ɇ', 'ɇ'),
-    ('ɉ', 'ɉ'),
-    ('ɋ', 'ɋ'),
-    ('ɍ', 'ɍ'),
-    ('ɏ', 'ɔ'),
-    ('ɖ', 'ɗ'),
-    ('ə', 'ə'),
-    ('ɛ', 'ɜ'),
-    ('É ', 'ÉĄ'),
-    ('ÉŁ', 'ÉŠ'),
-    ('Éš', 'ÉŹ'),
-    ('ÉŻ', 'ÉŻ'),
-    ('ɱ', 'ÉČ'),
-    ('É”', 'É”'),
-    ('ɜ', 'ɜ'),
-    ('ʀ', 'ʀ'),
-    ('ʂ', 'ʃ'),
-    ('ʇ', 'ʌ'),
-    ('ʒ', 'ʒ'),
-    ('ʝ', 'ʞ'),
-    ('\u{345}', '\u{345}'),
-    ('ͱ', 'ͱ'),
-    ('Íł', 'Íł'),
-    ('Í·', 'Í·'),
-    ('ͻ', '͜'),
-    ('ΐ', 'ΐ'),
-    ('ÎŹ', 'ώ'),
-    ('ϐ', 'ϑ'),
-    ('ϕ', 'ϗ'),
-    ('ϙ', 'ϙ'),
-    ('ϛ', 'ϛ'),
-    ('ϝ', 'ϝ'),
-    ('ϟ', 'ϟ'),
-    ('ÏĄ', 'ÏĄ'),
-    ('ÏŁ', 'ÏŁ'),
-    ('Ï„', 'Ï„'),
-    ('ϧ', 'ϧ'),
-    ('Ï©', 'Ï©'),
-    ('Ï«', 'Ï«'),
-    ('Ï­', 'Ï­'),
-    ('ÏŻ', 'Ïł'),
-    ('Ï”', 'Ï”'),
-    ('Ïž', 'Ïž'),
-    ('Ï»', 'Ï»'),
-    ('а', 'џ'),
-    ('ŃĄ', 'ŃĄ'),
-    ('ŃŁ', 'ŃŁ'),
-    ('Ń„', 'Ń„'),
-    ('ѧ', 'ѧ'),
-    ('Ń©', 'Ń©'),
-    ('Ń«', 'Ń«'),
-    ('Ń­', 'Ń­'),
-    ('ŃŻ', 'ŃŻ'),
-    ('ѱ', 'ѱ'),
-    ('Ńł', 'Ńł'),
-    ('Ń”', 'Ń”'),
-    ('Ń·', 'Ń·'),
-    ('Ńč', 'Ńč'),
-    ('Ń»', 'Ń»'),
-    ('Ńœ', 'Ńœ'),
-    ('Ńż', 'Ńż'),
-    ('ҁ', 'ҁ'),
-    ('ҋ', 'ҋ'),
-    ('ҍ', 'ҍ'),
-    ('ҏ', 'ҏ'),
-    ('ґ', 'ґ'),
-    ('ғ', 'ғ'),
-    ('ҕ', 'ҕ'),
-    ('җ', 'җ'),
-    ('ҙ', 'ҙ'),
-    ('қ', 'қ'),
-    ('ҝ', 'ҝ'),
-    ('ҟ', 'ҟ'),
-    ('ÒĄ', 'ÒĄ'),
-    ('ÒŁ', 'ÒŁ'),
-    ('Ò„', 'Ò„'),
-    ('Ò§', 'Ò§'),
-    ('Ò©', 'Ò©'),
-    ('Ò«', 'Ò«'),
-    ('Ò­', 'Ò­'),
-    ('ÒŻ', 'ÒŻ'),
-    ('Ò±', 'Ò±'),
-    ('Òł', 'Òł'),
-    ('Ò”', 'Ò”'),
-    ('Ò·', 'Ò·'),
-    ('Òč', 'Òč'),
-    ('Ò»', 'Ò»'),
-    ('Ҝ', 'Ҝ'),
-    ('Òż', 'Òż'),
-    ('ӂ', 'ӂ'),
-    ('ӄ', 'ӄ'),
-    ('ӆ', 'ӆ'),
-    ('ӈ', 'ӈ'),
-    ('ӊ', 'ӊ'),
-    ('ӌ', 'ӌ'),
-    ('ӎ', 'ӏ'),
-    ('ӑ', 'ӑ'),
-    ('ӓ', 'ӓ'),
-    ('ӕ', 'ӕ'),
-    ('ӗ', 'ӗ'),
-    ('ә', 'ә'),
-    ('ӛ', 'ӛ'),
-    ('ӝ', 'ӝ'),
-    ('ӟ', 'ӟ'),
-    ('ÓĄ', 'ÓĄ'),
-    ('ÓŁ', 'ÓŁ'),
-    ('Ó„', 'Ó„'),
-    ('Ó§', 'Ó§'),
-    ('Ó©', 'Ó©'),
-    ('Ó«', 'Ó«'),
-    ('Ó­', 'Ó­'),
-    ('ÓŻ', 'ÓŻ'),
-    ('Ó±', 'Ó±'),
-    ('Ół', 'Ół'),
-    ('Ó”', 'Ó”'),
-    ('Ó·', 'Ó·'),
-    ('Óč', 'Óč'),
-    ('Ó»', 'Ó»'),
-    ('Ӝ', 'Ӝ'),
-    ('Óż', 'Óż'),
-    ('ԁ', 'ԁ'),
-    ('ԃ', 'ԃ'),
-    ('ԅ', 'ԅ'),
-    ('ԇ', 'ԇ'),
-    ('ԉ', 'ԉ'),
-    ('ԋ', 'ԋ'),
-    ('ԍ', 'ԍ'),
-    ('ԏ', 'ԏ'),
-    ('ԑ', 'ԑ'),
-    ('ԓ', 'ԓ'),
-    ('ԕ', 'ԕ'),
-    ('ԗ', 'ԗ'),
-    ('ԙ', 'ԙ'),
-    ('ԛ', 'ԛ'),
-    ('ԝ', 'ԝ'),
-    ('ԟ', 'ԟ'),
-    ('ÔĄ', 'ÔĄ'),
-    ('ÔŁ', 'ÔŁ'),
-    ('Ô„', 'Ô„'),
-    ('Ô§', 'Ô§'),
-    ('Ô©', 'Ô©'),
-    ('Ô«', 'Ô«'),
-    ('Ô­', 'Ô­'),
-    ('ÔŻ', 'ÔŻ'),
-    ('ա', 'և'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȈ'),
-    ('áȊ', 'áȊ'),
-    ('á”č', 'á”č'),
-    ('ᔜ', 'ᔜ'),
-    ('ᶎ', 'ᶎ'),
-    ('ខ', 'ខ'),
-    ('ឃ', 'ឃ'),
-    ('ᾅ', 'ᾅ'),
-    ('ᾇ', 'ᾇ'),
-    ('ᾉ', 'ᾉ'),
-    ('ᾋ', 'ᾋ'),
-    ('ឍ', 'ឍ'),
-    ('ត', 'ត'),
-    ('ᾑ', 'ᾑ'),
-    ('ᾓ', 'ᾓ'),
-    ('ᾕ', 'ᾕ'),
-    ('ᾗ', 'ᾗ'),
-    ('ᾙ', 'ᾙ'),
-    ('ᾛ', 'ᾛ'),
-    ('ឝ', 'ឝ'),
-    ('ᾟ', 'ᾟ'),
-    ('ឥ', 'ឥ'),
-    ('ឣ', 'ឣ'),
-    ('áž„', 'áž„'),
-    ('áž§', 'áž§'),
-    ('áž©', 'áž©'),
-    ('áž«', 'áž«'),
-    ('áž­', 'áž­'),
-    ('ឯ', 'ឯ'),
-    ('áž±', 'áž±'),
-    ('ážł', 'ážł'),
-    ('áž”', 'áž”'),
-    ('áž·', 'áž·'),
-    ('ážč', 'ážč'),
-    ('áž»', 'áž»'),
-    ('វ', 'វ'),
-    ('ážż', 'ážż'),
-    ('áč', 'áč'),
-    ('áčƒ', 'áčƒ'),
-    ('áč…', 'áč…'),
-    ('áč‡', 'áč‡'),
-    ('áč‰', 'áč‰'),
-    ('áč‹', 'áč‹'),
-    ('áč', 'áč'),
-    ('áč', 'áč'),
-    ('áč‘', 'áč‘'),
-    ('áč“', 'áč“'),
-    ('áč•', 'áč•'),
-    ('áč—', 'áč—'),
-    ('áč™', 'áč™'),
-    ('áč›', 'áč›'),
-    ('áč', 'áč'),
-    ('áčŸ', 'áčŸ'),
-    ('áčĄ', 'áčĄ'),
-    ('áčŁ', 'áčŁ'),
-    ('áč„', 'áč„'),
-    ('áč§', 'áč§'),
-    ('áč©', 'áč©'),
-    ('áč«', 'áč«'),
-    ('áč­', 'áč­'),
-    ('áčŻ', 'áčŻ'),
-    ('áč±', 'áč±'),
-    ('áčł', 'áčł'),
-    ('áč”', 'áč”'),
-    ('áč·', 'áč·'),
-    ('áčč', 'áčč'),
-    ('áč»', 'áč»'),
-    ('áčœ', 'áčœ'),
-    ('áčż', 'áčż'),
-    ('áș', 'áș'),
-    ('áșƒ', 'áșƒ'),
-    ('áș…', 'áș…'),
-    ('áș‡', 'áș‡'),
-    ('áș‰', 'áș‰'),
-    ('áș‹', 'áș‹'),
-    ('áș', 'áș'),
-    ('áș', 'áș'),
-    ('áș‘', 'áș‘'),
-    ('áș“', 'áș“'),
-    ('áș•', 'áș›'),
-    ('áșĄ', 'áșĄ'),
-    ('áșŁ', 'áșŁ'),
-    ('áș„', 'áș„'),
-    ('áș§', 'áș§'),
-    ('áș©', 'áș©'),
-    ('áș«', 'áș«'),
-    ('áș­', 'áș­'),
-    ('áșŻ', 'áșŻ'),
-    ('áș±', 'áș±'),
-    ('áșł', 'áșł'),
-    ('áș”', 'áș”'),
-    ('áș·', 'áș·'),
-    ('áșč', 'áșč'),
-    ('áș»', 'áș»'),
-    ('áșœ', 'áșœ'),
-    ('áșż', 'áșż'),
-    ('ề', 'ề'),
-    ('ể', 'ể'),
-    ('ễ', 'ễ'),
-    ('ệ', 'ệ'),
-    ('ỉ', 'ỉ'),
-    ('ị', 'ị'),
-    ('ọ', 'ọ'),
-    ('ỏ', 'ỏ'),
-    ('ố', 'ố'),
-    ('ồ', 'ồ'),
-    ('ổ', 'ổ'),
-    ('ỗ', 'ỗ'),
-    ('ộ', 'ộ'),
-    ('ớ', 'ớ'),
-    ('ờ', 'ờ'),
-    ('ở', 'ở'),
-    ('ụ', 'ụ'),
-    ('ợ', 'ợ'),
-    ('Ễ', 'Ễ'),
-    ('á»§', 'á»§'),
-    ('ứ', 'ứ'),
-    ('ừ', 'ừ'),
-    ('á»­', 'á»­'),
-    ('ữ', 'ữ'),
-    ('á»±', 'á»±'),
-    ('ỳ', 'ỳ'),
-    ('á»”', 'á»”'),
-    ('á»·', 'á»·'),
-    ('á»č', 'á»č'),
-    ('á»»', 'á»»'),
-    ('Ờ', 'Ờ'),
-    ('ỿ', 'ጇ'),
-    ('ጐ', 'ጕ'),
-    ('ጠ', 'ጧ'),
-    ('ጰ', 'ጷ'),
-    ('ᜀ', 'ᜅ'),
-    ('ᜐ', '᜗'),
-    ('ᜠ', 'ᜧ'),
-    ('ᜰ', '᜜'),
-    ('ៀ', 'ះ'),
-    ('័', 'ៗ'),
-    ('០', '៧'),
-    ('៰', '៎'),
-    ('៶', '៷'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῇ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'ῗ'),
-    ('áż ', 'áż§'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áż·'),
-    ('ⅎ', 'ⅎ'),
-    ('ⅰ', 'ⅿ'),
-    ('ↄ', 'ↄ'),
-    ('ⓐ', 'ⓩ'),
-    ('ⰰ', 'ⱟ'),
-    ('ⱥ', 'ⱥ'),
-    ('ⱄ', 'ⱊ'),
-    ('ⱚ', 'ⱚ'),
-    ('â±Ș', 'â±Ș'),
-    ('ⱏ', 'ⱏ'),
-    ('ⱳ', 'ⱳ'),
-    ('â±¶', 'â±¶'),
-    ('âȁ', 'âȁ'),
-    ('âȃ', 'âȃ'),
-    ('âȅ', 'âȅ'),
-    ('âȇ', 'âȇ'),
-    ('âȉ', 'âȉ'),
-    ('âȋ', 'âȋ'),
-    ('âȍ', 'âȍ'),
-    ('âȏ', 'âȏ'),
-    ('âȑ', 'âȑ'),
-    ('âȓ', 'âȓ'),
-    ('âȕ', 'âȕ'),
-    ('âȗ', 'âȗ'),
-    ('âș', 'âș'),
-    ('âț', 'âț'),
-    ('âȝ', 'âȝ'),
-    ('âȟ', 'âȟ'),
-    ('âČĄ', 'âČĄ'),
-    ('âČŁ', 'âČŁ'),
-    ('âČ„', 'âČ„'),
-    ('âȧ', 'âȧ'),
-    ('âČ©', 'âČ©'),
-    ('âČ«', 'âČ«'),
-    ('âČ­', 'âČ­'),
-    ('âČŻ', 'âČŻ'),
-    ('âȱ', 'âȱ'),
-    ('âČł', 'âČł'),
-    ('âČ”', 'âČ”'),
-    ('âČ·', 'âČ·'),
-    ('âČč', 'âČč'),
-    ('âČ»', 'âČ»'),
-    ('âČœ', 'âČœ'),
-    ('âČż', 'âČż'),
-    ('ⳁ', 'ⳁ'),
-    ('ⳃ', 'ⳃ'),
-    ('ⳅ', 'ⳅ'),
-    ('ⳇ', 'ⳇ'),
-    ('ⳉ', 'ⳉ'),
-    ('ⳋ', 'ⳋ'),
-    ('ⳍ', 'ⳍ'),
-    ('ⳏ', 'ⳏ'),
-    ('ⳑ', 'ⳑ'),
-    ('ⳓ', 'ⳓ'),
-    ('ⳕ', 'ⳕ'),
-    ('ⳗ', 'ⳗ'),
-    ('ⳙ', 'ⳙ'),
-    ('ⳛ', 'ⳛ'),
-    ('ⳝ', 'ⳝ'),
-    ('ⳟ', 'ⳟ'),
-    ('⳥', '⳥'),
-    ('ⳣ', 'ⳣ'),
-    ('ⳏ', 'ⳏ'),
-    ('âłź', 'âłź'),
-    ('âłł', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('ꙁ', 'ꙁ'),
-    ('ꙃ', 'ꙃ'),
-    ('ꙅ', 'ꙅ'),
-    ('ꙇ', 'ꙇ'),
-    ('ꙉ', 'ꙉ'),
-    ('ꙋ', 'ꙋ'),
-    ('ꙍ', 'ꙍ'),
-    ('ꙏ', 'ꙏ'),
-    ('ꙑ', 'ꙑ'),
-    ('ꙓ', 'ꙓ'),
-    ('ꙕ', 'ꙕ'),
-    ('ꙗ', 'ꙗ'),
-    ('ꙙ', 'ꙙ'),
-    ('ꙛ', 'ꙛ'),
-    ('ꙝ', 'ꙝ'),
-    ('ꙟ', 'ꙟ'),
-    ('ê™Ą', 'ê™Ą'),
-    ('ê™Ł', 'ê™Ł'),
-    ('Ꙅ', 'Ꙅ'),
-    ('ꙧ', 'ꙧ'),
-    ('ꙩ', 'ꙩ'),
-    ('ꙫ', 'ꙫ'),
-    ('ꙭ', 'ꙭ'),
-    ('ꚁ', 'ꚁ'),
-    ('ꚃ', 'ꚃ'),
-    ('ꚅ', 'ꚅ'),
-    ('ꚇ', 'ꚇ'),
-    ('ꚉ', 'ꚉ'),
-    ('ꚋ', 'ꚋ'),
-    ('ꚍ', 'ꚍ'),
-    ('ꚏ', 'ꚏ'),
-    ('ꚑ', 'ꚑ'),
-    ('ꚓ', 'ꚓ'),
-    ('ꚕ', 'ꚕ'),
-    ('ꚗ', 'ꚗ'),
-    ('ꚙ', 'ꚙ'),
-    ('ꚛ', 'ꚛ'),
-    ('êœŁ', 'êœŁ'),
-    ('꜄', '꜄'),
-    ('ꜧ', 'ꜧ'),
-    ('ꜩ', 'ꜩ'),
-    ('ꜫ', 'ꜫ'),
-    ('ꜭ', 'ꜭ'),
-    ('êœŻ', 'êœŻ'),
-    ('êœł', 'êœł'),
-    ('꜔', '꜔'),
-    ('ꜷ', 'ꜷ'),
-    ('êœč', 'êœč'),
-    ('ꜻ', 'ꜻ'),
-    ('ꜜ', 'ꜜ'),
-    ('êœż', 'êœż'),
-    ('ꝁ', 'ꝁ'),
-    ('ꝃ', 'ꝃ'),
-    ('ꝅ', 'ꝅ'),
-    ('ꝇ', 'ꝇ'),
-    ('ꝉ', 'ꝉ'),
-    ('ꝋ', 'ꝋ'),
-    ('ꝍ', 'ꝍ'),
-    ('ꝏ', 'ꝏ'),
-    ('ꝑ', 'ꝑ'),
-    ('ꝓ', 'ꝓ'),
-    ('ꝕ', 'ꝕ'),
-    ('ꝗ', 'ꝗ'),
-    ('ꝙ', 'ꝙ'),
-    ('ꝛ', 'ꝛ'),
-    ('ꝝ', 'ꝝ'),
-    ('ꝟ', 'ꝟ'),
-    ('êĄ', 'êĄ'),
-    ('êŁ', 'êŁ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('ꝧ', 'ꝧ'),
-    ('ꝩ', 'ꝩ'),
-    ('ꝫ', 'ꝫ'),
-    ('ꝭ', 'ꝭ'),
-    ('êŻ', 'êŻ'),
-    ('êș', 'êș'),
-    ('Ꝍ', 'Ꝍ'),
-    ('êż', 'êż'),
-    ('ꞁ', 'ꞁ'),
-    ('ꞃ', 'ꞃ'),
-    ('ꞅ', 'ꞅ'),
-    ('ꞇ', 'ꞇ'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞑ', 'ꞑ'),
-    ('ꞓ', 'ꞔ'),
-    ('ꞗ', 'ꞗ'),
-    ('ꞙ', 'ꞙ'),
-    ('ꞛ', 'ꞛ'),
-    ('ꞝ', 'ꞝ'),
-    ('ꞟ', 'ꞟ'),
-    ('êžĄ', 'êžĄ'),
-    ('êžŁ', 'êžŁ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('ꞧ', 'ꞧ'),
-    ('ꞩ', 'ꞩ'),
-    ('ꞔ', 'ꞔ'),
-    ('ꞷ', 'ꞷ'),
-    ('êžč', 'êžč'),
-    ('ꞻ', 'ꞻ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('êžż', 'êžż'),
-    ('ꟁ', 'ꟁ'),
-    ('ꟃ', 'ꟃ'),
-    ('ꟈ', 'ꟈ'),
-    ('ꟊ', 'ꟊ'),
-    ('ꟍ', 'ꟍ'),
-    ('ꟑ', 'ꟑ'),
-    ('ꟗ', 'ꟗ'),
-    ('ꟙ', 'ꟙ'),
-    ('ꟛ', 'ꟛ'),
-    ('ꟶ', 'ꟶ'),
-    ('ꭓ', 'ꭓ'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('', ''),
-    ('𐐹', '𐑏'),
-    ('𐓘', '𐓻'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐳀', 'đłČ'),
-    ('𐔰', '𐶅'),
-    ('𑣀', '𑣟'),
-    ('đ–č ', 'đ–čż'),
-    ('𞀹', 'đž„ƒ'),
-];
-
-pub const CHANGES_WHEN_UPPERCASED: &'static [(char, char)] = &[
-    ('a', 'z'),
-    ('µ', 'µ'),
-    ('ß', 'ö'),
-    ('ø', 'ÿ'),
-    ('ā', 'ā'),
-    ('ă', 'ă'),
-    ('ą', 'ą'),
-    ('ć', 'ć'),
-    ('ĉ', 'ĉ'),
-    ('ċ', 'ċ'),
-    ('č', 'č'),
-    ('ď', 'ď'),
-    ('đ', 'đ'),
-    ('ē', 'ē'),
-    ('ĕ', 'ĕ'),
-    ('ė', 'ė'),
-    ('ę', 'ę'),
-    ('ě', 'ě'),
-    ('ĝ', 'ĝ'),
-    ('ğ', 'ğ'),
-    ('ÄĄ', 'ÄĄ'),
-    ('ÄŁ', 'ÄŁ'),
-    ('Ä„', 'Ä„'),
-    ('ħ', 'ħ'),
-    ('Ä©', 'Ä©'),
-    ('Ä«', 'Ä«'),
-    ('Ä­', 'Ä­'),
-    ('ÄŻ', 'ÄŻ'),
-    ('ı', 'ı'),
-    ('Äł', 'Äł'),
-    ('Ä”', 'Ä”'),
-    ('Ä·', 'Ä·'),
-    ('Äș', 'Äș'),
-    ('Č', 'Č'),
-    ('ÄŸ', 'ÄŸ'),
-    ('ƀ', 'ƀ'),
-    ('Ƃ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ɔ'),
-    ('ƈ', 'Ɖ'),
-    ('Ƌ', 'Ƌ'),
-    ('ƍ', 'ƍ'),
-    ('Ə', 'Ə'),
-    ('Ƒ', 'Ƒ'),
-    ('œ', 'œ'),
-    ('ƕ', 'ƕ'),
-    ('Ɨ', 'Ɨ'),
-    ('ƙ', 'ƙ'),
-    ('ƛ', 'ƛ'),
-    ('Ɲ', 'Ɲ'),
-    ('ß', 'ß'),
-    ('š', 'š'),
-    ('ĆŁ', 'ĆŁ'),
-    ('Ć„', 'Ć„'),
-    ('Ƨ', 'Ƨ'),
-    ('Ć©', 'Ć©'),
-    ('Ć«', 'Ć«'),
-    ('Ć­', 'Ć­'),
-    ('ĆŻ', 'ĆŻ'),
-    ('Ʊ', 'Ʊ'),
-    ('Ćł', 'Ćł'),
-    ('Ć”', 'Ć”'),
-    ('Ć·', 'Ć·'),
-    ('Ćș', 'Ćș'),
-    ('ĆŒ', 'ĆŒ'),
-    ('ĆŸ', 'ƀ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('ƈ', 'ƈ'),
-    ('ƌ', 'ƌ'),
-    ('ƒ', 'ƒ'),
-    ('ƕ', 'ƕ'),
-    ('ƙ', 'ƛ'),
-    ('ƞ', 'ƞ'),
-    ('ÆĄ', 'ÆĄ'),
-    ('ÆŁ', 'ÆŁ'),
-    ('Æ„', 'Æ„'),
-    ('Æš', 'Æš'),
-    ('Æ­', 'Æ­'),
-    ('ư', 'ư'),
-    ('ÆŽ', 'ÆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Æč', 'Æč'),
-    ('Ɯ', 'Ɯ'),
-    ('Æż', 'Æż'),
-    ('Dž', 'dž'),
-    ('Lj', 'lj'),
-    ('Nj', 'nj'),
-    ('ǎ', 'ǎ'),
-    ('ǐ', 'ǐ'),
-    ('ǒ', 'ǒ'),
-    ('ǔ', 'ǔ'),
-    ('ǖ', 'ǖ'),
-    ('ǘ', 'ǘ'),
-    ('ǚ', 'ǚ'),
-    ('ǜ', 'ǝ'),
-    ('ǟ', 'ǟ'),
-    ('ÇĄ', 'ÇĄ'),
-    ('ÇŁ', 'ÇŁ'),
-    ('Ç„', 'Ç„'),
-    ('ǧ', 'ǧ'),
-    ('Ç©', 'Ç©'),
-    ('Ç«', 'Ç«'),
-    ('Ç­', 'Ç­'),
-    ('ǯ', 'ǰ'),
-    ('ÇČ', 'Çł'),
-    ('Ç”', 'Ç”'),
-    ('Çč', 'Çč'),
-    ('Ç»', 'Ç»'),
-    ('ǜ', 'ǜ'),
-    ('Çż', 'Çż'),
-    ('ȁ', 'ȁ'),
-    ('ȃ', 'ȃ'),
-    ('ȅ', 'ȅ'),
-    ('ȇ', 'ȇ'),
-    ('ȉ', 'ȉ'),
-    ('ȋ', 'ȋ'),
-    ('ȍ', 'ȍ'),
-    ('ȏ', 'ȏ'),
-    ('ȑ', 'ȑ'),
-    ('ȓ', 'ȓ'),
-    ('ȕ', 'ȕ'),
-    ('ȗ', 'ȗ'),
-    ('ș', 'ș'),
-    ('ț', 'ț'),
-    ('ȝ', 'ȝ'),
-    ('ȟ', 'ȟ'),
-    ('ÈŁ', 'ÈŁ'),
-    ('È„', 'È„'),
-    ('ȧ', 'ȧ'),
-    ('È©', 'È©'),
-    ('È«', 'È«'),
-    ('È­', 'È­'),
-    ('ÈŻ', 'ÈŻ'),
-    ('ȱ', 'ȱ'),
-    ('Èł', 'Èł'),
-    ('Ȍ', 'Ȍ'),
-    ('Èż', 'ɀ'),
-    ('ɂ', 'ɂ'),
-    ('ɇ', 'ɇ'),
-    ('ɉ', 'ɉ'),
-    ('ɋ', 'ɋ'),
-    ('ɍ', 'ɍ'),
-    ('ɏ', 'ɔ'),
-    ('ɖ', 'ɗ'),
-    ('ə', 'ə'),
-    ('ɛ', 'ɜ'),
-    ('É ', 'ÉĄ'),
-    ('ÉŁ', 'ÉŠ'),
-    ('Éš', 'ÉŹ'),
-    ('ÉŻ', 'ÉŻ'),
-    ('ɱ', 'ÉČ'),
-    ('É”', 'É”'),
-    ('ɜ', 'ɜ'),
-    ('ʀ', 'ʀ'),
-    ('ʂ', 'ʃ'),
-    ('ʇ', 'ʌ'),
-    ('ʒ', 'ʒ'),
-    ('ʝ', 'ʞ'),
-    ('\u{345}', '\u{345}'),
-    ('ͱ', 'ͱ'),
-    ('Íł', 'Íł'),
-    ('Í·', 'Í·'),
-    ('ͻ', '͜'),
-    ('ΐ', 'ΐ'),
-    ('ÎŹ', 'ώ'),
-    ('ϐ', 'ϑ'),
-    ('ϕ', 'ϗ'),
-    ('ϙ', 'ϙ'),
-    ('ϛ', 'ϛ'),
-    ('ϝ', 'ϝ'),
-    ('ϟ', 'ϟ'),
-    ('ÏĄ', 'ÏĄ'),
-    ('ÏŁ', 'ÏŁ'),
-    ('Ï„', 'Ï„'),
-    ('ϧ', 'ϧ'),
-    ('Ï©', 'Ï©'),
-    ('Ï«', 'Ï«'),
-    ('Ï­', 'Ï­'),
-    ('ÏŻ', 'Ïł'),
-    ('Ï”', 'Ï”'),
-    ('Ïž', 'Ïž'),
-    ('Ï»', 'Ï»'),
-    ('а', 'џ'),
-    ('ŃĄ', 'ŃĄ'),
-    ('ŃŁ', 'ŃŁ'),
-    ('Ń„', 'Ń„'),
-    ('ѧ', 'ѧ'),
-    ('Ń©', 'Ń©'),
-    ('Ń«', 'Ń«'),
-    ('Ń­', 'Ń­'),
-    ('ŃŻ', 'ŃŻ'),
-    ('ѱ', 'ѱ'),
-    ('Ńł', 'Ńł'),
-    ('Ń”', 'Ń”'),
-    ('Ń·', 'Ń·'),
-    ('Ńč', 'Ńč'),
-    ('Ń»', 'Ń»'),
-    ('Ńœ', 'Ńœ'),
-    ('Ńż', 'Ńż'),
-    ('ҁ', 'ҁ'),
-    ('ҋ', 'ҋ'),
-    ('ҍ', 'ҍ'),
-    ('ҏ', 'ҏ'),
-    ('ґ', 'ґ'),
-    ('ғ', 'ғ'),
-    ('ҕ', 'ҕ'),
-    ('җ', 'җ'),
-    ('ҙ', 'ҙ'),
-    ('қ', 'қ'),
-    ('ҝ', 'ҝ'),
-    ('ҟ', 'ҟ'),
-    ('ÒĄ', 'ÒĄ'),
-    ('ÒŁ', 'ÒŁ'),
-    ('Ò„', 'Ò„'),
-    ('Ò§', 'Ò§'),
-    ('Ò©', 'Ò©'),
-    ('Ò«', 'Ò«'),
-    ('Ò­', 'Ò­'),
-    ('ÒŻ', 'ÒŻ'),
-    ('Ò±', 'Ò±'),
-    ('Òł', 'Òł'),
-    ('Ò”', 'Ò”'),
-    ('Ò·', 'Ò·'),
-    ('Òč', 'Òč'),
-    ('Ò»', 'Ò»'),
-    ('Ҝ', 'Ҝ'),
-    ('Òż', 'Òż'),
-    ('ӂ', 'ӂ'),
-    ('ӄ', 'ӄ'),
-    ('ӆ', 'ӆ'),
-    ('ӈ', 'ӈ'),
-    ('ӊ', 'ӊ'),
-    ('ӌ', 'ӌ'),
-    ('ӎ', 'ӏ'),
-    ('ӑ', 'ӑ'),
-    ('ӓ', 'ӓ'),
-    ('ӕ', 'ӕ'),
-    ('ӗ', 'ӗ'),
-    ('ә', 'ә'),
-    ('ӛ', 'ӛ'),
-    ('ӝ', 'ӝ'),
-    ('ӟ', 'ӟ'),
-    ('ÓĄ', 'ÓĄ'),
-    ('ÓŁ', 'ÓŁ'),
-    ('Ó„', 'Ó„'),
-    ('Ó§', 'Ó§'),
-    ('Ó©', 'Ó©'),
-    ('Ó«', 'Ó«'),
-    ('Ó­', 'Ó­'),
-    ('ÓŻ', 'ÓŻ'),
-    ('Ó±', 'Ó±'),
-    ('Ół', 'Ół'),
-    ('Ó”', 'Ó”'),
-    ('Ó·', 'Ó·'),
-    ('Óč', 'Óč'),
-    ('Ó»', 'Ó»'),
-    ('Ӝ', 'Ӝ'),
-    ('Óż', 'Óż'),
-    ('ԁ', 'ԁ'),
-    ('ԃ', 'ԃ'),
-    ('ԅ', 'ԅ'),
-    ('ԇ', 'ԇ'),
-    ('ԉ', 'ԉ'),
-    ('ԋ', 'ԋ'),
-    ('ԍ', 'ԍ'),
-    ('ԏ', 'ԏ'),
-    ('ԑ', 'ԑ'),
-    ('ԓ', 'ԓ'),
-    ('ԕ', 'ԕ'),
-    ('ԗ', 'ԗ'),
-    ('ԙ', 'ԙ'),
-    ('ԛ', 'ԛ'),
-    ('ԝ', 'ԝ'),
-    ('ԟ', 'ԟ'),
-    ('ÔĄ', 'ÔĄ'),
-    ('ÔŁ', 'ÔŁ'),
-    ('Ô„', 'Ô„'),
-    ('Ô§', 'Ô§'),
-    ('Ô©', 'Ô©'),
-    ('Ô«', 'Ô«'),
-    ('Ô­', 'Ô­'),
-    ('ÔŻ', 'ÔŻ'),
-    ('ա', 'և'),
-    ('ა', 'áƒș'),
-    ('ნ', 'ჿ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȈ'),
-    ('áȊ', 'áȊ'),
-    ('á”č', 'á”č'),
-    ('ᔜ', 'ᔜ'),
-    ('ᶎ', 'ᶎ'),
-    ('ខ', 'ខ'),
-    ('ឃ', 'ឃ'),
-    ('ᾅ', 'ᾅ'),
-    ('ᾇ', 'ᾇ'),
-    ('ᾉ', 'ᾉ'),
-    ('ᾋ', 'ᾋ'),
-    ('ឍ', 'ឍ'),
-    ('ត', 'ត'),
-    ('ᾑ', 'ᾑ'),
-    ('ᾓ', 'ᾓ'),
-    ('ᾕ', 'ᾕ'),
-    ('ᾗ', 'ᾗ'),
-    ('ᾙ', 'ᾙ'),
-    ('ᾛ', 'ᾛ'),
-    ('ឝ', 'ឝ'),
-    ('ᾟ', 'ᾟ'),
-    ('ឥ', 'ឥ'),
-    ('ឣ', 'ឣ'),
-    ('áž„', 'áž„'),
-    ('áž§', 'áž§'),
-    ('áž©', 'áž©'),
-    ('áž«', 'áž«'),
-    ('áž­', 'áž­'),
-    ('ឯ', 'ឯ'),
-    ('áž±', 'áž±'),
-    ('ážł', 'ážł'),
-    ('áž”', 'áž”'),
-    ('áž·', 'áž·'),
-    ('ážč', 'ážč'),
-    ('áž»', 'áž»'),
-    ('វ', 'វ'),
-    ('ážż', 'ážż'),
-    ('áč', 'áč'),
-    ('áčƒ', 'áčƒ'),
-    ('áč…', 'áč…'),
-    ('áč‡', 'áč‡'),
-    ('áč‰', 'áč‰'),
-    ('áč‹', 'áč‹'),
-    ('áč', 'áč'),
-    ('áč', 'áč'),
-    ('áč‘', 'áč‘'),
-    ('áč“', 'áč“'),
-    ('áč•', 'áč•'),
-    ('áč—', 'áč—'),
-    ('áč™', 'áč™'),
-    ('áč›', 'áč›'),
-    ('áč', 'áč'),
-    ('áčŸ', 'áčŸ'),
-    ('áčĄ', 'áčĄ'),
-    ('áčŁ', 'áčŁ'),
-    ('áč„', 'áč„'),
-    ('áč§', 'áč§'),
-    ('áč©', 'áč©'),
-    ('áč«', 'áč«'),
-    ('áč­', 'áč­'),
-    ('áčŻ', 'áčŻ'),
-    ('áč±', 'áč±'),
-    ('áčł', 'áčł'),
-    ('áč”', 'áč”'),
-    ('áč·', 'áč·'),
-    ('áčč', 'áčč'),
-    ('áč»', 'áč»'),
-    ('áčœ', 'áčœ'),
-    ('áčż', 'áčż'),
-    ('áș', 'áș'),
-    ('áșƒ', 'áșƒ'),
-    ('áș…', 'áș…'),
-    ('áș‡', 'áș‡'),
-    ('áș‰', 'áș‰'),
-    ('áș‹', 'áș‹'),
-    ('áș', 'áș'),
-    ('áș', 'áș'),
-    ('áș‘', 'áș‘'),
-    ('áș“', 'áș“'),
-    ('áș•', 'áș›'),
-    ('áșĄ', 'áșĄ'),
-    ('áșŁ', 'áșŁ'),
-    ('áș„', 'áș„'),
-    ('áș§', 'áș§'),
-    ('áș©', 'áș©'),
-    ('áș«', 'áș«'),
-    ('áș­', 'áș­'),
-    ('áșŻ', 'áșŻ'),
-    ('áș±', 'áș±'),
-    ('áșł', 'áșł'),
-    ('áș”', 'áș”'),
-    ('áș·', 'áș·'),
-    ('áșč', 'áșč'),
-    ('áș»', 'áș»'),
-    ('áșœ', 'áșœ'),
-    ('áșż', 'áșż'),
-    ('ề', 'ề'),
-    ('ể', 'ể'),
-    ('ễ', 'ễ'),
-    ('ệ', 'ệ'),
-    ('ỉ', 'ỉ'),
-    ('ị', 'ị'),
-    ('ọ', 'ọ'),
-    ('ỏ', 'ỏ'),
-    ('ố', 'ố'),
-    ('ồ', 'ồ'),
-    ('ổ', 'ổ'),
-    ('ỗ', 'ỗ'),
-    ('ộ', 'ộ'),
-    ('ớ', 'ớ'),
-    ('ờ', 'ờ'),
-    ('ở', 'ở'),
-    ('ụ', 'ụ'),
-    ('ợ', 'ợ'),
-    ('Ễ', 'Ễ'),
-    ('á»§', 'á»§'),
-    ('ứ', 'ứ'),
-    ('ừ', 'ừ'),
-    ('á»­', 'á»­'),
-    ('ữ', 'ữ'),
-    ('á»±', 'á»±'),
-    ('ỳ', 'ỳ'),
-    ('á»”', 'á»”'),
-    ('á»·', 'á»·'),
-    ('á»č', 'á»č'),
-    ('á»»', 'á»»'),
-    ('Ờ', 'Ờ'),
-    ('ỿ', 'ጇ'),
-    ('ጐ', 'ጕ'),
-    ('ጠ', 'ጧ'),
-    ('ጰ', 'ጷ'),
-    ('ᜀ', 'ᜅ'),
-    ('ᜐ', '᜗'),
-    ('ᜠ', 'ᜧ'),
-    ('ᜰ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៷'),
-    ('៌', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῇ'),
-    ('ῌ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'ῗ'),
-    ('áż ', 'áż§'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áż·'),
-    ('áżŒ', 'áżŒ'),
-    ('ⅎ', 'ⅎ'),
-    ('ⅰ', 'ⅿ'),
-    ('ↄ', 'ↄ'),
-    ('ⓐ', 'ⓩ'),
-    ('ⰰ', 'ⱟ'),
-    ('ⱥ', 'ⱥ'),
-    ('ⱄ', 'ⱊ'),
-    ('ⱚ', 'ⱚ'),
-    ('â±Ș', 'â±Ș'),
-    ('ⱏ', 'ⱏ'),
-    ('ⱳ', 'ⱳ'),
-    ('â±¶', 'â±¶'),
-    ('âȁ', 'âȁ'),
-    ('âȃ', 'âȃ'),
-    ('âȅ', 'âȅ'),
-    ('âȇ', 'âȇ'),
-    ('âȉ', 'âȉ'),
-    ('âȋ', 'âȋ'),
-    ('âȍ', 'âȍ'),
-    ('âȏ', 'âȏ'),
-    ('âȑ', 'âȑ'),
-    ('âȓ', 'âȓ'),
-    ('âȕ', 'âȕ'),
-    ('âȗ', 'âȗ'),
-    ('âș', 'âș'),
-    ('âț', 'âț'),
-    ('âȝ', 'âȝ'),
-    ('âȟ', 'âȟ'),
-    ('âČĄ', 'âČĄ'),
-    ('âČŁ', 'âČŁ'),
-    ('âČ„', 'âČ„'),
-    ('âȧ', 'âȧ'),
-    ('âČ©', 'âČ©'),
-    ('âČ«', 'âČ«'),
-    ('âČ­', 'âČ­'),
-    ('âČŻ', 'âČŻ'),
-    ('âȱ', 'âȱ'),
-    ('âČł', 'âČł'),
-    ('âČ”', 'âČ”'),
-    ('âČ·', 'âČ·'),
-    ('âČč', 'âČč'),
-    ('âČ»', 'âČ»'),
-    ('âČœ', 'âČœ'),
-    ('âČż', 'âČż'),
-    ('ⳁ', 'ⳁ'),
-    ('ⳃ', 'ⳃ'),
-    ('ⳅ', 'ⳅ'),
-    ('ⳇ', 'ⳇ'),
-    ('ⳉ', 'ⳉ'),
-    ('ⳋ', 'ⳋ'),
-    ('ⳍ', 'ⳍ'),
-    ('ⳏ', 'ⳏ'),
-    ('ⳑ', 'ⳑ'),
-    ('ⳓ', 'ⳓ'),
-    ('ⳕ', 'ⳕ'),
-    ('ⳗ', 'ⳗ'),
-    ('ⳙ', 'ⳙ'),
-    ('ⳛ', 'ⳛ'),
-    ('ⳝ', 'ⳝ'),
-    ('ⳟ', 'ⳟ'),
-    ('⳥', '⳥'),
-    ('ⳣ', 'ⳣ'),
-    ('ⳏ', 'ⳏ'),
-    ('âłź', 'âłź'),
-    ('âłł', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('ꙁ', 'ꙁ'),
-    ('ꙃ', 'ꙃ'),
-    ('ꙅ', 'ꙅ'),
-    ('ꙇ', 'ꙇ'),
-    ('ꙉ', 'ꙉ'),
-    ('ꙋ', 'ꙋ'),
-    ('ꙍ', 'ꙍ'),
-    ('ꙏ', 'ꙏ'),
-    ('ꙑ', 'ꙑ'),
-    ('ꙓ', 'ꙓ'),
-    ('ꙕ', 'ꙕ'),
-    ('ꙗ', 'ꙗ'),
-    ('ꙙ', 'ꙙ'),
-    ('ꙛ', 'ꙛ'),
-    ('ꙝ', 'ꙝ'),
-    ('ꙟ', 'ꙟ'),
-    ('ê™Ą', 'ê™Ą'),
-    ('ê™Ł', 'ê™Ł'),
-    ('Ꙅ', 'Ꙅ'),
-    ('ꙧ', 'ꙧ'),
-    ('ꙩ', 'ꙩ'),
-    ('ꙫ', 'ꙫ'),
-    ('ꙭ', 'ꙭ'),
-    ('ꚁ', 'ꚁ'),
-    ('ꚃ', 'ꚃ'),
-    ('ꚅ', 'ꚅ'),
-    ('ꚇ', 'ꚇ'),
-    ('ꚉ', 'ꚉ'),
-    ('ꚋ', 'ꚋ'),
-    ('ꚍ', 'ꚍ'),
-    ('ꚏ', 'ꚏ'),
-    ('ꚑ', 'ꚑ'),
-    ('ꚓ', 'ꚓ'),
-    ('ꚕ', 'ꚕ'),
-    ('ꚗ', 'ꚗ'),
-    ('ꚙ', 'ꚙ'),
-    ('ꚛ', 'ꚛ'),
-    ('êœŁ', 'êœŁ'),
-    ('꜄', '꜄'),
-    ('ꜧ', 'ꜧ'),
-    ('ꜩ', 'ꜩ'),
-    ('ꜫ', 'ꜫ'),
-    ('ꜭ', 'ꜭ'),
-    ('êœŻ', 'êœŻ'),
-    ('êœł', 'êœł'),
-    ('꜔', '꜔'),
-    ('ꜷ', 'ꜷ'),
-    ('êœč', 'êœč'),
-    ('ꜻ', 'ꜻ'),
-    ('ꜜ', 'ꜜ'),
-    ('êœż', 'êœż'),
-    ('ꝁ', 'ꝁ'),
-    ('ꝃ', 'ꝃ'),
-    ('ꝅ', 'ꝅ'),
-    ('ꝇ', 'ꝇ'),
-    ('ꝉ', 'ꝉ'),
-    ('ꝋ', 'ꝋ'),
-    ('ꝍ', 'ꝍ'),
-    ('ꝏ', 'ꝏ'),
-    ('ꝑ', 'ꝑ'),
-    ('ꝓ', 'ꝓ'),
-    ('ꝕ', 'ꝕ'),
-    ('ꝗ', 'ꝗ'),
-    ('ꝙ', 'ꝙ'),
-    ('ꝛ', 'ꝛ'),
-    ('ꝝ', 'ꝝ'),
-    ('ꝟ', 'ꝟ'),
-    ('êĄ', 'êĄ'),
-    ('êŁ', 'êŁ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('ꝧ', 'ꝧ'),
-    ('ꝩ', 'ꝩ'),
-    ('ꝫ', 'ꝫ'),
-    ('ꝭ', 'ꝭ'),
-    ('êŻ', 'êŻ'),
-    ('êș', 'êș'),
-    ('Ꝍ', 'Ꝍ'),
-    ('êż', 'êż'),
-    ('ꞁ', 'ꞁ'),
-    ('ꞃ', 'ꞃ'),
-    ('ꞅ', 'ꞅ'),
-    ('ꞇ', 'ꞇ'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞑ', 'ꞑ'),
-    ('ꞓ', 'ꞔ'),
-    ('ꞗ', 'ꞗ'),
-    ('ꞙ', 'ꞙ'),
-    ('ꞛ', 'ꞛ'),
-    ('ꞝ', 'ꞝ'),
-    ('ꞟ', 'ꞟ'),
-    ('êžĄ', 'êžĄ'),
-    ('êžŁ', 'êžŁ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('ꞧ', 'ꞧ'),
-    ('ꞩ', 'ꞩ'),
-    ('ꞔ', 'ꞔ'),
-    ('ꞷ', 'ꞷ'),
-    ('êžč', 'êžč'),
-    ('ꞻ', 'ꞻ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('êžż', 'êžż'),
-    ('ꟁ', 'ꟁ'),
-    ('ꟃ', 'ꟃ'),
-    ('ꟈ', 'ꟈ'),
-    ('ꟊ', 'ꟊ'),
-    ('ꟍ', 'ꟍ'),
-    ('ꟑ', 'ꟑ'),
-    ('ꟗ', 'ꟗ'),
-    ('ꟙ', 'ꟙ'),
-    ('ꟛ', 'ꟛ'),
-    ('ꟶ', 'ꟶ'),
-    ('ꭓ', 'ꭓ'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('', ''),
-    ('𐐹', '𐑏'),
-    ('𐓘', '𐓻'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐳀', 'đłČ'),
-    ('𐔰', '𐶅'),
-    ('𑣀', '𑣟'),
-    ('đ–č ', 'đ–čż'),
-    ('𞀹', 'đž„ƒ'),
-];
-
-pub const DASH: &'static [(char, char)] = &[
-    ('-', '-'),
-    ('֊', '֊'),
-    ('ÖŸ', 'ÖŸ'),
-    ('᐀', '᐀'),
-    ('᠆', '᠆'),
-    ('‐', '―'),
-    ('⁓', '⁓'),
-    ('⁻', '⁻'),
-    ('₋', '₋'),
-    ('−', '−'),
-    ('⾗', '⾗'),
-    ('⾚', '⾚'),
-    ('âžș', 'âž»'),
-    ('âč€', 'âč€'),
-    ('âč', 'âč'),
-    ('〜', '〜'),
-    ('〰', '〰'),
-    ('゠', '゠'),
-    ('ïž±', 'ïžČ'),
-    ('ïč˜', 'ïč˜'),
-    ('ïčŁ', 'ïčŁ'),
-    ('', ''),
-    ('𐔟', '𐔟'),
-    ('đș­', 'đș­'),
-];
-
-pub const DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[
-    ('\u{ad}', '\u{ad}'),
-    ('\u{34f}', '\u{34f}'),
-    ('\u{61c}', '\u{61c}'),
-    ('ᅟ', 'ᅠ'),
-    ('\u{17b4}', '\u{17b5}'),
-    ('\u{180b}', '\u{180f}'),
-    ('\u{200b}', '\u{200f}'),
-    ('\u{202a}', '\u{202e}'),
-    ('\u{2060}', '\u{206f}'),
-    ('ă…€', 'ă…€'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{feff}', '\u{feff}'),
-    ('', ''),
-    ('\u{fff0}', '\u{fff8}'),
-    ('\u{1bca0}', '\u{1bca3}'),
-    ('\u{1d173}', '\u{1d17a}'),
-    ('\u{e0000}', '\u{e0fff}'),
-];
-
-pub const DEPRECATED: &'static [(char, char)] = &[
-    ('Ɖ', 'Ɖ'),
-    ('Ùł', 'Ùł'),
-    ('\u{f77}', '\u{f77}'),
-    ('\u{f79}', '\u{f79}'),
-    ('ឣ', 'ក'),
-    ('\u{206a}', '\u{206f}'),
-    ('⟨', '⟩'),
-    ('\u{e0001}', '\u{e0001}'),
-];
-
-pub const DIACRITIC: &'static [(char, char)] = &[
-    ('^', '^'),
-    ('`', '`'),
-    ('¨', '¨'),
-    ('¯', '¯'),
-    ('´', '´'),
-    ('·', '¸'),
-    ('ʰ', '\u{34e}'),
-    ('\u{350}', '\u{357}'),
-    ('\u{35d}', '\u{362}'),
-    ('ÍŽ', 'Í”'),
-    ('Íș', 'Íș'),
-    ('΄', '΅'),
-    ('\u{483}', '\u{487}'),
-    ('ՙ', 'ՙ'),
-    ('\u{591}', '\u{5a1}'),
-    ('\u{5a3}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c4}'),
-    ('\u{64b}', '\u{652}'),
-    ('\u{657}', '\u{658}'),
-    ('\u{6df}', '\u{6e0}'),
-    ('Û„', 'ÛŠ'),
-    ('\u{6ea}', '\u{6ec}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', 'ß”'),
-    ('\u{818}', '\u{819}'),
-    ('\u{898}', '\u{89f}'),
-    ('àŁ‰', '\u{8d2}'),
-    ('\u{8e3}', '\u{8fe}'),
-    ('\u{93c}', '\u{93c}'),
-    ('\u{94d}', '\u{94d}'),
-    ('\u{951}', '\u{954}'),
-    ('à„±', 'à„±'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9cd}', '\u{9cd}'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('\u{a4d}', '\u{a4d}'),
-    ('\u{abc}', '\u{abc}'),
-    ('\u{acd}', '\u{acd}'),
-    ('\u{afd}', '\u{aff}'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b4d}', '\u{b4d}'),
-    ('\u{b55}', '\u{b55}'),
-    ('\u{bcd}', '\u{bcd}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c4d}', '\u{c4d}'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('\u{ccd}', '\u{ccd}'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d4d}', '\u{d4d}'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{e3a}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4c}'),
-    ('\u{e4e}', '\u{e4e}'),
-    ('\u{eba}', '\u{eba}'),
-    ('\u{ec8}', '\u{ecc}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('àŒŸ', 'àŒż'),
-    ('\u{f82}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('\u{1037}', '\u{1037}'),
-    ('\u{1039}', '\u{103a}'),
-    ('ၣ', '၀'),
-    ('ၩ', 'ၭ'),
-    ('ႇ', '\u{108d}'),
-    ('ႏ', 'ႏ'),
-    ('ႚ', 'ႛ'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1714}', '\u{1715}'),
-    ('\u{1734}', '\u{1734}'),
-    ('\u{17c9}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{1939}', '\u{193b}'),
-    ('\u{1a60}', '\u{1a60}'),
-    ('\u{1a75}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1abe}'),
-    ('\u{1ac1}', '\u{1acb}'),
-    ('\u{1b34}', '\u{1b34}'),
-    ('\u{1b44}', '\u{1b44}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1baa}', '\u{1bab}'),
-    ('\u{1be6}', '\u{1be6}'),
-    ('\u{1bf2}', '\u{1bf3}'),
-    ('\u{1c36}', '\u{1c37}'),
-    ('ᱞ', 'ᱜ'),
-    ('\u{1cd0}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('áł·', '\u{1cf9}'),
-    ('ᎏ', 'á”Ș'),
-    ('\u{1dc4}', '\u{1dcf}'),
-    ('\u{1df5}', '\u{1dff}'),
-    ('ៜ', 'ៜ'),
-    ('áŸż', '῁'),
-    ('῍', '῏'),
-    ('῝', '῟'),
-    ('῭', '`'),
-    ('áżœ', 'áżŸ'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('➯', '➯'),
-    ('\u{302a}', '\u{302f}'),
-    ('\u{3099}', '゜'),
-    ('ăƒŒ', 'ăƒŒ'),
-    ('\u{a66f}', '\u{a66f}'),
-    ('\u{a67c}', '\u{a67d}'),
-    ('ê™ż', 'ê™ż'),
-    ('ꚜ', 'ꚝ'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('꜀', 'êœĄ'),
-    ('ꞈ', '꞊'),
-    ('꟞', 'êŸč'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('\u{a8c4}', '\u{a8c4}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a92b}', 'ê€ź'),
-    ('\u{a953}', '\u{a953}'),
-    ('\u{a9b3}', '\u{a9b3}'),
-    ('\u{a9c0}', '\u{a9c0}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('ꩻ', '꩜'),
-    ('\u{aabf}', 'ꫂ'),
-    ('\u{aaf6}', '\u{aaf6}'),
-    ('꭛', 'ꭟ'),
-    ('ê­©', 'ê­«'),
-    ('êŻŹ', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('ïżŁ', 'ïżŁ'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('𐎹', '\u{10d27}'),
-    ('𐔎', '𐔎'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10efd}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('\u{11046}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{110b9}', '\u{110ba}'),
-    ('\u{11133}', '\u{11134}'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{111c0}', '\u{111c0}'),
-    ('\u{111ca}', '\u{111cc}'),
-    ('\u{11235}', '\u{11236}'),
-    ('\u{112e9}', '\u{112ea}'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{1134d}', '\u{1134d}'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113ce}', '\u{113d0}'),
-    ('\u{113d2}', '𑏓'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('\u{11442}', '\u{11442}'),
-    ('\u{11446}', '\u{11446}'),
-    ('\u{114c2}', '\u{114c3}'),
-    ('\u{115bf}', '\u{115c0}'),
-    ('\u{1163f}', '\u{1163f}'),
-    ('\u{116b6}', '\u{116b7}'),
-    ('\u{1172b}', '\u{1172b}'),
-    ('\u{11839}', '\u{1183a}'),
-    ('\u{1193d}', '\u{1193e}'),
-    ('\u{11943}', '\u{11943}'),
-    ('\u{119e0}', '\u{119e0}'),
-    ('\u{11a34}', '\u{11a34}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a99}', '\u{11a99}'),
-    ('\u{11c3f}', '\u{11c3f}'),
-    ('\u{11d42}', '\u{11d42}'),
-    ('\u{11d44}', '\u{11d45}'),
-    ('\u{11d97}', '\u{11d97}'),
-    ('\u{11f41}', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1612f}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('đ–”«', '𖔏'),
-    ('\u{16f8f}', 'đ–ŸŸ'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d167}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e946}'),
-    ('\u{1e948}', '\u{1e94a}'),
-];
-
-pub const EMOJI: &'static [(char, char)] = &[
-    ('#', '#'),
-    ('*', '*'),
-    ('0', '9'),
-    ('©', '©'),
-    ('®', '®'),
-    ('‌', '‌'),
-    ('⁉', '⁉'),
-    ('™', '™'),
-    ('â„č', 'â„č'),
-    ('↔', '↙'),
-    ('↩', 'â†Ș'),
-    ('⌚', '⌛'),
-    ('⌹', '⌹'),
-    ('⏏', '⏏'),
-    ('⏩', '⏳'),
-    ('⏞', 'âș'),
-    ('Ⓜ', 'Ⓜ'),
-    ('â–Ș', '▫'),
-    ('▶', '▶'),
-    ('◀', '◀'),
-    ('◻', 'â—Ÿ'),
-    ('☀', '☄'),
-    ('☎', '☎'),
-    ('☑', '☑'),
-    ('☔', '☕'),
-    ('☘', '☘'),
-    ('☝', '☝'),
-    ('☠', '☠'),
-    ('☹', '☣'),
-    ('☊', '☊'),
-    ('â˜Ș', 'â˜Ș'),
-    ('☟', '☯'),
-    ('☞', 'â˜ș'),
-    ('♀', '♀'),
-    ('♂', '♂'),
-    ('♈', '♓'),
-    ('♟', '♠'),
-    ('♣', '♣'),
-    ('♥', '♦'),
-    ('♹', '♹'),
-    ('♻', '♻'),
-    ('♟', '♿'),
-    ('⚒', '⚗'),
-    ('⚙', '⚙'),
-    ('⚛', '⚜'),
-    ('⚠', '⚡'),
-    ('⚧', '⚧'),
-    ('âšȘ', '⚫'),
-    ('⚰', '⚱'),
-    ('âšœ', '⚟'),
-    ('⛄', '⛅'),
-    ('⛈', '⛈'),
-    ('⛎', '⛏'),
-    ('⛑', '⛑'),
-    ('⛓', '⛔'),
-    ('⛩', 'â›Ș'),
-    ('⛰', 'â›”'),
-    ('⛷', 'â›ș'),
-    ('⛜', '⛜'),
-    ('✂', '✂'),
-    ('✅', '✅'),
-    ('✈', '✍'),
-    ('✏', '✏'),
-    ('✒', '✒'),
-    ('✔', '✔'),
-    ('✖', '✖'),
-    ('✝', '✝'),
-    ('✡', '✡'),
-    ('✹', '✹'),
-    ('✳', '✮'),
-    ('❄', '❄'),
-    ('❇', '❇'),
-    ('❌', '❌'),
-    ('❎', '❎'),
-    ('❓', '❕'),
-    ('❗', '❗'),
-    ('❣', '❀'),
-    ('➕', '➗'),
-    ('➡', '➡'),
-    ('➰', '➰'),
-    ('➿', '➿'),
-    ('‎', '—'),
-    ('⬅', '⬇'),
-    ('⬛', '⬜'),
-    ('⭐', '⭐'),
-    ('⭕', '⭕'),
-    ('〰', '〰'),
-    ('ă€œ', 'ă€œ'),
-    ('㊗', '㊗'),
-    ('㊙', '㊙'),
-    ('🀄', '🀄'),
-    ('🃏', '🃏'),
-    ('🅰', 'đŸ…±'),
-    ('đŸ…Ÿ', '🅿'),
-    ('🆎', '🆎'),
-    ('🆑', '🆚'),
-    ('🇩', '🇿'),
-    ('🈁', '🈂'),
-    ('🈚', '🈚'),
-    ('🈯', '🈯'),
-    ('đŸˆČ', 'đŸˆș'),
-    ('🉐', '🉑'),
-    ('🌀', '🌡'),
-    ('đŸŒ€', '🎓'),
-    ('🎖', '🎗'),
-    ('🎙', '🎛'),
-    ('🎞', '🏰'),
-    ('🏳', 'đŸ”'),
-    ('đŸ·', 'đŸ“œ'),
-    ('📿', 'đŸ”œ'),
-    ('🕉', '🕎'),
-    ('🕐', '🕧'),
-    ('🕯', '🕰'),
-    ('🕳', 'đŸ•ș'),
-    ('🖇', '🖇'),
-    ('🖊', '🖍'),
-    ('🖐', '🖐'),
-    ('🖕', '🖖'),
-    ('đŸ–€', 'đŸ–„'),
-    ('🖹', '🖹'),
-    ('đŸ–±', 'đŸ–Č'),
-    ('đŸ–Œ', 'đŸ–Œ'),
-    ('🗂', '🗄'),
-    ('🗑', '🗓'),
-    ('🗜', '🗞'),
-    ('🗡', '🗡'),
-    ('🗣', '🗣'),
-    ('🗹', '🗹'),
-    ('🗯', '🗯'),
-    ('🗳', '🗳'),
-    ('đŸ—ș', '🙏'),
-    ('🚀', '🛅'),
-    ('🛋', '🛒'),
-    ('🛕', '🛗'),
-    ('🛜', 'đŸ›„'),
-    ('đŸ›©', 'đŸ›©'),
-    ('đŸ›«', '🛬'),
-    ('🛰', '🛰'),
-    ('🛳', 'đŸ›Œ'),
-    ('🟠', 'đŸŸ«'),
-    ('🟰', '🟰'),
-    ('đŸ€Œ', 'đŸ€ș'),
-    ('đŸ€Œ', 'đŸ„…'),
-    ('đŸ„‡', '🧿'),
-    ('đŸ©°', 'đŸ©Œ'),
-    ('đŸȘ€', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸ«†'),
-    ('đŸ«Ž', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«©'),
-    ('đŸ«°', 'đŸ«ž'),
-];
-
-pub const EMOJI_COMPONENT: &'static [(char, char)] = &[
-    ('#', '#'),
-    ('*', '*'),
-    ('0', '9'),
-    ('\u{200d}', '\u{200d}'),
-    ('\u{20e3}', '\u{20e3}'),
-    ('\u{fe0f}', '\u{fe0f}'),
-    ('🇩', '🇿'),
-    ('đŸ»', '🏿'),
-    ('🩰', '🩳'),
-    ('\u{e0020}', '\u{e007f}'),
-];
-
-pub const EMOJI_MODIFIER: &'static [(char, char)] = &[('đŸ»', '🏿')];
-
-pub const EMOJI_MODIFIER_BASE: &'static [(char, char)] = &[
-    ('☝', '☝'),
-    ('â›č', 'â›č'),
-    ('✊', '✍'),
-    ('🎅', '🎅'),
-    ('🏂', '🏄'),
-    ('🏇', '🏇'),
-    ('🏊', '🏌'),
-    ('👂', '👃'),
-    ('👆', '👐'),
-    ('👩', '👾'),
-    ('đŸ‘Œ', 'đŸ‘Œ'),
-    ('💁', '💃'),
-    ('💅', '💇'),
-    ('💏', '💏'),
-    ('💑', '💑'),
-    ('đŸ’Ș', 'đŸ’Ș'),
-    ('🕮', 'đŸ•”'),
-    ('đŸ•ș', 'đŸ•ș'),
-    ('🖐', '🖐'),
-    ('🖕', '🖖'),
-    ('🙅', '🙇'),
-    ('🙋', '🙏'),
-    ('🚣', '🚣'),
-    ('🚮', 'đŸš¶'),
-    ('🛀', '🛀'),
-    ('🛌', '🛌'),
-    ('đŸ€Œ', 'đŸ€Œ'),
-    ('đŸ€', 'đŸ€'),
-    ('đŸ€˜', 'đŸ€Ÿ'),
-    ('đŸ€Š', 'đŸ€Š'),
-    ('đŸ€°', 'đŸ€č'),
-    ('đŸ€Œ', 'đŸ€Ÿ'),
-    ('đŸ„·', 'đŸ„·'),
-    ('đŸŠ”', 'đŸŠ¶'),
-    ('🩾', 'đŸŠč'),
-    ('đŸŠ»', 'đŸŠ»'),
-    ('🧍', '🧏'),
-    ('🧑', '🧝'),
-    ('đŸ«ƒ', 'đŸ«…'),
-    ('đŸ«°', 'đŸ«ž'),
-];
-
-pub const EMOJI_PRESENTATION: &'static [(char, char)] = &[
-    ('⌚', '⌛'),
-    ('⏩', '⏬'),
-    ('⏰', '⏰'),
-    ('⏳', '⏳'),
-    ('◜', '◟'),
-    ('☔', '☕'),
-    ('♈', '♓'),
-    ('♿', '♿'),
-    ('⚓', '⚓'),
-    ('⚡', '⚡'),
-    ('âšȘ', '⚫'),
-    ('âšœ', '⚟'),
-    ('⛄', '⛅'),
-    ('⛎', '⛎'),
-    ('⛔', '⛔'),
-    ('â›Ș', 'â›Ș'),
-    ('â›Č', '⛳'),
-    ('â›”', 'â›”'),
-    ('â›ș', 'â›ș'),
-    ('⛜', '⛜'),
-    ('✅', '✅'),
-    ('✊', '✋'),
-    ('✹', '✹'),
-    ('❌', '❌'),
-    ('❎', '❎'),
-    ('❓', '❕'),
-    ('❗', '❗'),
-    ('➕', '➗'),
-    ('➰', '➰'),
-    ('➿', '➿'),
-    ('⬛', '⬜'),
-    ('⭐', '⭐'),
-    ('⭕', '⭕'),
-    ('🀄', '🀄'),
-    ('🃏', '🃏'),
-    ('🆎', '🆎'),
-    ('🆑', '🆚'),
-    ('🇩', '🇿'),
-    ('🈁', '🈁'),
-    ('🈚', '🈚'),
-    ('🈯', '🈯'),
-    ('đŸˆČ', 'đŸˆ¶'),
-    ('🈾', 'đŸˆș'),
-    ('🉐', '🉑'),
-    ('🌀', '🌠'),
-    ('🌭', 'đŸŒ”'),
-    ('đŸŒ·', 'đŸŒ'),
-    ('đŸŸ', '🎓'),
-    ('🎠', '🏊'),
-    ('🏏', '🏓'),
-    ('🏠', '🏰'),
-    ('🏮', '🏮'),
-    ('🏾', 'đŸŸ'),
-    ('👀', '👀'),
-    ('👂', 'đŸ“Œ'),
-    ('📿', 'đŸ”œ'),
-    ('🕋', '🕎'),
-    ('🕐', '🕧'),
-    ('đŸ•ș', 'đŸ•ș'),
-    ('🖕', '🖖'),
-    ('đŸ–€', 'đŸ–€'),
-    ('đŸ—»', '🙏'),
-    ('🚀', '🛅'),
-    ('🛌', '🛌'),
-    ('🛐', '🛒'),
-    ('🛕', '🛗'),
-    ('🛜', '🛟'),
-    ('đŸ›«', '🛬'),
-    ('🛮', 'đŸ›Œ'),
-    ('🟠', 'đŸŸ«'),
-    ('🟰', '🟰'),
-    ('đŸ€Œ', 'đŸ€ș'),
-    ('đŸ€Œ', 'đŸ„…'),
-    ('đŸ„‡', '🧿'),
-    ('đŸ©°', 'đŸ©Œ'),
-    ('đŸȘ€', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸ«†'),
-    ('đŸ«Ž', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«©'),
-    ('đŸ«°', 'đŸ«ž'),
-];
-
-pub const EXTENDED_PICTOGRAPHIC: &'static [(char, char)] = &[
-    ('©', '©'),
-    ('®', '®'),
-    ('‌', '‌'),
-    ('⁉', '⁉'),
-    ('™', '™'),
-    ('â„č', 'â„č'),
-    ('↔', '↙'),
-    ('↩', 'â†Ș'),
-    ('⌚', '⌛'),
-    ('⌹', '⌹'),
-    ('⎈', '⎈'),
-    ('⏏', '⏏'),
-    ('⏩', '⏳'),
-    ('⏞', 'âș'),
-    ('Ⓜ', 'Ⓜ'),
-    ('â–Ș', '▫'),
-    ('▶', '▶'),
-    ('◀', '◀'),
-    ('◻', 'â—Ÿ'),
-    ('☀', '★'),
-    ('☇', '☒'),
-    ('☔', '⚅'),
-    ('⚐', '✅'),
-    ('✈', '✒'),
-    ('✔', '✔'),
-    ('✖', '✖'),
-    ('✝', '✝'),
-    ('✡', '✡'),
-    ('✹', '✹'),
-    ('✳', '✮'),
-    ('❄', '❄'),
-    ('❇', '❇'),
-    ('❌', '❌'),
-    ('❎', '❎'),
-    ('❓', '❕'),
-    ('❗', '❗'),
-    ('❣', '❧'),
-    ('➕', '➗'),
-    ('➡', '➡'),
-    ('➰', '➰'),
-    ('➿', '➿'),
-    ('‎', '—'),
-    ('⬅', '⬇'),
-    ('⬛', '⬜'),
-    ('⭐', '⭐'),
-    ('⭕', '⭕'),
-    ('〰', '〰'),
-    ('ă€œ', 'ă€œ'),
-    ('㊗', '㊗'),
-    ('㊙', '㊙'),
-    ('🀀', '\u{1f0ff}'),
-    ('🄍', '🄏'),
-    ('🄯', '🄯'),
-    ('🅬', 'đŸ…±'),
-    ('đŸ…Ÿ', '🅿'),
-    ('🆎', '🆎'),
-    ('🆑', '🆚'),
-    ('🆭', '\u{1f1e5}'),
-    ('🈁', '\u{1f20f}'),
-    ('🈚', '🈚'),
-    ('🈯', '🈯'),
-    ('đŸˆČ', 'đŸˆș'),
-    ('\u{1f23c}', '\u{1f23f}'),
-    ('\u{1f249}', 'đŸș'),
-    ('🐀', 'đŸ”œ'),
-    ('🕆', '🙏'),
-    ('🚀', '\u{1f6ff}'),
-    ('🝮', '🝿'),
-    ('🟕', '\u{1f7ff}'),
-    ('\u{1f80c}', '\u{1f80f}'),
-    ('\u{1f848}', '\u{1f84f}'),
-    ('\u{1f85a}', '\u{1f85f}'),
-    ('\u{1f888}', '\u{1f88f}'),
-    ('\u{1f8ae}', '\u{1f8ff}'),
-    ('đŸ€Œ', 'đŸ€ș'),
-    ('đŸ€Œ', 'đŸ„…'),
-    ('đŸ„‡', '\u{1faff}'),
-    ('\u{1fc00}', '\u{1fffd}'),
-];
-
-pub const EXTENDER: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('ː', 'ˑ'),
-    ('ـ', 'ـ'),
-    ('ßș', 'ßș'),
-    ('\u{a71}', '\u{a71}'),
-    ('\u{afb}', '\u{afb}'),
-    ('\u{b55}', '\u{b55}'),
-    ('àč†', 'àč†'),
-    ('ໆ', 'ໆ'),
-    ('᠊', '᠊'),
-    ('᥃', '᥃'),
-    ('áȘ§', 'áȘ§'),
-    ('\u{1c36}', '\u{1c36}'),
-    ('á±»', 'á±»'),
-    ('々', '々'),
-    ('〱', '〔'),
-    ('ゝ', 'ゞ'),
-    ('ăƒŒ', 'ăƒŸ'),
-    ('ꀕ', 'ꀕ'),
-    ('ꘌ', 'ꘌ'),
-    ('ꧏ', 'ꧏ'),
-    ('ê§Š', 'ê§Š'),
-    ('ê©°', 'ê©°'),
-    ('ꫝ', 'ꫝ'),
-    ('ê«ł', '꫎'),
-    ('', ''),
-    ('𐞁', '𐞂'),
-    ('𐔎', '𐔎'),
-    ('\u{10d6a}', '\u{10d6a}'),
-    ('𐔯', '𐔯'),
-    ('\u{11237}', '\u{11237}'),
-    ('𑍝', '𑍝'),
-    ('\u{113d2}', '𑏓'),
-    ('𑗆', '𑗈'),
-    ('\u{11a98}', '\u{11a98}'),
-    ('𖭂', '𖭃'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('đž„Œ', 'đž„œ'),
-    ('\u{1e5ef}', '\u{1e5ef}'),
-    ('\u{1e944}', '\u{1e946}'),
-];
-
-pub const GRAPHEME_BASE: &'static [(char, char)] = &[
-    (' ', '~'),
-    ('\u{a0}', '¬'),
-    ('®', 'Ëż'),
-    ('Ͱ', 'ͷ'),
-    ('Íș', 'Íż'),
-    ('΄', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', '҂'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', '֊'),
-    ('֍', '֏'),
-    ('ÖŸ', 'ÖŸ'),
-    ('Ś€', 'Ś€'),
-    ('ڃ', 'ڃ'),
-    ('چ', 'چ'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚŽ'),
-    ('ۆ', 'ۏ'),
-    ('ۛ', 'ۛ'),
-    ('۝', 'ي'),
-    ('Ù ', 'ÙŻ'),
-    ('ٱ', 'ە'),
-    ('۞', '۞'),
-    ('Û„', 'ÛŠ'),
-    ('Û©', 'Û©'),
-    ('Ûź', '܍'),
-    ('ܐ', 'ܐ'),
-    ('ܒ', 'ܯ'),
-    ('ʍ', 'Ț„'),
-    ('Ț±', 'Ț±'),
-    ('߀', 'ßȘ'),
-    ('ߎ', 'ßș'),
-    ('ߟ', 'ࠕ'),
-    ('ࠚ', 'ࠚ'),
-    ('à €', 'à €'),
-    ('à š', 'à š'),
-    ('à °', 'à Ÿ'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄž', 'àĄž'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àąŽ'),
-    ('àą ', 'àŁ‰'),
-    ('à€ƒ', 'à€č'),
-    ('à€»', 'à€»'),
-    ('à€œ', 'à„€'),
-    ('à„‰', 'à„Œ'),
-    ('à„Ž', 'à„'),
-    ('à„˜', 'à„Ą'),
-    ('à„€', 'àŠ€'),
-    ('àŠ‚', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', 'àŠœ'),
-    ('àŠż', 'ী'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৌ'),
-    ('ৎ', 'ৎ'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'à§Ą'),
-    ('৊', 'ড়'),
-    ('àšƒ', 'àšƒ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('àšŸ', 'ੀ'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('੊', 'à©Ż'),
-    ('à©Č', '੎'),
-    ('à©¶', 'à©¶'),
-    ('àȘƒ', 'àȘƒ'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', 'ી'),
-    ('ૉ', 'ૉ'),
-    ('ો', 'ૌ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à«Ą'),
-    ('૊', '૱'),
-    ('à«č', 'à«č'),
-    ('àŹ‚', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', 'àŹœ'),
-    ('ୀ', 'ୀ'),
-    ('େ', 'ୈ'),
-    ('ୋ', 'ୌ'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­Š', 'à­·'),
-    ('àźƒ', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('àźż', 'àźż'),
-    ('àŻ', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', 'àŻŒ'),
-    ('àŻ', 'àŻ'),
-    ('àŻŠ', 'àŻș'),
-    ('ఁ', 'ః'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('జ', 'జ'),
-    ('ు', 'ౄ'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', 'à±Ą'),
-    ('ొ', 'à±Ż'),
-    ('à±·', 'àȀ'),
-    ('àȂ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àČŸ'),
-    ('àł', 'àł'),
-    ('àłƒ', 'àł„'),
-    ('àł', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àłŠ', 'àłŻ'),
-    ('àł±', 'àłł'),
-    ('àŽ‚', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', 'àŽœ'),
-    ('àŽż', 'à”€'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Œ'),
-    ('à”Ž', 'à”'),
-    ('à””', 'à”–'),
-    ('à”˜', 'à”Ą'),
-    ('à”Š', 'à”ż'),
-    ('ං', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('ැ', 'ෑ'),
-    ('ෘ', 'ෞ'),
-    ('à·Š', 'à·Ż'),
-    ('à·Č', 'à·Ž'),
-    ('àž', 'àž°'),
-    ('àžČ', 'àžł'),
-    ('àžż', 'àč†'),
-    ('àč', 'àč›'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àș°'),
-    ('àșČ', 'àșł'),
-    ('àșœ', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('໐', '໙'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ—'),
-    ('àŒš', 'àŒŽ'),
-    ('àŒ¶', 'àŒ¶'),
-    ('àŒž', 'àŒž'),
-    ('àŒș', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('àœż', 'àœż'),
-    ('àŸ…', 'àŸ…'),
-    ('àŸˆ', 'àŸŒ'),
-    ('àŸŸ', 'àż…'),
-    ('àż‡', 'àżŒ'),
-    ('àżŽ', 'àżš'),
-    ('က', 'ာ'),
-    ('ေ', 'ေ'),
-    ('ှ', 'ှ'),
-    ('ျ', 'ဌ'),
-    ('ဿ', 'ၗ'),
-    ('ၚ', 'ၝ'),
-    ('ၥ', 'ၰ'),
-    ('ၔ', 'ႁ'),
-    ('ႃ', 'ႄ'),
-    ('ႇ', 'ႌ'),
-    ('ႎ', 'ႜ'),
-    ('႞', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('፠', 'ፌ'),
-    ('ᎀ', '᎙'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('᐀', '᚜'),
-    ('ᚠ', '᛾'),
-    ('ᜀ', 'ᜑ'),
-    ('ᜟ', 'ᜱ'),
-    ('᜔', '᜶'),
-    ('ᝀ', 'ᝑ'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('ក', 'ឳ'),
-    ('ា', 'ា'),
-    ('ស', 'ៅ'),
-    ('ះ', 'ៈ'),
-    ('។', 'ៜ'),
-    ('០', '៩'),
-    ('៰', 'áŸč'),
-    ('᠀', '᠊'),
-    ('᠐', '᠙'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', '᱄'),
-    ('᱇', 'ᱹ'),
-    ('áąȘ', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('ဣ', 'ည'),
-    ('ဩ', 'ါ'),
-    ('ူ', 'ေ'),
-    ('ဳ', 'သ'),
-    ('á„€', 'á„€'),
-    ('á„„', 'á„­'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('᧐', '᧚'),
-    ('᧞', 'Ṗ'),
-    ('ṙ', 'Ṛ'),
-    ('᚞', 'ᩕ'),
-    ('ᩗ', 'ᩗ'),
-    ('ᩥ', 'ᩥ'),
-    ('ᩣ', 'ᩀ'),
-    ('á©­', 'á©Č'),
-    ('áȘ€', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('áȘ ', 'áȘ­'),
-    ('ᬄ', 'ᬳ'),
-    ('áŹŸ', 'ᭁ'),
-    ('ᭅ', 'ᭌ'),
-    ('᭎', 'á­Ș'),
-    ('á­Ž', 'á­ż'),
-    ('ἂ', 'ἡ'),
-    ('៊', '៧'),
-    ('៟', 'ᯄ'),
-    ('ᯧ', 'ᯧ'),
-    ('áŻȘ', 'ᯏ'),
-    ('ᯟ', 'ᯟ'),
-    ('áŻŒ', 'á°«'),
-    ('á°Ž', 'á°”'),
-    ('᰻', '᱉'),
-    ('ᱍ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', '᳇'),
-    ('᳓', '᳓'),
-    ('᳥', '᳥'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł·'),
-    ('áłș', 'áłș'),
-    ('ᮀ', 'á¶ż'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', 'ῄ'),
-    ('ῆ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('῝', '`'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŸ'),
-    ('\u{2000}', '\u{200a}'),
-    ('‐', '‧'),
-    ('\u{202f}', '\u{205f}'),
-    ('⁰', 'ⁱ'),
-    ('', '₎'),
-    ('ₐ', 'ₜ'),
-    ('₠', '⃀'),
-    ('℀', '↋'),
-    ('←', '␩'),
-    ('⑀', '⑊'),
-    ('①', '⭳'),
-    ('â­¶', '⼕'),
-    ('⼗', '⳼'),
-    ('âłČ', 'âłł'),
-    ('âłč', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┰'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('⾀', 'âč'),
-    ('âș€', 'âș™'),
-    ('âș›', '⻳'),
-    ('⌀', '⿕'),
-    ('âż°', '〩'),
-    ('〰', '〿'),
-    ('ぁ', 'ゖ'),
-    ('゛', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('㆐', '㇄'),
-    ('㇯', '㈞'),
-    ('㈠', 'ꒌ'),
-    ('꒐', '꓆'),
-    ('ꓐ', 'ꘫ'),
-    ('Ꙁ', 'ê™ź'),
-    ('ê™ł', 'ê™ł'),
-    ('ꙟ', 'ꚝ'),
-    ('ꚠ', 'ê›Ż'),
-    ('ê›Č', '꛷'),
-    ('꜀', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ꠁ'),
-    ('ꠃ', 'ꠅ'),
-    ('ꠇ', 'ꠊ'),
-    ('ꠌ', 'ê €'),
-    ('ê §', 'ê «'),
-    ('ê °', 'ê č'),
-    ('êĄ€', 'êĄ·'),
-    ('êą€', 'êŁƒ'),
-    ('êŁŽ', 'êŁ™'),
-    ('êŁČ', 'êŁŸ'),
-    ('ꀀ', 'ꀄ'),
-    ('ê€ź', 'ꄆ'),
-    ('ê„’', 'ê„’'),
-    ('ꄟ', 'ꄌ'),
-    ('ꊃ', 'êŠČ'),
-    ('ꊎ', 'ꊔ'),
-    ('êŠș', 'ꊻ'),
-    ('ꊟ', 'êŠż'),
-    ('꧁', '꧍'),
-    ('ꧏ', '꧙'),
-    ('꧞', '꧀'),
-    ('ê§Š', 'ê§Ÿ'),
-    ('Ꚁ', 'êšš'),
-    ('êšŻ', 'êš°'),
-    ('êšł', 'Ꚏ'),
-    ('ꩀ', 'ꩂ'),
-    ('ꩄ', 'ꩋ'),
-    ('ꩍ', 'ꩍ'),
-    ('꩐', '꩙'),
-    ('꩜', 'ꩻ'),
-    ('꩜', 'êȘŻ'),
-    ('êȘ±', 'êȘ±'),
-    ('êȘ”', 'êȘ¶'),
-    ('êȘč', 'êȘœ'),
-    ('ꫀ', 'ꫀ'),
-    ('ꫂ', 'ꫂ'),
-    ('ꫛ', 'ꫫ'),
-    ('ê«ź', 'ê«”'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ê­«'),
-    ('ê­°', 'êŻ€'),
-    ('êŻŠ', 'êŻ§'),
-    ('êŻ©', 'êŻŹ'),
-    ('êŻ°', 'êŻč'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹ'),
-    ('ïŹŸ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïŻ‚'),
-    ('ïŻ“', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('﷏', '﷏'),
-    ('ï·°', 'ï·ż'),
-    ('', 'ïž™'),
-    ('ïž°', 'ïč’'),
-    ('ïč”', 'ïčŠ'),
-    ('ïčš', 'ïč«'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('ïż ', 'ïżŠ'),
-    ('ïżš', 'ïżź'),
-    ('ïżŒ', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐄀', '𐄂'),
-    ('𐄇', '𐄳'),
-    ('𐄷', '𐆎'),
-    ('𐆐', '𐆜'),
-    ('𐆠', '𐆠'),
-    ('𐇐', 'đ‡Œ'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐋡', '𐋻'),
-    ('𐌀', '𐌣'),
-    ('𐌭', '𐍊'),
-    ('𐍐', 'đ”'),
-    ('𐎀', '𐎝'),
-    ('𐎟', '𐏃'),
-    ('𐏈', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒠', '𐒩'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕯', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐡗', '𐱞'),
-    ('𐹧', '𐹯'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐣻', '𐀛'),
-    ('đ€Ÿ', 'đ€č'),
-    ('𐀿', '𐀿'),
-    ('𐩀', '𐊷'),
-    ('đŠŒ', '𐧏'),
-    ('𐧒', '𐹀'),
-    ('𐹐', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩀', '𐩈'),
-    ('𐩐', '𐩘'),
-    ('𐩠', 'đȘŸ'),
-    ('𐫀', '𐫀'),
-    ('𐫫', '𐫶'),
-    ('𐬀', '𐏔'),
-    ('đŹč', '𐭕'),
-    ('𐭘', 'đ­Č'),
-    ('𐭾', '𐼑'),
-    ('𐼙', '𐼜'),
-    ('𐟩', '𐟯'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('đłș', '𐎣'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔄'),
-    ('𐔟', '𐶅'),
-    ('𐶎', 'đ¶'),
-    ('đč ', 'đčŸ'),
-    ('đș€', 'đș©'),
-    ('đș­', 'đș­'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('đŒ€', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ‘', 'đœ™'),
-    ('đœ°', 'đŸ'),
-    ('đŸ†', 'đŸ‰'),
-    ('đŸ°', '𐿋'),
-    ('𐿠', '𐿶'),
-    ('𑀀', '𑀀'),
-    ('𑀂', 'đ‘€·'),
-    ('𑁇', '𑁍'),
-    ('𑁒', '𑁯'),
-    ('𑁱', 'đ‘Č'),
-    ('𑁔', '𑁔'),
-    ('𑂂', 'đ‘‚Č'),
-    ('đ‘‚·', '𑂾'),
-    ('đ‘‚»', 'đ‘‚Œ'),
-    ('đ‘‚Ÿ', '𑃁'),
-    ('𑃐', '𑃹'),
-    ('𑃰', 'đ‘ƒč'),
-    ('𑄃', '𑄩'),
-    ('𑄬', '𑄬'),
-    ('đ‘„¶', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('𑅮', 'đ‘…¶'),
-    ('𑆂', '𑆔'),
-    ('𑆿', '𑆿'),
-    ('𑇁', '𑇈'),
-    ('𑇍', '𑇎'),
-    ('𑇐', '𑇟'),
-    ('𑇡', '𑇮'),
-    ('𑈀', '𑈑'),
-    ('𑈓', '𑈼'),
-    ('đ‘ˆČ', '𑈳'),
-    ('𑈾', 'đ‘ˆœ'),
-    ('𑈿', '𑉀'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊩'),
-    ('𑊰', '𑋞'),
-    ('𑋠', '𑋱'),
-    ('𑋰', 'đ‘‹č'),
-    ('𑌂', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', 'đ‘Œœ'),
-    ('𑌿', '𑌿'),
-    ('𑍁', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '𑍌'),
-    ('𑍐', '𑍐'),
-    ('𑍝', '𑍣'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '𑎷'),
-    ('đ‘Žč', 'đ‘Žș'),
-    ('𑏊', '𑏊'),
-    ('𑏌', '𑏍'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏕'),
-    ('𑏗', '𑏘'),
-    ('𑐀', '𑐷'),
-    ('𑑀', '𑑁'),
-    ('𑑅', '𑑅'),
-    ('𑑇', '𑑛'),
-    ('𑑝', '𑑝'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑒯'),
-    ('đ‘’±', 'đ‘’Č'),
-    ('đ‘’č', 'đ‘’č'),
-    ('đ‘’»', 'đ‘’Œ'),
-    ('đ‘’Ÿ', 'đ‘’Ÿ'),
-    ('𑓁', '𑓁'),
-    ('𑓄', '𑓇'),
-    ('𑓐', '𑓙'),
-    ('𑖀', '𑖼'),
-    ('𑖰', 'đ‘–±'),
-    ('𑖾', 'đ‘–»'),
-    ('đ‘–Ÿ', 'đ‘–Ÿ'),
-    ('𑗁', '𑗛'),
-    ('𑘀', 'đ‘˜Č'),
-    ('đ‘˜»', 'đ‘˜Œ'),
-    ('đ‘˜Ÿ', 'đ‘˜Ÿ'),
-    ('𑙁', '𑙄'),
-    ('𑙐', '𑙙'),
-    ('𑙠', '𑙬'),
-    ('𑚀', 'đ‘šȘ'),
-    ('𑚬', '𑚬'),
-    ('𑚼', '𑚯'),
-    ('𑚾', 'đ‘šč'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜀', '𑜚'),
-    ('𑜞', '𑜞'),
-    ('𑜠', '𑜡'),
-    ('𑜩', '𑜩'),
-    ('𑜰', '𑝆'),
-    ('𑠀', '𑠼'),
-    ('𑠾', '𑠾'),
-    ('đ‘ »', 'đ‘ »'),
-    ('𑱠', 'đ‘ŁČ'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', '𑀯'),
-    ('đ‘€±', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('𑀿', 'đ‘„‚'),
-    ('đ‘„„', '𑄆'),
-    ('𑄐', 'đ‘„™'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '𑧓'),
-    ('𑧜', '𑧟'),
-    ('𑧡', 'đ‘§€'),
-    ('𑹀', '𑹀'),
-    ('𑹋', 'đ‘šČ'),
-    ('đ‘šč', 'đ‘šș'),
-    ('𑹿', '𑩆'),
-    ('𑩐', '𑩐'),
-    ('đ‘©—', 'đ‘©˜'),
-    ('đ‘©œ', 'đ‘Ș‰'),
-    ('đ‘Ș—', 'đ‘Ș—'),
-    ('đ‘Șš', 'đ‘Șą'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑬀', '𑬉'),
-    ('𑯀', '𑯡'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '𑰯'),
-    ('đ‘°Ÿ', 'đ‘°Ÿ'),
-    ('𑱀', '𑱅'),
-    ('𑱐', '𑱏'),
-    ('𑱰', 'đ‘ȏ'),
-    ('đ‘Č©', 'đ‘Č©'),
-    ('đ‘ȱ', 'đ‘ȱ'),
-    ('đ‘ČŽ', 'đ‘ČŽ'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '𑮰'),
-    ('𑔆', '𑔆'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('đ‘¶“', 'đ‘¶”'),
-    ('đ‘¶–', 'đ‘¶–'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘» ', 'đ‘»Č'),
-    ('đ‘»”', '𑻞'),
-    ('đ‘Œ‚', 'đ‘Œ'),
-    ('đ‘Œ’', 'đ‘Œ”'),
-    ('đ‘ŒŸ', 'đ‘Œż'),
-    ('đ‘œƒ', 'đ‘œ™'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𑿀', '𑿱'),
-    ('𑿿', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒑰', '𒑮'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', 'đ’żČ'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '𖄝'),
-    ('đ–„Ș', '𖄬'),
-    ('𖄰', 'đ–„č'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–©ź', 'đ–ȘŸ'),
-    ('đ–«€', '𖫉'),
-    ('𖫐', 'đ–«­'),
-    ('đ–«”', 'đ–«”'),
-    ('𖬀', '𖬯'),
-    ('đ–Ź·', '𖭅'),
-    ('𖭐', '𖭙'),
-    ('𖭛', '𖭡'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', 'đ–”č'),
-    ('đ–č€', 'đ–șš'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('đ–œ', 'đ–Ÿ‡'),
-    ('đ–Ÿ“', 'đ–ŸŸ'),
-    ('𖿠', '𖿣'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('đ›Čœ', 'đ›Čœ'),
-    ('đ›ČŸ', 'đ›ČŸ'),
-    ('𜰀', 'đœłč'),
-    ('𜮀', 'đœșł'),
-    ('đœœ', '𜿃'),
-    ('𝀀', 'đƒ”'),
-    ('𝄀', '𝄩'),
-    ('đ„©', 'đ…€'),
-    ('đ…Ș', '𝅬'),
-    ('𝆃', '𝆄'),
-    ('𝆌', 'đ†©'),
-    ('𝆺𝅥', 'đ‡Ș'),
-    ('𝈀', '𝉁'),
-    ('𝉅', '𝉅'),
-    ('𝋀', '𝋓'),
-    ('𝋠', '𝋳'),
-    ('𝌀', '𝍖'),
-    ('𝍠', '𝍾'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝟋'),
-    ('𝟎', '𝧿'),
-    ('đš·', 'đšș'),
-    ('đ©­', 'đ©Ž'),
-    ('đ©¶', 'đȘƒ'),
-    ('đȘ…', 'đȘ‹'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞄀', '𞄬'),
-    ('đž„·', 'đž„œ'),
-    ('𞅀', '𞅉'),
-    ('𞅎', '𞅏'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞋰', 'đž‹č'),
-    ('𞋿', '𞋿'),
-    ('𞓐', 'đž“«'),
-    ('𞓰', 'đž“č'),
-    ('𞗐', '𞗭'),
-    ('𞗰', 'đž—ș'),
-    ('𞗿', '𞗿'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('𞣇', '𞣏'),
-    ('𞀀', 'đž„ƒ'),
-    ('đž„‹', 'đž„‹'),
-    ('𞄐', 'đž„™'),
-    ('𞄞', 'đž„Ÿ'),
-    ('đž±±', 'đžČŽ'),
-    ('𞮁', 'đžŽœ'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('đž»°', 'đž»±'),
-    ('🀀', 'đŸ€«'),
-    ('🀰', '🂓'),
-    ('🂠', '🂼'),
-    ('đŸ‚±', '🂿'),
-    ('🃁', '🃏'),
-    ('🃑', 'đŸƒ”'),
-    ('🄀', '🆭'),
-    ('🇩', '🈂'),
-    ('🈐', 'đŸˆ»'),
-    ('🉀', '🉈'),
-    ('🉐', '🉑'),
-    ('🉠', 'đŸ‰„'),
-    ('🌀', '🛗'),
-    ('🛜', '🛬'),
-    ('🛰', 'đŸ›Œ'),
-    ('🜀', 'đŸ¶'),
-    ('đŸ»', '🟙'),
-    ('🟠', 'đŸŸ«'),
-    ('🟰', '🟰'),
-    ('🠀', '🠋'),
-    ('🠐', '🡇'),
-    ('🡐', '🡙'),
-    ('🡠', '🱇'),
-    ('🱐', '🱭'),
-    ('🱰', 'đŸą»'),
-    ('🣀', '🣁'),
-    ('đŸ€€', 'đŸ©“'),
-    ('đŸ© ', 'đŸ©­'),
-    ('đŸ©°', 'đŸ©Œ'),
-    ('đŸȘ€', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸ«†'),
-    ('đŸ«Ž', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«©'),
-    ('đŸ«°', 'đŸ«ž'),
-    ('🬀', '🼒'),
-    ('🼔', 'đŸŻč'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const GRAPHEME_EXTEND: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{483}', '\u{489}'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6df}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', '\u{7f3}'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{819}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('\u{897}', '\u{89f}'),
-    ('\u{8ca}', '\u{8e1}'),
-    ('\u{8e3}', '\u{902}'),
-    ('\u{93a}', '\u{93a}'),
-    ('\u{93c}', '\u{93c}'),
-    ('\u{941}', '\u{948}'),
-    ('\u{94d}', '\u{94d}'),
-    ('\u{951}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('\u{981}', '\u{981}'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9be}', '\u{9be}'),
-    ('\u{9c1}', '\u{9c4}'),
-    ('\u{9cd}', '\u{9cd}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', '\u{a02}'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('\u{a41}', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', '\u{a82}'),
-    ('\u{abc}', '\u{abc}'),
-    ('\u{ac1}', '\u{ac5}'),
-    ('\u{ac7}', '\u{ac8}'),
-    ('\u{acd}', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{b01}', '\u{b01}'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3e}', '\u{b3f}'),
-    ('\u{b41}', '\u{b44}'),
-    ('\u{b4d}', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bbe}', '\u{bbe}'),
-    ('\u{bc0}', '\u{bc0}'),
-    ('\u{bcd}', '\u{bcd}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', '\u{c00}'),
-    ('\u{c04}', '\u{c04}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', '\u{c40}'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', '\u{c81}'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('\u{cbf}', '\u{cc0}'),
-    ('\u{cc2}', '\u{cc2}'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('\u{d00}', '\u{d01}'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d3e}', '\u{d3e}'),
-    ('\u{d41}', '\u{d44}'),
-    ('\u{d4d}', '\u{d4d}'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', '\u{d81}'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dcf}'),
-    ('\u{dd2}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('\u{ddf}', '\u{ddf}'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('\u{f71}', '\u{f7e}'),
-    ('\u{f80}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('\u{102d}', '\u{1030}'),
-    ('\u{1032}', '\u{1037}'),
-    ('\u{1039}', '\u{103a}'),
-    ('\u{103d}', '\u{103e}'),
-    ('\u{1058}', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{1082}'),
-    ('\u{1085}', '\u{1086}'),
-    ('\u{108d}', '\u{108d}'),
-    ('\u{109d}', '\u{109d}'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1715}'),
-    ('\u{1732}', '\u{1734}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17b5}'),
-    ('\u{17b7}', '\u{17bd}'),
-    ('\u{17c6}', '\u{17c6}'),
-    ('\u{17c9}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', '\u{1922}'),
-    ('\u{1927}', '\u{1928}'),
-    ('\u{1932}', '\u{1932}'),
-    ('\u{1939}', '\u{193b}'),
-    ('\u{1a17}', '\u{1a18}'),
-    ('\u{1a1b}', '\u{1a1b}'),
-    ('\u{1a56}', '\u{1a56}'),
-    ('\u{1a58}', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a60}'),
-    ('\u{1a62}', '\u{1a62}'),
-    ('\u{1a65}', '\u{1a6c}'),
-    ('\u{1a73}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', '\u{1b03}'),
-    ('\u{1b34}', '\u{1b3d}'),
-    ('\u{1b42}', '\u{1b44}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1b81}'),
-    ('\u{1ba2}', '\u{1ba5}'),
-    ('\u{1ba8}', '\u{1bad}'),
-    ('\u{1be6}', '\u{1be6}'),
-    ('\u{1be8}', '\u{1be9}'),
-    ('\u{1bed}', '\u{1bed}'),
-    ('\u{1bef}', '\u{1bf3}'),
-    ('\u{1c2c}', '\u{1c33}'),
-    ('\u{1c36}', '\u{1c37}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce0}'),
-    ('\u{1ce2}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{200c}', '\u{200c}'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{302a}', '\u{302f}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{a66f}', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('\u{a825}', '\u{a826}'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('\u{a8c4}', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a951}'),
-    ('\u{a953}', '\u{a953}'),
-    ('\u{a980}', '\u{a982}'),
-    ('\u{a9b3}', '\u{a9b3}'),
-    ('\u{a9b6}', '\u{a9b9}'),
-    ('\u{a9bc}', '\u{a9bd}'),
-    ('\u{a9c0}', '\u{a9c0}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa2e}'),
-    ('\u{aa31}', '\u{aa32}'),
-    ('\u{aa35}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', '\u{aa4c}'),
-    ('\u{aa7c}', '\u{aa7c}'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('\u{aaec}', '\u{aaed}'),
-    ('\u{aaf6}', '\u{aaf6}'),
-    ('\u{abe5}', '\u{abe5}'),
-    ('\u{abe8}', '\u{abe8}'),
-    ('\u{abed}', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('\u{11001}', '\u{11001}'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '\u{11081}'),
-    ('\u{110b3}', '\u{110b6}'),
-    ('\u{110b9}', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{1112b}'),
-    ('\u{1112d}', '\u{11134}'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '\u{11181}'),
-    ('\u{111b6}', '\u{111be}'),
-    ('\u{111c0}', '\u{111c0}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('\u{111cf}', '\u{111cf}'),
-    ('\u{1122f}', '\u{11231}'),
-    ('\u{11234}', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112df}'),
-    ('\u{112e3}', '\u{112ea}'),
-    ('\u{11300}', '\u{11301}'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{1133e}', '\u{1133e}'),
-    ('\u{11340}', '\u{11340}'),
-    ('\u{1134d}', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113b8}', '\u{113b8}'),
-    ('\u{113bb}', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '\u{113c9}'),
-    ('\u{113ce}', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('\u{11438}', '\u{1143f}'),
-    ('\u{11442}', '\u{11444}'),
-    ('\u{11446}', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b0}', '\u{114b0}'),
-    ('\u{114b3}', '\u{114b8}'),
-    ('\u{114ba}', '\u{114ba}'),
-    ('\u{114bd}', '\u{114bd}'),
-    ('\u{114bf}', '\u{114c0}'),
-    ('\u{114c2}', '\u{114c3}'),
-    ('\u{115af}', '\u{115af}'),
-    ('\u{115b2}', '\u{115b5}'),
-    ('\u{115bc}', '\u{115bd}'),
-    ('\u{115bf}', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('\u{11633}', '\u{1163a}'),
-    ('\u{1163d}', '\u{1163d}'),
-    ('\u{1163f}', '\u{11640}'),
-    ('\u{116ab}', '\u{116ab}'),
-    ('\u{116ad}', '\u{116ad}'),
-    ('\u{116b0}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1171d}'),
-    ('\u{1171f}', '\u{1171f}'),
-    ('\u{11722}', '\u{11725}'),
-    ('\u{11727}', '\u{1172b}'),
-    ('\u{1182f}', '\u{11837}'),
-    ('\u{11839}', '\u{1183a}'),
-    ('\u{11930}', '\u{11930}'),
-    ('\u{1193b}', '\u{1193e}'),
-    ('\u{11943}', '\u{11943}'),
-    ('\u{119d4}', '\u{119d7}'),
-    ('\u{119da}', '\u{119db}'),
-    ('\u{119e0}', '\u{119e0}'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', '\u{11a38}'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a56}'),
-    ('\u{11a59}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a96}'),
-    ('\u{11a98}', '\u{11a99}'),
-    ('\u{11c30}', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3d}'),
-    ('\u{11c3f}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('\u{11caa}', '\u{11cb0}'),
-    ('\u{11cb2}', '\u{11cb3}'),
-    ('\u{11cb5}', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('\u{11d95}', '\u{11d95}'),
-    ('\u{11d97}', '\u{11d97}'),
-    ('\u{11ef3}', '\u{11ef4}'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('\u{11f36}', '\u{11f3a}'),
-    ('\u{11f40}', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13440}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{16129}'),
-    ('\u{1612d}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e4ec}', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e94a}'),
-    ('\u{e0020}', '\u{e007f}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const GRAPHEME_LINK: &'static [(char, char)] = &[
-    ('\u{94d}', '\u{94d}'),
-    ('\u{9cd}', '\u{9cd}'),
-    ('\u{a4d}', '\u{a4d}'),
-    ('\u{acd}', '\u{acd}'),
-    ('\u{b4d}', '\u{b4d}'),
-    ('\u{bcd}', '\u{bcd}'),
-    ('\u{c4d}', '\u{c4d}'),
-    ('\u{ccd}', '\u{ccd}'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d4d}', '\u{d4d}'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{e3a}', '\u{e3a}'),
-    ('\u{eba}', '\u{eba}'),
-    ('\u{f84}', '\u{f84}'),
-    ('\u{1039}', '\u{103a}'),
-    ('\u{1714}', '\u{1715}'),
-    ('\u{1734}', '\u{1734}'),
-    ('\u{17d2}', '\u{17d2}'),
-    ('\u{1a60}', '\u{1a60}'),
-    ('\u{1b44}', '\u{1b44}'),
-    ('\u{1baa}', '\u{1bab}'),
-    ('\u{1bf2}', '\u{1bf3}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('\u{a8c4}', '\u{a8c4}'),
-    ('\u{a953}', '\u{a953}'),
-    ('\u{a9c0}', '\u{a9c0}'),
-    ('\u{aaf6}', '\u{aaf6}'),
-    ('\u{abed}', '\u{abed}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{11046}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{1107f}', '\u{1107f}'),
-    ('\u{110b9}', '\u{110b9}'),
-    ('\u{11133}', '\u{11134}'),
-    ('\u{111c0}', '\u{111c0}'),
-    ('\u{11235}', '\u{11235}'),
-    ('\u{112ea}', '\u{112ea}'),
-    ('\u{1134d}', '\u{1134d}'),
-    ('\u{113ce}', '\u{113d0}'),
-    ('\u{11442}', '\u{11442}'),
-    ('\u{114c2}', '\u{114c2}'),
-    ('\u{115bf}', '\u{115bf}'),
-    ('\u{1163f}', '\u{1163f}'),
-    ('\u{116b6}', '\u{116b6}'),
-    ('\u{1172b}', '\u{1172b}'),
-    ('\u{11839}', '\u{11839}'),
-    ('\u{1193d}', '\u{1193e}'),
-    ('\u{119e0}', '\u{119e0}'),
-    ('\u{11a34}', '\u{11a34}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a99}', '\u{11a99}'),
-    ('\u{11c3f}', '\u{11c3f}'),
-    ('\u{11d44}', '\u{11d45}'),
-    ('\u{11d97}', '\u{11d97}'),
-    ('\u{11f41}', '\u{11f42}'),
-    ('\u{1612f}', '\u{1612f}'),
-];
-
-pub const HEX_DIGIT: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('A', 'F'),
-    ('a', 'f'),
-    ('', ''),
-    ('ïŒĄ', ''),
-    ('', ''),
-];
-
-pub const HYPHEN: &'static [(char, char)] = &[
-    ('-', '-'),
-    ('\u{ad}', '\u{ad}'),
-    ('֊', '֊'),
-    ('᠆', '᠆'),
-    ('‐', '‑'),
-    ('⾗', '⾗'),
-    ('・', '・'),
-    ('ïčŁ', 'ïčŁ'),
-    ('', ''),
-    ('', ''),
-];
-
-pub const IDS_BINARY_OPERATOR: &'static [(char, char)] =
-    &[('âż°', 'âż±'), ('⿎', 'âżœ'), ('㇯', '㇯')];
-
-pub const IDS_TRINARY_OPERATOR: &'static [(char, char)] = &[('âżČ', 'âżł')];
-
-pub const IDS_UNARY_OPERATOR: &'static [(char, char)] = &[('âżŸ', 'âżż')];
-
-pub const ID_COMPAT_MATH_CONTINUE: &'static [(char, char)] = &[
-    ('²', '³'),
-    ('¹', '¹'),
-    ('⁰', '⁰'),
-    ('⁎', ' '),
-    ('₀', '₎'),
-    ('∂', '∂'),
-    ('∇', '∇'),
-    ('∞', '∞'),
-    ('𝛁', '𝛁'),
-    ('𝛛', '𝛛'),
-    ('đ›»', 'đ›»'),
-    ('𝜕', '𝜕'),
-    ('đœ”', 'đœ”'),
-    ('𝝏', '𝝏'),
-    ('𝝯', '𝝯'),
-    ('𝞉', '𝞉'),
-    ('đž©', 'đž©'),
-    ('𝟃', '𝟃'),
-];
-
-pub const ID_COMPAT_MATH_START: &'static [(char, char)] = &[
-    ('∂', '∂'),
-    ('∇', '∇'),
-    ('∞', '∞'),
-    ('𝛁', '𝛁'),
-    ('𝛛', '𝛛'),
-    ('đ›»', 'đ›»'),
-    ('𝜕', '𝜕'),
-    ('đœ”', 'đœ”'),
-    ('𝝏', '𝝏'),
-    ('𝝯', '𝝯'),
-    ('𝞉', '𝞉'),
-    ('đž©', 'đž©'),
-    ('𝟃', '𝟃'),
-];
-
-pub const ID_CONTINUE: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('A', 'Z'),
-    ('_', '_'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('·', '·'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('\u{300}', 'ÍŽ'),
-    ('Ͷ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('\u{483}', '\u{487}'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', 'ՙ'),
-    ('ՠ', 'ֈ'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('\u{610}', '\u{61a}'),
-    ('Ű ', 'Ù©'),
-    ('Ùź', 'ۓ'),
-    ('ە', '\u{6dc}'),
-    ('\u{6df}', '\u{6e8}'),
-    ('\u{6ea}', 'ی'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', '\u{74a}'),
-    ('ʍ', 'Ț±'),
-    ('߀', 'ß”'),
-    ('ßș', 'ßș'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('ࠀ', '\u{82d}'),
-    ('àĄ€', '\u{85b}'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('\u{897}', '\u{8e1}'),
-    ('\u{8e3}', '\u{963}'),
-    ('à„Š', 'à„Ż'),
-    ('à„±', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('\u{9bc}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৎ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', '\u{9e3}'),
-    ('à§Š', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', 'àšƒ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('੊', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('\u{abc}', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('ૐ', 'ૐ'),
-    ('à« ', '\u{ae3}'),
-    ('૊', 'à«Ż'),
-    ('à«č', '\u{aff}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('\u{b3c}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', '\u{b63}'),
-    ('à­Š', 'à­Ż'),
-    ('à­±', 'à­±'),
-    ('\u{b82}', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('àŻ', 'àŻ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('àŻŠ', 'àŻŻ'),
-    ('\u{c00}', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('\u{c3c}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', '\u{c63}'),
-    ('ొ', 'à±Ż'),
-    ('àȀ', 'àȃ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('\u{cbc}', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àł', 'àłž'),
-    ('àł ', '\u{ce3}'),
-    ('àłŠ', 'àłŻ'),
-    ('àł±', 'àłł'),
-    ('\u{d00}', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Ž'),
-    ('à””', '\u{d57}'),
-    ('à”Ÿ', '\u{d63}'),
-    ('à”Š', 'à”Ż'),
-    ('à”ș', 'à”ż'),
-    ('\u{d81}', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Š', 'à·Ż'),
-    ('à·Č', 'à·ł'),
-    ('àž', '\u{e3a}'),
-    ('àč€', '\u{e4e}'),
-    ('àč', 'àč™'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('\u{ec8}', '\u{ece}'),
-    ('໐', '໙'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('\u{f18}', '\u{f19}'),
-    ('àŒ ', 'àŒ©'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('àŒŸ', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('\u{f71}', '\u{f84}'),
-    ('\u{f86}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('က', '၉'),
-    ('ၐ', '\u{109d}'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('\u{135d}', '\u{135f}'),
-    ('፩', '፱'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', '\u{1715}'),
-    ('ᜟ', '\u{1734}'),
-    ('ᝀ', '\u{1753}'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('\u{1772}', '\u{1773}'),
-    ('ក', '\u{17d3}'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', '\u{17dd}'),
-    ('០', '៩'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '᠙'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('ᄆ', 'á„­'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('᧐', '᧚'),
-    ('Ṁ', '\u{1a1b}'),
-    ('áš ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('áȘ§', 'áȘ§'),
-    ('\u{1ab0}', '\u{1abd}'),
-    ('\u{1abf}', '\u{1ace}'),
-    ('\u{1b00}', 'ᭌ'),
-    ('᭐', '᭙'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1bf3}'),
-    ('ᰀ', '\u{1c37}'),
-    ('᱀', '᱉'),
-    ('ᱍ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', 'áłș'),
-    ('ᮀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('\u{200c}', '\u{200d}'),
-    ('‿', '⁀'),
-    ('⁔', '⁔'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('\u{20d0}', '\u{20dc}'),
-    ('\u{20e1}', '\u{20e1}'),
-    ('\u{20e5}', '\u{20f0}'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('℘', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('\u{2d7f}', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('々', '〇'),
-    ('〡', '\u{302f}'),
-    ('〱', '〔'),
-    ('〾', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('\u{3099}', 'ゟ'),
-    ('ァ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘫ'),
-    ('Ꙁ', '\u{a66f}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('ê™ż', '\u{a6f1}'),
-    ('ꜗ', 'ꜟ'),
-    ('êœą', 'ꞈ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ê §'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('êĄ€', 'êĄł'),
-    ('êą€', '\u{a8c5}'),
-    ('êŁ', 'êŁ™'),
-    ('\u{a8e0}', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', '\u{a92d}'),
-    ('ꀰ', '\u{a953}'),
-    ('ꄠ', 'ꄌ'),
-    ('\u{a980}', '\u{a9c0}'),
-    ('ꧏ', '꧙'),
-    ('ê§ ', 'ê§Ÿ'),
-    ('Ꚁ', '\u{aa36}'),
-    ('ꩀ', 'ꩍ'),
-    ('꩐', '꩙'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ż'),
-    ('ê«Č', '\u{aaf6}'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êŻȘ'),
-    ('êŻŹ', '\u{abed}'),
-    ('êŻ°', 'êŻč'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('ïžł', ''),
-    ('ïč', 'ïč'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('', ''),
-    ('ïŒĄ', 'ïŒș'),
-    ('ïŒż', 'ïŒż'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', '\u{1037a}'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒠', '𐒩'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '\u{10ae6}'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '\u{10d27}'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔄'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('\u{10efc}', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', '\u{10f50}'),
-    ('đœ°', '\u{10f85}'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀀', '\u{11046}'),
-    ('𑁩', '𑁔'),
-    ('\u{1107f}', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('𑃐', '𑃹'),
-    ('𑃰', 'đ‘ƒč'),
-    ('\u{11100}', '\u{11134}'),
-    ('đ‘„¶', '𑄿'),
-    ('𑅄', '𑅇'),
-    ('𑅐', '\u{11173}'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('\u{11180}', '𑇄'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('𑇎', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', '\u{11237}'),
-    ('\u{1123e}', '\u{11241}'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '\u{112ea}'),
-    ('𑋰', 'đ‘‹č'),
-    ('\u{11300}', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('\u{1133b}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('𑍐', '𑍐'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍝', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏓'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('𑐀', '𑑊'),
-    ('𑑐', '𑑙'),
-    ('\u{1145e}', '𑑡'),
-    ('𑒀', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑓐', '𑓙'),
-    ('𑖀', '\u{115b5}'),
-    ('𑖾', '\u{115c0}'),
-    ('𑗘', '\u{115dd}'),
-    ('𑘀', '\u{11640}'),
-    ('𑙄', '𑙄'),
-    ('𑙐', '𑙙'),
-    ('𑚀', '𑚾'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜀', '𑜚'),
-    ('\u{1171d}', '\u{1172b}'),
-    ('𑜰', 'đ‘œč'),
-    ('𑝀', '𑝆'),
-    ('𑠀', '\u{1183a}'),
-    ('𑱠', '𑣩'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{11943}'),
-    ('𑄐', 'đ‘„™'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '\u{119d7}'),
-    ('\u{119da}', '𑧡'),
-    ('𑧣', 'đ‘§€'),
-    ('𑹀', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('𑩐', '\u{11a99}'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '\u{11c36}'),
-    ('\u{11c38}', '𑱀'),
-    ('𑱐', '𑱙'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d47}'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶˜'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘» ', 'đ‘»¶'),
-    ('\u{11f00}', 'đ‘Œ'),
-    ('đ‘Œ’', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f42}'),
-    ('đ‘œ', '\u{11f5a}'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('\u{13440}', '\u{13455}'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', 'đ–„č'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('đ–«€', '𖫉'),
-    ('𖫐', 'đ–«­'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('𖬀', '\u{16b36}'),
-    ('𖭀', '𖭃'),
-    ('𖭐', '𖭙'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–”°', 'đ–”č'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('\u{16f4f}', 'đ–Ÿ‡'),
-    ('\u{16f8f}', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('𜳰', 'đœłč'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('𝟎', '𝟿'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('𞄀', '𞄬'),
-    ('\u{1e130}', 'đž„œ'),
-    ('𞅀', '𞅉'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '\u{1e2ae}'),
-    ('𞋀', 'đž‹č'),
-    ('𞓐', 'đž“č'),
-    ('𞗐', 'đž—ș'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('𞀀', 'đž„‹'),
-    ('𞄐', 'đž„™'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('🯰', 'đŸŻč'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const ID_START: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('Ͱ', '͎'),
-    ('Ͷ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', 'ՙ'),
-    ('ՠ', 'ֈ'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('Ű ', 'ي'),
-    ('Ùź', 'ÙŻ'),
-    ('ٱ', 'ۓ'),
-    ('ە', 'ە'),
-    ('Û„', 'ÛŠ'),
-    ('Ûź', 'ÛŻ'),
-    ('Ûș', 'ÛŒ'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', 'ܐ'),
-    ('ܒ', 'ܯ'),
-    ('ʍ', 'Ț„'),
-    ('Ț±', 'Ț±'),
-    ('ߊ', 'ßȘ'),
-    ('ߎ', 'ߔ'),
-    ('ßș', 'ßș'),
-    ('ࠀ', 'ࠕ'),
-    ('ࠚ', 'ࠚ'),
-    ('à €', 'à €'),
-    ('à š', 'à š'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('àą ', 'àŁ‰'),
-    ('à€„', 'à€č'),
-    ('à€œ', 'à€œ'),
-    ('à„', 'à„'),
-    ('à„˜', 'à„Ą'),
-    ('à„±', 'àŠ€'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', 'àŠœ'),
-    ('ৎ', 'ৎ'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'à§Ą'),
-    ('à§°', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('à©Č', '੎'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', 'àȘœ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à«Ą'),
-    ('à«č', 'à«č'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', 'àŹœ'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­±', 'à­±'),
-    ('àźƒ', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('àŻ', 'àŻ'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('జ', 'జ'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', 'à±Ą'),
-    ('àȀ', 'àȀ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àČœ'),
-    ('àł', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àł±', 'àłČ'),
-    ('àŽ„', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', 'àŽœ'),
-    ('à”Ž', 'à”Ž'),
-    ('à””', 'à”–'),
-    ('à”Ÿ', 'à”Ą'),
-    ('à”ș', 'à”ż'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('àž', 'àž°'),
-    ('àžČ', 'àžł'),
-    ('àč€', 'àč†'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àș°'),
-    ('àșČ', 'àșł'),
-    ('àșœ', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('àœ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('àŸˆ', 'àŸŒ'),
-    ('က', 'á€Ș'),
-    ('ဿ', 'ဿ'),
-    ('ၐ', 'ၕ'),
-    ('ၚ', 'ၝ'),
-    ('ၥ', 'ၥ'),
-    ('၄', '၊'),
-    ('ၟ', 'ၰ'),
-    ('ၔ', 'ႁ'),
-    ('ႎ', 'ႎ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', 'ᜑ'),
-    ('ᜟ', 'ᜱ'),
-    ('ᝀ', 'ᝑ'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('ក', 'ឳ'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', 'ៜ'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', 'ᱹ'),
-    ('áąȘ', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('ᄐ', 'ᄭ'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('Ṁ', 'Ṗ'),
-    ('áš ', 'ᩔ'),
-    ('áȘ§', 'áȘ§'),
-    ('ᬅ', 'ᬳ'),
-    ('ᭅ', 'ᭌ'),
-    ('ៃ', '០'),
-    ('៟', '៯'),
-    ('áźș', 'ᯄ'),
-    ('ᰀ', 'ᰣ'),
-    ('ᱍ', 'ᱏ'),
-    ('ᱚ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł¶'),
-    ('áłș', 'áłș'),
-    ('ᮀ', 'á¶ż'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('℘', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('々', '〇'),
-    ('〡', '〩'),
-    ('〱', '〔'),
-    ('〾', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('゛', 'ゟ'),
-    ('ァ', 'ăƒș'),
-    ('ăƒŒ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘟ'),
-    ('ê˜Ș', 'ꘫ'),
-    ('Ꙁ', 'ê™ź'),
-    ('ê™ż', 'ꚝ'),
-    ('ꚠ', 'ê›Ż'),
-    ('ꜗ', 'ꜟ'),
-    ('êœą', 'ꞈ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ꠁ'),
-    ('ꠃ', 'ꠅ'),
-    ('ꠇ', 'ꠊ'),
-    ('ꠌ', 'ê ą'),
-    ('êĄ€', 'êĄł'),
-    ('êą‚', 'êął'),
-    ('êŁČ', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', 'êŁŸ'),
-    ('ꀊ', 'ꀄ'),
-    ('ꀰ', 'ꄆ'),
-    ('ꄠ', 'ꄌ'),
-    ('ꊄ', 'êŠČ'),
-    ('ꧏ', 'ꧏ'),
-    ('ê§ ', 'ê§€'),
-    ('ê§Š', 'ê§Ż'),
-    ('ê§ș', 'ê§Ÿ'),
-    ('Ꚁ', 'êšš'),
-    ('ꩀ', 'ꩂ'),
-    ('ꩄ', 'ꩋ'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', 'ê©ș'),
-    ('꩟', 'êȘŻ'),
-    ('êȘ±', 'êȘ±'),
-    ('êȘ”', 'êȘ¶'),
-    ('êȘč', 'êȘœ'),
-    ('ꫀ', 'ꫀ'),
-    ('ꫂ', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ș'),
-    ('ê«Č', '꫎'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êŻą'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹ'),
-    ('ïŹŸ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', 'đ”'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '𐹀'),
-    ('𐹐', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '𐫀'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '𐮣'),
-    ('𐔊', '𐔄'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('đŒ€', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ°', 'đŸ'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀃', 'đ‘€·'),
-    ('𑁱', 'đ‘Č'),
-    ('𑁔', '𑁔'),
-    ('𑂃', '𑂯'),
-    ('𑃐', '𑃹'),
-    ('𑄃', '𑄩'),
-    ('𑅄', '𑅄'),
-    ('𑅇', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('𑆃', 'đ‘†Č'),
-    ('𑇁', '𑇄'),
-    ('𑇚', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', 'đ‘ˆ«'),
-    ('𑈿', '𑉀'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '𑋞'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', 'đ‘Œœ'),
-    ('𑍐', '𑍐'),
-    ('𑍝', '𑍡'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '𑎷'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏓'),
-    ('𑐀', '𑐮'),
-    ('𑑇', '𑑊'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑒯'),
-    ('𑓄', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑖀', '𑖼'),
-    ('𑗘', '𑗛'),
-    ('𑘀', '𑘯'),
-    ('𑙄', '𑙄'),
-    ('𑚀', 'đ‘šȘ'),
-    ('𑚾', '𑚾'),
-    ('𑜀', '𑜚'),
-    ('𑝀', '𑝆'),
-    ('𑠀', 'đ‘ «'),
-    ('𑱠', '𑣟'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', '𑀯'),
-    ('𑀿', '𑀿'),
-    ('𑄁', '𑄁'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '𑧐'),
-    ('𑧡', '𑧡'),
-    ('𑧣', '𑧣'),
-    ('𑹀', '𑹀'),
-    ('𑹋', 'đ‘šČ'),
-    ('đ‘šș', 'đ‘šș'),
-    ('𑩐', '𑩐'),
-    ('đ‘©œ', 'đ‘Ș‰'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '𑰼'),
-    ('𑱀', '𑱀'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '𑮰'),
-    ('𑔆', '𑔆'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', '𑶉'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘» ', 'đ‘»Č'),
-    ('đ‘Œ‚', 'đ‘Œ‚'),
-    ('đ‘Œ„', 'đ‘Œ'),
-    ('đ‘Œ’', 'đ‘Œł'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '𖄝'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('𖫐', 'đ–«­'),
-    ('𖬀', '𖬯'),
-    ('𖭀', '𖭃'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('đ–œ', 'đ–œ'),
-    ('đ–Ÿ“', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞄀', '𞄬'),
-    ('đž„·', 'đž„œ'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞓐', 'đž“«'),
-    ('𞗐', '𞗭'),
-    ('𞗰', '𞗰'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('𞀀', 'đž„ƒ'),
-    ('đž„‹', 'đž„‹'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const IDEOGRAPHIC: &'static [(char, char)] = &[
-    ('〆', '〇'),
-    ('〡', '〩'),
-    ('〾', 'ă€ș'),
-    ('㐀', 'ä¶ż'),
-    ('侀', '鿿'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𛅰', '𛋻'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const INCB: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{483}', '\u{489}'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6df}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', '\u{7f3}'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{819}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('\u{897}', '\u{89f}'),
-    ('\u{8ca}', '\u{8e1}'),
-    ('\u{8e3}', '\u{902}'),
-    ('à€•', '\u{93a}'),
-    ('\u{93c}', '\u{93c}'),
-    ('\u{941}', '\u{948}'),
-    ('\u{94d}', '\u{94d}'),
-    ('\u{951}', 'à„Ÿ'),
-    ('\u{962}', '\u{963}'),
-    ('à„ž', 'à„ż'),
-    ('\u{981}', '\u{981}'),
-    ('àŠ•', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9be}', '\u{9be}'),
-    ('\u{9c1}', '\u{9c4}'),
-    ('\u{9cd}', '\u{9cd}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'য়'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('à§°', 'à§±'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', '\u{a02}'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('\u{a41}', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', '\u{a82}'),
-    ('àȘ•', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('\u{abc}', '\u{abc}'),
-    ('\u{ac1}', '\u{ac5}'),
-    ('\u{ac7}', '\u{ac8}'),
-    ('\u{acd}', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('à«č', '\u{aff}'),
-    ('\u{b01}', '\u{b01}'),
-    ('àŹ•', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3e}', '\u{b3f}'),
-    ('\u{b41}', '\u{b44}'),
-    ('\u{b4d}', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'ୟ'),
-    ('\u{b62}', '\u{b63}'),
-    ('à­±', 'à­±'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bbe}', '\u{bbe}'),
-    ('\u{bc0}', '\u{bc0}'),
-    ('\u{bcd}', '\u{bcd}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', '\u{c00}'),
-    ('\u{c04}', '\u{c04}'),
-    ('క', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', '\u{c40}'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('ౘ', 'ౚ'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', '\u{c81}'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('\u{cbf}', '\u{cc0}'),
-    ('\u{cc2}', '\u{cc2}'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('\u{d00}', '\u{d01}'),
-    ('àŽ•', '\u{d3c}'),
-    ('\u{d3e}', '\u{d3e}'),
-    ('\u{d41}', '\u{d44}'),
-    ('\u{d4d}', '\u{d4d}'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', '\u{d81}'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dcf}'),
-    ('\u{dd2}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('\u{ddf}', '\u{ddf}'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('\u{f71}', '\u{f7e}'),
-    ('\u{f80}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('\u{102d}', '\u{1030}'),
-    ('\u{1032}', '\u{1037}'),
-    ('\u{1039}', '\u{103a}'),
-    ('\u{103d}', '\u{103e}'),
-    ('\u{1058}', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{1082}'),
-    ('\u{1085}', '\u{1086}'),
-    ('\u{108d}', '\u{108d}'),
-    ('\u{109d}', '\u{109d}'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1715}'),
-    ('\u{1732}', '\u{1734}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17b5}'),
-    ('\u{17b7}', '\u{17bd}'),
-    ('\u{17c6}', '\u{17c6}'),
-    ('\u{17c9}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', '\u{1922}'),
-    ('\u{1927}', '\u{1928}'),
-    ('\u{1932}', '\u{1932}'),
-    ('\u{1939}', '\u{193b}'),
-    ('\u{1a17}', '\u{1a18}'),
-    ('\u{1a1b}', '\u{1a1b}'),
-    ('\u{1a56}', '\u{1a56}'),
-    ('\u{1a58}', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a60}'),
-    ('\u{1a62}', '\u{1a62}'),
-    ('\u{1a65}', '\u{1a6c}'),
-    ('\u{1a73}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', '\u{1b03}'),
-    ('\u{1b34}', '\u{1b3d}'),
-    ('\u{1b42}', '\u{1b44}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1b81}'),
-    ('\u{1ba2}', '\u{1ba5}'),
-    ('\u{1ba8}', '\u{1bad}'),
-    ('\u{1be6}', '\u{1be6}'),
-    ('\u{1be8}', '\u{1be9}'),
-    ('\u{1bed}', '\u{1bed}'),
-    ('\u{1bef}', '\u{1bf3}'),
-    ('\u{1c2c}', '\u{1c33}'),
-    ('\u{1c36}', '\u{1c37}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce0}'),
-    ('\u{1ce2}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{200d}', '\u{200d}'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{302a}', '\u{302f}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{a66f}', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('\u{a825}', '\u{a826}'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('\u{a8c4}', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a951}'),
-    ('\u{a953}', '\u{a953}'),
-    ('\u{a980}', '\u{a982}'),
-    ('\u{a9b3}', '\u{a9b3}'),
-    ('\u{a9b6}', '\u{a9b9}'),
-    ('\u{a9bc}', '\u{a9bd}'),
-    ('\u{a9c0}', '\u{a9c0}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa2e}'),
-    ('\u{aa31}', '\u{aa32}'),
-    ('\u{aa35}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', '\u{aa4c}'),
-    ('\u{aa7c}', '\u{aa7c}'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('\u{aaec}', '\u{aaed}'),
-    ('\u{aaf6}', '\u{aaf6}'),
-    ('\u{abe5}', '\u{abe5}'),
-    ('\u{abe8}', '\u{abe8}'),
-    ('\u{abed}', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('\u{11001}', '\u{11001}'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '\u{11081}'),
-    ('\u{110b3}', '\u{110b6}'),
-    ('\u{110b9}', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{1112b}'),
-    ('\u{1112d}', '\u{11134}'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '\u{11181}'),
-    ('\u{111b6}', '\u{111be}'),
-    ('\u{111c0}', '\u{111c0}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('\u{111cf}', '\u{111cf}'),
-    ('\u{1122f}', '\u{11231}'),
-    ('\u{11234}', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112df}'),
-    ('\u{112e3}', '\u{112ea}'),
-    ('\u{11300}', '\u{11301}'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{1133e}', '\u{1133e}'),
-    ('\u{11340}', '\u{11340}'),
-    ('\u{1134d}', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113b8}', '\u{113b8}'),
-    ('\u{113bb}', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '\u{113c9}'),
-    ('\u{113ce}', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('\u{11438}', '\u{1143f}'),
-    ('\u{11442}', '\u{11444}'),
-    ('\u{11446}', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b0}', '\u{114b0}'),
-    ('\u{114b3}', '\u{114b8}'),
-    ('\u{114ba}', '\u{114ba}'),
-    ('\u{114bd}', '\u{114bd}'),
-    ('\u{114bf}', '\u{114c0}'),
-    ('\u{114c2}', '\u{114c3}'),
-    ('\u{115af}', '\u{115af}'),
-    ('\u{115b2}', '\u{115b5}'),
-    ('\u{115bc}', '\u{115bd}'),
-    ('\u{115bf}', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('\u{11633}', '\u{1163a}'),
-    ('\u{1163d}', '\u{1163d}'),
-    ('\u{1163f}', '\u{11640}'),
-    ('\u{116ab}', '\u{116ab}'),
-    ('\u{116ad}', '\u{116ad}'),
-    ('\u{116b0}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1171d}'),
-    ('\u{1171f}', '\u{1171f}'),
-    ('\u{11722}', '\u{11725}'),
-    ('\u{11727}', '\u{1172b}'),
-    ('\u{1182f}', '\u{11837}'),
-    ('\u{11839}', '\u{1183a}'),
-    ('\u{11930}', '\u{11930}'),
-    ('\u{1193b}', '\u{1193e}'),
-    ('\u{11943}', '\u{11943}'),
-    ('\u{119d4}', '\u{119d7}'),
-    ('\u{119da}', '\u{119db}'),
-    ('\u{119e0}', '\u{119e0}'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', '\u{11a38}'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a56}'),
-    ('\u{11a59}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a96}'),
-    ('\u{11a98}', '\u{11a99}'),
-    ('\u{11c30}', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3d}'),
-    ('\u{11c3f}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('\u{11caa}', '\u{11cb0}'),
-    ('\u{11cb2}', '\u{11cb3}'),
-    ('\u{11cb5}', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('\u{11d95}', '\u{11d95}'),
-    ('\u{11d97}', '\u{11d97}'),
-    ('\u{11ef3}', '\u{11ef4}'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('\u{11f36}', '\u{11f3a}'),
-    ('\u{11f40}', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13440}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{16129}'),
-    ('\u{1612d}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e4ec}', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e94a}'),
-    ('đŸ»', '🏿'),
-    ('\u{e0020}', '\u{e007f}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const JOIN_CONTROL: &'static [(char, char)] = &[('\u{200c}', '\u{200d}')];
-
-pub const LOGICAL_ORDER_EXCEPTION: &'static [(char, char)] = &[
-    ('àč€', 'àč„'),
-    ('ເ', 'ໄ'),
-    ('ኔ', '኷'),
-    ('áŠș', 'áŠș'),
-    ('êȘ”', 'êȘ¶'),
-    ('êȘč', 'êȘč'),
-    ('êȘ»', 'êȘŒ'),
-];
-
-pub const LOWERCASE: &'static [(char, char)] = &[
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('ß', 'ö'),
-    ('ø', 'ÿ'),
-    ('ā', 'ā'),
-    ('ă', 'ă'),
-    ('ą', 'ą'),
-    ('ć', 'ć'),
-    ('ĉ', 'ĉ'),
-    ('ċ', 'ċ'),
-    ('č', 'č'),
-    ('ď', 'ď'),
-    ('đ', 'đ'),
-    ('ē', 'ē'),
-    ('ĕ', 'ĕ'),
-    ('ė', 'ė'),
-    ('ę', 'ę'),
-    ('ě', 'ě'),
-    ('ĝ', 'ĝ'),
-    ('ğ', 'ğ'),
-    ('ÄĄ', 'ÄĄ'),
-    ('ÄŁ', 'ÄŁ'),
-    ('Ä„', 'Ä„'),
-    ('ħ', 'ħ'),
-    ('Ä©', 'Ä©'),
-    ('Ä«', 'Ä«'),
-    ('Ä­', 'Ä­'),
-    ('ÄŻ', 'ÄŻ'),
-    ('ı', 'ı'),
-    ('Äł', 'Äł'),
-    ('Ä”', 'Ä”'),
-    ('Ä·', 'Äž'),
-    ('Äș', 'Äș'),
-    ('Č', 'Č'),
-    ('ÄŸ', 'ÄŸ'),
-    ('ƀ', 'ƀ'),
-    ('Ƃ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ɔ'),
-    ('ƈ', 'Ɖ'),
-    ('Ƌ', 'Ƌ'),
-    ('ƍ', 'ƍ'),
-    ('Ə', 'Ə'),
-    ('Ƒ', 'Ƒ'),
-    ('œ', 'œ'),
-    ('ƕ', 'ƕ'),
-    ('Ɨ', 'Ɨ'),
-    ('ƙ', 'ƙ'),
-    ('ƛ', 'ƛ'),
-    ('Ɲ', 'Ɲ'),
-    ('ß', 'ß'),
-    ('š', 'š'),
-    ('ĆŁ', 'ĆŁ'),
-    ('Ć„', 'Ć„'),
-    ('Ƨ', 'Ƨ'),
-    ('Ć©', 'Ć©'),
-    ('Ć«', 'Ć«'),
-    ('Ć­', 'Ć­'),
-    ('ĆŻ', 'ĆŻ'),
-    ('Ʊ', 'Ʊ'),
-    ('Ćł', 'Ćł'),
-    ('Ć”', 'Ć”'),
-    ('Ć·', 'Ć·'),
-    ('Ćș', 'Ćș'),
-    ('ĆŒ', 'ĆŒ'),
-    ('ĆŸ', 'ƀ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('ƈ', 'ƈ'),
-    ('ƌ', 'ƍ'),
-    ('ƒ', 'ƒ'),
-    ('ƕ', 'ƕ'),
-    ('ƙ', 'ƛ'),
-    ('ƞ', 'ƞ'),
-    ('ÆĄ', 'ÆĄ'),
-    ('ÆŁ', 'ÆŁ'),
-    ('Æ„', 'Æ„'),
-    ('Æš', 'Æš'),
-    ('ÆȘ', 'Æ«'),
-    ('Æ­', 'Æ­'),
-    ('ư', 'ư'),
-    ('ÆŽ', 'ÆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Æč', 'Æș'),
-    ('Æœ', 'Æż'),
-    ('dž', 'dž'),
-    ('lj', 'lj'),
-    ('nj', 'nj'),
-    ('ǎ', 'ǎ'),
-    ('ǐ', 'ǐ'),
-    ('ǒ', 'ǒ'),
-    ('ǔ', 'ǔ'),
-    ('ǖ', 'ǖ'),
-    ('ǘ', 'ǘ'),
-    ('ǚ', 'ǚ'),
-    ('ǜ', 'ǝ'),
-    ('ǟ', 'ǟ'),
-    ('ÇĄ', 'ÇĄ'),
-    ('ÇŁ', 'ÇŁ'),
-    ('Ç„', 'Ç„'),
-    ('ǧ', 'ǧ'),
-    ('Ç©', 'Ç©'),
-    ('Ç«', 'Ç«'),
-    ('Ç­', 'Ç­'),
-    ('ǯ', 'ǰ'),
-    ('Çł', 'Çł'),
-    ('Ç”', 'Ç”'),
-    ('Çč', 'Çč'),
-    ('Ç»', 'Ç»'),
-    ('ǜ', 'ǜ'),
-    ('Çż', 'Çż'),
-    ('ȁ', 'ȁ'),
-    ('ȃ', 'ȃ'),
-    ('ȅ', 'ȅ'),
-    ('ȇ', 'ȇ'),
-    ('ȉ', 'ȉ'),
-    ('ȋ', 'ȋ'),
-    ('ȍ', 'ȍ'),
-    ('ȏ', 'ȏ'),
-    ('ȑ', 'ȑ'),
-    ('ȓ', 'ȓ'),
-    ('ȕ', 'ȕ'),
-    ('ȗ', 'ȗ'),
-    ('ș', 'ș'),
-    ('ț', 'ț'),
-    ('ȝ', 'ȝ'),
-    ('ȟ', 'ȟ'),
-    ('ÈĄ', 'ÈĄ'),
-    ('ÈŁ', 'ÈŁ'),
-    ('È„', 'È„'),
-    ('ȧ', 'ȧ'),
-    ('È©', 'È©'),
-    ('È«', 'È«'),
-    ('È­', 'È­'),
-    ('ÈŻ', 'ÈŻ'),
-    ('ȱ', 'ȱ'),
-    ('Èł', 'Èč'),
-    ('Ȍ', 'Ȍ'),
-    ('Èż', 'ɀ'),
-    ('ɂ', 'ɂ'),
-    ('ɇ', 'ɇ'),
-    ('ɉ', 'ɉ'),
-    ('ɋ', 'ɋ'),
-    ('ɍ', 'ɍ'),
-    ('ɏ', 'ʓ'),
-    ('ʕ', 'Êž'),
-    ('ˀ', 'ˁ'),
-    ('Ë ', 'Ë€'),
-    ('\u{345}', '\u{345}'),
-    ('ͱ', 'ͱ'),
-    ('Íł', 'Íł'),
-    ('Í·', 'Í·'),
-    ('Íș', 'Íœ'),
-    ('ΐ', 'ΐ'),
-    ('ÎŹ', 'ώ'),
-    ('ϐ', 'ϑ'),
-    ('ϕ', 'ϗ'),
-    ('ϙ', 'ϙ'),
-    ('ϛ', 'ϛ'),
-    ('ϝ', 'ϝ'),
-    ('ϟ', 'ϟ'),
-    ('ÏĄ', 'ÏĄ'),
-    ('ÏŁ', 'ÏŁ'),
-    ('Ï„', 'Ï„'),
-    ('ϧ', 'ϧ'),
-    ('Ï©', 'Ï©'),
-    ('Ï«', 'Ï«'),
-    ('Ï­', 'Ï­'),
-    ('ÏŻ', 'Ïł'),
-    ('Ï”', 'Ï”'),
-    ('Ïž', 'Ïž'),
-    ('ϻ', 'ό'),
-    ('а', 'џ'),
-    ('ŃĄ', 'ŃĄ'),
-    ('ŃŁ', 'ŃŁ'),
-    ('Ń„', 'Ń„'),
-    ('ѧ', 'ѧ'),
-    ('Ń©', 'Ń©'),
-    ('Ń«', 'Ń«'),
-    ('Ń­', 'Ń­'),
-    ('ŃŻ', 'ŃŻ'),
-    ('ѱ', 'ѱ'),
-    ('Ńł', 'Ńł'),
-    ('Ń”', 'Ń”'),
-    ('Ń·', 'Ń·'),
-    ('Ńč', 'Ńč'),
-    ('Ń»', 'Ń»'),
-    ('Ńœ', 'Ńœ'),
-    ('Ńż', 'Ńż'),
-    ('ҁ', 'ҁ'),
-    ('ҋ', 'ҋ'),
-    ('ҍ', 'ҍ'),
-    ('ҏ', 'ҏ'),
-    ('ґ', 'ґ'),
-    ('ғ', 'ғ'),
-    ('ҕ', 'ҕ'),
-    ('җ', 'җ'),
-    ('ҙ', 'ҙ'),
-    ('қ', 'қ'),
-    ('ҝ', 'ҝ'),
-    ('ҟ', 'ҟ'),
-    ('ÒĄ', 'ÒĄ'),
-    ('ÒŁ', 'ÒŁ'),
-    ('Ò„', 'Ò„'),
-    ('Ò§', 'Ò§'),
-    ('Ò©', 'Ò©'),
-    ('Ò«', 'Ò«'),
-    ('Ò­', 'Ò­'),
-    ('ÒŻ', 'ÒŻ'),
-    ('Ò±', 'Ò±'),
-    ('Òł', 'Òł'),
-    ('Ò”', 'Ò”'),
-    ('Ò·', 'Ò·'),
-    ('Òč', 'Òč'),
-    ('Ò»', 'Ò»'),
-    ('Ҝ', 'Ҝ'),
-    ('Òż', 'Òż'),
-    ('ӂ', 'ӂ'),
-    ('ӄ', 'ӄ'),
-    ('ӆ', 'ӆ'),
-    ('ӈ', 'ӈ'),
-    ('ӊ', 'ӊ'),
-    ('ӌ', 'ӌ'),
-    ('ӎ', 'ӏ'),
-    ('ӑ', 'ӑ'),
-    ('ӓ', 'ӓ'),
-    ('ӕ', 'ӕ'),
-    ('ӗ', 'ӗ'),
-    ('ә', 'ә'),
-    ('ӛ', 'ӛ'),
-    ('ӝ', 'ӝ'),
-    ('ӟ', 'ӟ'),
-    ('ÓĄ', 'ÓĄ'),
-    ('ÓŁ', 'ÓŁ'),
-    ('Ó„', 'Ó„'),
-    ('Ó§', 'Ó§'),
-    ('Ó©', 'Ó©'),
-    ('Ó«', 'Ó«'),
-    ('Ó­', 'Ó­'),
-    ('ÓŻ', 'ÓŻ'),
-    ('Ó±', 'Ó±'),
-    ('Ół', 'Ół'),
-    ('Ó”', 'Ó”'),
-    ('Ó·', 'Ó·'),
-    ('Óč', 'Óč'),
-    ('Ó»', 'Ó»'),
-    ('Ӝ', 'Ӝ'),
-    ('Óż', 'Óż'),
-    ('ԁ', 'ԁ'),
-    ('ԃ', 'ԃ'),
-    ('ԅ', 'ԅ'),
-    ('ԇ', 'ԇ'),
-    ('ԉ', 'ԉ'),
-    ('ԋ', 'ԋ'),
-    ('ԍ', 'ԍ'),
-    ('ԏ', 'ԏ'),
-    ('ԑ', 'ԑ'),
-    ('ԓ', 'ԓ'),
-    ('ԕ', 'ԕ'),
-    ('ԗ', 'ԗ'),
-    ('ԙ', 'ԙ'),
-    ('ԛ', 'ԛ'),
-    ('ԝ', 'ԝ'),
-    ('ԟ', 'ԟ'),
-    ('ÔĄ', 'ÔĄ'),
-    ('ÔŁ', 'ÔŁ'),
-    ('Ô„', 'Ô„'),
-    ('Ô§', 'Ô§'),
-    ('Ô©', 'Ô©'),
-    ('Ô«', 'Ô«'),
-    ('Ô­', 'Ô­'),
-    ('ÔŻ', 'ÔŻ'),
-    ('ՠ', 'ֈ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ჿ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȈ'),
-    ('áȊ', 'áȊ'),
-    ('ᮀ', 'á¶ż'),
-    ('ខ', 'ខ'),
-    ('ឃ', 'ឃ'),
-    ('ᾅ', 'ᾅ'),
-    ('ᾇ', 'ᾇ'),
-    ('ᾉ', 'ᾉ'),
-    ('ᾋ', 'ᾋ'),
-    ('ឍ', 'ឍ'),
-    ('ត', 'ត'),
-    ('ᾑ', 'ᾑ'),
-    ('ᾓ', 'ᾓ'),
-    ('ᾕ', 'ᾕ'),
-    ('ᾗ', 'ᾗ'),
-    ('ᾙ', 'ᾙ'),
-    ('ᾛ', 'ᾛ'),
-    ('ឝ', 'ឝ'),
-    ('ᾟ', 'ᾟ'),
-    ('ឥ', 'ឥ'),
-    ('ឣ', 'ឣ'),
-    ('áž„', 'áž„'),
-    ('áž§', 'áž§'),
-    ('áž©', 'áž©'),
-    ('áž«', 'áž«'),
-    ('áž­', 'áž­'),
-    ('ឯ', 'ឯ'),
-    ('áž±', 'áž±'),
-    ('ážł', 'ážł'),
-    ('áž”', 'áž”'),
-    ('áž·', 'áž·'),
-    ('ážč', 'ážč'),
-    ('áž»', 'áž»'),
-    ('វ', 'វ'),
-    ('ážż', 'ážż'),
-    ('áč', 'áč'),
-    ('áčƒ', 'áčƒ'),
-    ('áč…', 'áč…'),
-    ('áč‡', 'áč‡'),
-    ('áč‰', 'áč‰'),
-    ('áč‹', 'áč‹'),
-    ('áč', 'áč'),
-    ('áč', 'áč'),
-    ('áč‘', 'áč‘'),
-    ('áč“', 'áč“'),
-    ('áč•', 'áč•'),
-    ('áč—', 'áč—'),
-    ('áč™', 'áč™'),
-    ('áč›', 'áč›'),
-    ('áč', 'áč'),
-    ('áčŸ', 'áčŸ'),
-    ('áčĄ', 'áčĄ'),
-    ('áčŁ', 'áčŁ'),
-    ('áč„', 'áč„'),
-    ('áč§', 'áč§'),
-    ('áč©', 'áč©'),
-    ('áč«', 'áč«'),
-    ('áč­', 'áč­'),
-    ('áčŻ', 'áčŻ'),
-    ('áč±', 'áč±'),
-    ('áčł', 'áčł'),
-    ('áč”', 'áč”'),
-    ('áč·', 'áč·'),
-    ('áčč', 'áčč'),
-    ('áč»', 'áč»'),
-    ('áčœ', 'áčœ'),
-    ('áčż', 'áčż'),
-    ('áș', 'áș'),
-    ('áșƒ', 'áșƒ'),
-    ('áș…', 'áș…'),
-    ('áș‡', 'áș‡'),
-    ('áș‰', 'áș‰'),
-    ('áș‹', 'áș‹'),
-    ('áș', 'áș'),
-    ('áș', 'áș'),
-    ('áș‘', 'áș‘'),
-    ('áș“', 'áș“'),
-    ('áș•', 'áș'),
-    ('áșŸ', 'áșŸ'),
-    ('áșĄ', 'áșĄ'),
-    ('áșŁ', 'áșŁ'),
-    ('áș„', 'áș„'),
-    ('áș§', 'áș§'),
-    ('áș©', 'áș©'),
-    ('áș«', 'áș«'),
-    ('áș­', 'áș­'),
-    ('áșŻ', 'áșŻ'),
-    ('áș±', 'áș±'),
-    ('áșł', 'áșł'),
-    ('áș”', 'áș”'),
-    ('áș·', 'áș·'),
-    ('áșč', 'áșč'),
-    ('áș»', 'áș»'),
-    ('áșœ', 'áșœ'),
-    ('áșż', 'áșż'),
-    ('ề', 'ề'),
-    ('ể', 'ể'),
-    ('ễ', 'ễ'),
-    ('ệ', 'ệ'),
-    ('ỉ', 'ỉ'),
-    ('ị', 'ị'),
-    ('ọ', 'ọ'),
-    ('ỏ', 'ỏ'),
-    ('ố', 'ố'),
-    ('ồ', 'ồ'),
-    ('ổ', 'ổ'),
-    ('ỗ', 'ỗ'),
-    ('ộ', 'ộ'),
-    ('ớ', 'ớ'),
-    ('ờ', 'ờ'),
-    ('ở', 'ở'),
-    ('ụ', 'ụ'),
-    ('ợ', 'ợ'),
-    ('Ễ', 'Ễ'),
-    ('á»§', 'á»§'),
-    ('ứ', 'ứ'),
-    ('ừ', 'ừ'),
-    ('á»­', 'á»­'),
-    ('ữ', 'ữ'),
-    ('á»±', 'á»±'),
-    ('ỳ', 'ỳ'),
-    ('á»”', 'á»”'),
-    ('á»·', 'á»·'),
-    ('á»č', 'á»č'),
-    ('á»»', 'á»»'),
-    ('Ờ', 'Ờ'),
-    ('ỿ', 'ጇ'),
-    ('ጐ', 'ጕ'),
-    ('ጠ', 'ጧ'),
-    ('ጰ', 'ጷ'),
-    ('ᜀ', 'ᜅ'),
-    ('ᜐ', '᜗'),
-    ('ᜠ', 'ᜧ'),
-    ('ᜰ', '᜜'),
-    ('ៀ', 'ះ'),
-    ('័', 'ៗ'),
-    ('០', '៧'),
-    ('៰', '៎'),
-    ('៶', '៷'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῇ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'ῗ'),
-    ('áż ', 'áż§'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áż·'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℊ', 'ℊ'),
-    ('ℎ', 'ℏ'),
-    ('ℓ', 'ℓ'),
-    ('ℯ', 'ℯ'),
-    ('℮', '℮'),
-    ('â„č', 'â„č'),
-    ('ℌ', 'ℜ'),
-    ('ⅆ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('ⅰ', 'ⅿ'),
-    ('ↄ', 'ↄ'),
-    ('ⓐ', 'ⓩ'),
-    ('ⰰ', 'ⱟ'),
-    ('ⱥ', 'ⱥ'),
-    ('ⱄ', 'ⱊ'),
-    ('ⱚ', 'ⱚ'),
-    ('â±Ș', 'â±Ș'),
-    ('ⱏ', 'ⱏ'),
-    ('â±±', 'â±±'),
-    ('ⱳ', 'ⱎ'),
-    ('ⱶ', 'ⱜ'),
-    ('âȁ', 'âȁ'),
-    ('âȃ', 'âȃ'),
-    ('âȅ', 'âȅ'),
-    ('âȇ', 'âȇ'),
-    ('âȉ', 'âȉ'),
-    ('âȋ', 'âȋ'),
-    ('âȍ', 'âȍ'),
-    ('âȏ', 'âȏ'),
-    ('âȑ', 'âȑ'),
-    ('âȓ', 'âȓ'),
-    ('âȕ', 'âȕ'),
-    ('âȗ', 'âȗ'),
-    ('âș', 'âș'),
-    ('âț', 'âț'),
-    ('âȝ', 'âȝ'),
-    ('âȟ', 'âȟ'),
-    ('âČĄ', 'âČĄ'),
-    ('âČŁ', 'âČŁ'),
-    ('âČ„', 'âČ„'),
-    ('âȧ', 'âȧ'),
-    ('âČ©', 'âČ©'),
-    ('âČ«', 'âČ«'),
-    ('âČ­', 'âČ­'),
-    ('âČŻ', 'âČŻ'),
-    ('âȱ', 'âȱ'),
-    ('âČł', 'âČł'),
-    ('âČ”', 'âČ”'),
-    ('âČ·', 'âČ·'),
-    ('âČč', 'âČč'),
-    ('âČ»', 'âČ»'),
-    ('âČœ', 'âČœ'),
-    ('âČż', 'âČż'),
-    ('ⳁ', 'ⳁ'),
-    ('ⳃ', 'ⳃ'),
-    ('ⳅ', 'ⳅ'),
-    ('ⳇ', 'ⳇ'),
-    ('ⳉ', 'ⳉ'),
-    ('ⳋ', 'ⳋ'),
-    ('ⳍ', 'ⳍ'),
-    ('ⳏ', 'ⳏ'),
-    ('ⳑ', 'ⳑ'),
-    ('ⳓ', 'ⳓ'),
-    ('ⳕ', 'ⳕ'),
-    ('ⳗ', 'ⳗ'),
-    ('ⳙ', 'ⳙ'),
-    ('ⳛ', 'ⳛ'),
-    ('ⳝ', 'ⳝ'),
-    ('ⳟ', 'ⳟ'),
-    ('⳥', '⳥'),
-    ('ⳣ', 'Ⳁ'),
-    ('ⳏ', 'ⳏ'),
-    ('âłź', 'âłź'),
-    ('âłł', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('ꙁ', 'ꙁ'),
-    ('ꙃ', 'ꙃ'),
-    ('ꙅ', 'ꙅ'),
-    ('ꙇ', 'ꙇ'),
-    ('ꙉ', 'ꙉ'),
-    ('ꙋ', 'ꙋ'),
-    ('ꙍ', 'ꙍ'),
-    ('ꙏ', 'ꙏ'),
-    ('ꙑ', 'ꙑ'),
-    ('ꙓ', 'ꙓ'),
-    ('ꙕ', 'ꙕ'),
-    ('ꙗ', 'ꙗ'),
-    ('ꙙ', 'ꙙ'),
-    ('ꙛ', 'ꙛ'),
-    ('ꙝ', 'ꙝ'),
-    ('ꙟ', 'ꙟ'),
-    ('ê™Ą', 'ê™Ą'),
-    ('ê™Ł', 'ê™Ł'),
-    ('Ꙅ', 'Ꙅ'),
-    ('ꙧ', 'ꙧ'),
-    ('ꙩ', 'ꙩ'),
-    ('ꙫ', 'ꙫ'),
-    ('ꙭ', 'ꙭ'),
-    ('ꚁ', 'ꚁ'),
-    ('ꚃ', 'ꚃ'),
-    ('ꚅ', 'ꚅ'),
-    ('ꚇ', 'ꚇ'),
-    ('ꚉ', 'ꚉ'),
-    ('ꚋ', 'ꚋ'),
-    ('ꚍ', 'ꚍ'),
-    ('ꚏ', 'ꚏ'),
-    ('ꚑ', 'ꚑ'),
-    ('ꚓ', 'ꚓ'),
-    ('ꚕ', 'ꚕ'),
-    ('ꚗ', 'ꚗ'),
-    ('ꚙ', 'ꚙ'),
-    ('ꚛ', 'ꚝ'),
-    ('êœŁ', 'êœŁ'),
-    ('꜄', '꜄'),
-    ('ꜧ', 'ꜧ'),
-    ('ꜩ', 'ꜩ'),
-    ('ꜫ', 'ꜫ'),
-    ('ꜭ', 'ꜭ'),
-    ('êœŻ', 'ꜱ'),
-    ('êœł', 'êœł'),
-    ('꜔', '꜔'),
-    ('ꜷ', 'ꜷ'),
-    ('êœč', 'êœč'),
-    ('ꜻ', 'ꜻ'),
-    ('ꜜ', 'ꜜ'),
-    ('êœż', 'êœż'),
-    ('ꝁ', 'ꝁ'),
-    ('ꝃ', 'ꝃ'),
-    ('ꝅ', 'ꝅ'),
-    ('ꝇ', 'ꝇ'),
-    ('ꝉ', 'ꝉ'),
-    ('ꝋ', 'ꝋ'),
-    ('ꝍ', 'ꝍ'),
-    ('ꝏ', 'ꝏ'),
-    ('ꝑ', 'ꝑ'),
-    ('ꝓ', 'ꝓ'),
-    ('ꝕ', 'ꝕ'),
-    ('ꝗ', 'ꝗ'),
-    ('ꝙ', 'ꝙ'),
-    ('ꝛ', 'ꝛ'),
-    ('ꝝ', 'ꝝ'),
-    ('ꝟ', 'ꝟ'),
-    ('êĄ', 'êĄ'),
-    ('êŁ', 'êŁ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('ꝧ', 'ꝧ'),
-    ('ꝩ', 'ꝩ'),
-    ('ꝫ', 'ꝫ'),
-    ('ꝭ', 'ꝭ'),
-    ('êŻ', 'Ꝟ'),
-    ('êș', 'êș'),
-    ('Ꝍ', 'Ꝍ'),
-    ('êż', 'êż'),
-    ('ꞁ', 'ꞁ'),
-    ('ꞃ', 'ꞃ'),
-    ('ꞅ', 'ꞅ'),
-    ('ꞇ', 'ꞇ'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞎ', 'ꞎ'),
-    ('ꞑ', 'ꞑ'),
-    ('ꞓ', 'ꞕ'),
-    ('ꞗ', 'ꞗ'),
-    ('ꞙ', 'ꞙ'),
-    ('ꞛ', 'ꞛ'),
-    ('ꞝ', 'ꞝ'),
-    ('ꞟ', 'ꞟ'),
-    ('êžĄ', 'êžĄ'),
-    ('êžŁ', 'êžŁ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('ꞧ', 'ꞧ'),
-    ('ꞩ', 'ꞩ'),
-    ('êžŻ', 'êžŻ'),
-    ('ꞔ', 'ꞔ'),
-    ('ꞷ', 'ꞷ'),
-    ('êžč', 'êžč'),
-    ('ꞻ', 'ꞻ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('êžż', 'êžż'),
-    ('ꟁ', 'ꟁ'),
-    ('ꟃ', 'ꟃ'),
-    ('ꟈ', 'ꟈ'),
-    ('ꟊ', 'ꟊ'),
-    ('ꟍ', 'ꟍ'),
-    ('ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'ꟕ'),
-    ('ꟗ', 'ꟗ'),
-    ('ꟙ', 'ꟙ'),
-    ('ꟛ', 'ꟛ'),
-    ('êŸČ', '꟎'),
-    ('ꟶ', 'ꟶ'),
-    ('꟞', 'êŸș'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('', ''),
-    ('𐐹', '𐑏'),
-    ('𐓘', '𐓻'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐞀', '𐞀'),
-    ('𐞃', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐳀', 'đłČ'),
-    ('𐔰', '𐶅'),
-    ('𑣀', '𑣟'),
-    ('đ–č ', 'đ–čż'),
-    ('𝐚', '𝐳'),
-    ('𝑎', '𝑔'),
-    ('𝑖', '𝑧'),
-    ('𝒂', '𝒛'),
-    ('đ’¶', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝓏'),
-    ('đ“Ș', '𝔃'),
-    ('𝔞', 'đ”·'),
-    ('𝕒', 'đ•«'),
-    ('𝖆', '𝖟'),
-    ('đ–ș', '𝗓'),
-    ('𝗼', '𝘇'),
-    ('𝘱', 'đ˜»'),
-    ('𝙖', '𝙯'),
-    ('𝚊', 'đš„'),
-    ('𝛂', '𝛚'),
-    ('𝛜', '𝛡'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜛'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝕'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞏'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟉'),
-    ('𝟋', '𝟋'),
-    ('đŒ€', 'đŒ‰'),
-    ('đŒ‹', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞀹', 'đž„ƒ'),
-];
-
-pub const MATH: &'static [(char, char)] = &[
-    ('+', '+'),
-    ('<', '>'),
-    ('^', '^'),
-    ('|', '|'),
-    ('~', '~'),
-    ('¬', '¬'),
-    ('±', '±'),
-    ('×', '×'),
-    ('÷', '÷'),
-    ('ϐ', 'ϒ'),
-    ('ϕ', 'ϕ'),
-    ('ϰ', 'ϱ'),
-    ('ώ', '϶'),
-    ('ۆ', 'ۈ'),
-    ('‖', '‖'),
-    ('′', '‮'),
-    ('⁀', '⁀'),
-    ('⁄', '⁄'),
-    ('⁒', '⁒'),
-    ('\u{2061}', '\u{2064}'),
-    ('âș', ' '),
-    ('₊', '₎'),
-    ('\u{20d0}', '\u{20dc}'),
-    ('\u{20e1}', '\u{20e1}'),
-    ('\u{20e5}', '\u{20e6}'),
-    ('\u{20eb}', '\u{20ef}'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('℘', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('ℹ', '℩'),
-    ('ℬ', 'ℭ'),
-    ('ℯ', 'ℱ'),
-    ('ℳ', 'ℾ'),
-    ('ℌ', 'ⅉ'),
-    ('⅋', '⅋'),
-    ('←', '↧'),
-    ('↩', '↼'),
-    ('↰', '↱'),
-    ('↶', '↷'),
-    ('↌', '⇛'),
-    ('⇝', '⇝'),
-    ('⇀', '⇄'),
-    ('⇮', '⋿'),
-    ('⌈', '⌋'),
-    ('⌠', '⌡'),
-    ('⍌', '⍌'),
-    ('⎛', '⎔'),
-    ('⎷', '⎷'),
-    ('⏐', '⏐'),
-    ('⏜', '⏱'),
-    ('■', '□'),
-    ('▼', '▷'),
-    ('â–Œ', '◁'),
-    ('◆', '◇'),
-    ('◊', '○'),
-    ('●', '◓'),
-    ('◱', '◱'),
-    ('â—€', 'â—€'),
-    ('◧', '◬'),
-    ('◾', '◿'),
-    ('★', '☆'),
-    ('♀', '♀'),
-    ('♂', '♂'),
-    ('♠', '♣'),
-    ('♭', '♯'),
-    ('⟀', '⟿'),
-    (' ', '⫿'),
-    ('⬰', '⭄'),
-    ('⭇', '⭌'),
-    ('ïŹ©', 'ïŹ©'),
-    ('ïčĄ', 'ïčŠ'),
-    ('ïčš', 'ïčš'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïżą', 'ïżą'),
-    ('ïż©', 'ïżŹ'),
-    ('𐶎', 'đ¶'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝟋'),
-    ('𝟎', '𝟿'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('đž»°', 'đž»±'),
-];
-
-pub const MODIFIER_COMBINING_MARK: &'static [(char, char)] = &[
-    ('\u{654}', '\u{655}'),
-    ('\u{658}', '\u{658}'),
-    ('\u{6dc}', '\u{6dc}'),
-    ('\u{6e3}', '\u{6e3}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{8ca}', '\u{8cb}'),
-    ('\u{8cd}', '\u{8cf}'),
-    ('\u{8d3}', '\u{8d3}'),
-    ('\u{8f3}', '\u{8f3}'),
-];
-
-pub const NONCHARACTER_CODE_POINT: &'static [(char, char)] = &[
-    ('\u{fdd0}', '\u{fdef}'),
-    ('\u{fffe}', '\u{ffff}'),
-    ('\u{1fffe}', '\u{1ffff}'),
-    ('\u{2fffe}', '\u{2ffff}'),
-    ('\u{3fffe}', '\u{3ffff}'),
-    ('\u{4fffe}', '\u{4ffff}'),
-    ('\u{5fffe}', '\u{5ffff}'),
-    ('\u{6fffe}', '\u{6ffff}'),
-    ('\u{7fffe}', '\u{7ffff}'),
-    ('\u{8fffe}', '\u{8ffff}'),
-    ('\u{9fffe}', '\u{9ffff}'),
-    ('\u{afffe}', '\u{affff}'),
-    ('\u{bfffe}', '\u{bffff}'),
-    ('\u{cfffe}', '\u{cffff}'),
-    ('\u{dfffe}', '\u{dffff}'),
-    ('\u{efffe}', '\u{effff}'),
-    ('\u{ffffe}', '\u{fffff}'),
-    ('\u{10fffe}', '\u{10ffff}'),
-];
-
-pub const OTHER_ALPHABETIC: &'static [(char, char)] = &[
-    ('\u{345}', '\u{345}'),
-    ('\u{363}', '\u{36f}'),
-    ('\u{5b0}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{657}'),
-    ('\u{659}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6e1}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ed}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{73f}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{816}', '\u{817}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82c}'),
-    ('\u{897}', '\u{897}'),
-    ('\u{8d4}', '\u{8df}'),
-    ('\u{8e3}', '\u{8e9}'),
-    ('\u{8f0}', 'à€ƒ'),
-    ('\u{93a}', 'à€»'),
-    ('à€Ÿ', 'à„Œ'),
-    ('à„Ž', 'à„'),
-    ('\u{955}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('\u{981}', 'àŠƒ'),
-    ('\u{9be}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৌ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{a01}', 'àšƒ'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4c}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘŸ', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', 'ૌ'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{afc}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('\u{b3e}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', 'ୌ'),
-    ('\u{b56}', '\u{b57}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', 'àŻŒ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', '\u{c04}'),
-    ('\u{c3e}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4c}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', 'àȃ'),
-    ('àČŸ', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccc}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('àłł', 'àłł'),
-    ('\u{d00}', 'àŽƒ'),
-    ('\u{d3e}', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Œ'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', 'ඃ'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Č', 'à·ł'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e4d}', '\u{e4d}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{eb9}'),
-    ('\u{ebb}', '\u{ebc}'),
-    ('\u{ecd}', '\u{ecd}'),
-    ('\u{f71}', '\u{f83}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('ါ', '\u{1036}'),
-    ('ှ', 'ှ'),
-    ('ျ', '\u{103e}'),
-    ('ၖ', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('ၹ', '၀'),
-    ('ၧ', 'ၭ'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{108d}'),
-    ('ႏ', 'ႏ'),
-    ('ႚ', '\u{109d}'),
-    ('\u{1712}', '\u{1713}'),
-    ('\u{1732}', '\u{1733}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('ា', 'ៈ'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', 'သ'),
-    ('\u{1a17}', '\u{1a1b}'),
-    ('ᩕ', '\u{1a5e}'),
-    ('ᩥ', '\u{1a74}'),
-    ('\u{1abf}', '\u{1ac0}'),
-    ('\u{1acc}', '\u{1ace}'),
-    ('\u{1b00}', 'ᬄ'),
-    ('\u{1b35}', '\u{1b43}'),
-    ('\u{1b80}', 'ἂ'),
-    ('៥', '\u{1ba9}'),
-    ('\u{1bac}', '\u{1bad}'),
-    ('ᯧ', '\u{1bf1}'),
-    ('á°€', '\u{1c36}'),
-    ('\u{1dd3}', '\u{1df4}'),
-    ('Ⓐ', 'ⓩ'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{a674}', '\u{a67b}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('ê Ł', 'ê §'),
-    ('êą€', 'êą'),
-    ('êąŽ', 'êŁƒ'),
-    ('\u{a8c5}', '\u{a8c5}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92a}'),
-    ('\u{a947}', 'ê„’'),
-    ('\u{a980}', 'ꊃ'),
-    ('ꊎ', 'êŠż'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', 'ꩍ'),
-    ('ꩻ', '꩜'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabe}'),
-    ('ê««', 'ê«Ż'),
-    ('ê«”', 'ê«”'),
-    ('êŻŁ', 'êŻȘ'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d69}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10efc}'),
-    ('𑀀', '𑀂'),
-    ('\u{11038}', '\u{11045}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{11080}', '𑂂'),
-    ('𑂰', '𑂾'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{11132}'),
-    ('𑅅', '𑅆'),
-    ('\u{11180}', '𑆂'),
-    ('𑆳', '𑆿'),
-    ('𑇎', '\u{111cf}'),
-    ('𑈬', '\u{11234}'),
-    ('\u{11237}', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112e8}'),
-    ('\u{11300}', '𑌃'),
-    ('\u{1133e}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '𑍌'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍱', '𑍣'),
-    ('\u{113b8}', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏍'),
-    ('𑐔', '𑑁'),
-    ('\u{11443}', '𑑅'),
-    ('\u{114b0}', '𑓁'),
-    ('\u{115af}', '\u{115b5}'),
-    ('𑖾', 'đ‘–Ÿ'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('𑘰', 'đ‘˜Ÿ'),
-    ('\u{11640}', '\u{11640}'),
-    ('\u{116ab}', '\u{116b5}'),
-    ('\u{1171d}', '\u{1172a}'),
-    ('𑠬', '𑠾'),
-    ('\u{11930}', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{1193c}'),
-    ('đ‘„€', 'đ‘„€'),
-    ('đ‘„‚', 'đ‘„‚'),
-    ('𑧑', '\u{119d7}'),
-    ('\u{119da}', '𑧟'),
-    ('đ‘§€', 'đ‘§€'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a35}', 'đ‘šč'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a51}', '\u{11a5b}'),
-    ('\u{11a8a}', 'đ‘Ș—'),
-    ('𑰯', '\u{11c36}'),
-    ('\u{11c38}', 'đ‘°Ÿ'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d41}'),
-    ('\u{11d43}', '\u{11d43}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('đ‘¶Š', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶–'),
-    ('\u{11ef3}', 'đ‘»¶'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('đ‘Œƒ', 'đ‘Œƒ'),
-    ('đ‘ŒŽ', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f40}'),
-    ('\u{1611e}', '\u{1612e}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('đ–œ‘', 'đ–Ÿ‡'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1bc9e}', '\u{1bc9e}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e947}', '\u{1e947}'),
-    ('🄰', '🅉'),
-    ('🅐', 'đŸ…©'),
-    ('🅰', '🆉'),
-];
-
-pub const OTHER_DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[
-    ('\u{34f}', '\u{34f}'),
-    ('ᅟ', 'ᅠ'),
-    ('\u{17b4}', '\u{17b5}'),
-    ('\u{2065}', '\u{2065}'),
-    ('ă…€', 'ă…€'),
-    ('', ''),
-    ('\u{fff0}', '\u{fff8}'),
-    ('\u{e0000}', '\u{e0000}'),
-    ('\u{e0002}', '\u{e001f}'),
-    ('\u{e0080}', '\u{e00ff}'),
-    ('\u{e01f0}', '\u{e0fff}'),
-];
-
-pub const OTHER_GRAPHEME_EXTEND: &'static [(char, char)] = &[
-    ('\u{9be}', '\u{9be}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('\u{b3e}', '\u{b3e}'),
-    ('\u{b57}', '\u{b57}'),
-    ('\u{bbe}', '\u{bbe}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{cc0}', '\u{cc0}'),
-    ('\u{cc2}', '\u{cc2}'),
-    ('\u{cc7}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccb}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{d3e}', '\u{d3e}'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{dcf}', '\u{dcf}'),
-    ('\u{ddf}', '\u{ddf}'),
-    ('\u{1715}', '\u{1715}'),
-    ('\u{1734}', '\u{1734}'),
-    ('\u{1b35}', '\u{1b35}'),
-    ('\u{1b3b}', '\u{1b3b}'),
-    ('\u{1b3d}', '\u{1b3d}'),
-    ('\u{1b43}', '\u{1b44}'),
-    ('\u{1baa}', '\u{1baa}'),
-    ('\u{1bf2}', '\u{1bf3}'),
-    ('\u{200c}', '\u{200c}'),
-    ('\u{302e}', '\u{302f}'),
-    ('\u{a953}', '\u{a953}'),
-    ('\u{a9c0}', '\u{a9c0}'),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('\u{111c0}', '\u{111c0}'),
-    ('\u{11235}', '\u{11235}'),
-    ('\u{1133e}', '\u{1133e}'),
-    ('\u{1134d}', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('\u{113b8}', '\u{113b8}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '\u{113c9}'),
-    ('\u{113cf}', '\u{113cf}'),
-    ('\u{114b0}', '\u{114b0}'),
-    ('\u{114bd}', '\u{114bd}'),
-    ('\u{115af}', '\u{115af}'),
-    ('\u{116b6}', '\u{116b6}'),
-    ('\u{11930}', '\u{11930}'),
-    ('\u{1193d}', '\u{1193d}'),
-    ('\u{11f41}', '\u{11f41}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1d165}', '\u{1d166}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{e0020}', '\u{e007f}'),
-];
-
-pub const OTHER_ID_CONTINUE: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('·', '·'),
-    ('፩', '፱'),
-    ('᧚', '᧚'),
-    ('\u{200c}', '\u{200d}'),
-    ('・', '・'),
-    ('', ''),
-];
-
-pub const OTHER_ID_START: &'static [(char, char)] =
-    &[('\u{1885}', '\u{1886}'), ('℘', '℘'), ('ℼ', 'ℼ'), ('゛', '゜')];
-
-pub const OTHER_LOWERCASE: &'static [(char, char)] = &[
-    ('ª', 'ª'),
-    ('º', 'º'),
-    ('ʰ', 'ʞ'),
-    ('ˀ', 'ˁ'),
-    ('Ë ', 'Ë€'),
-    ('\u{345}', '\u{345}'),
-    ('Íș', 'Íș'),
-    ('჌', '჌'),
-    ('ᎏ', 'á”Ș'),
-    ('ᔞ', 'ᔞ'),
-    ('ᶛ', 'á¶ż'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ⅰ', 'ⅿ'),
-    ('ⓐ', 'ⓩ'),
-    ('ⱌ', 'ⱜ'),
-    ('ꚜ', 'ꚝ'),
-    ('ꝰ', 'ꝰ'),
-    ('êŸČ', '꟎'),
-    ('꟞', 'êŸč'),
-    ('ꭜ', 'ꭟ'),
-    ('ê­©', 'ê­©'),
-    ('𐞀', '𐞀'),
-    ('𐞃', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𞀰', '𞁭'),
-];
-
-pub const OTHER_MATH: &'static [(char, char)] = &[
-    ('^', '^'),
-    ('ϐ', 'ϒ'),
-    ('ϕ', 'ϕ'),
-    ('ϰ', 'ϱ'),
-    ('ÏŽ', 'Ï”'),
-    ('‖', '‖'),
-    ('′', '‮'),
-    ('⁀', '⁀'),
-    ('\u{2061}', '\u{2064}'),
-    ('⁜', ' '),
-    ('₍', '₎'),
-    ('\u{20d0}', '\u{20dc}'),
-    ('\u{20e1}', '\u{20e1}'),
-    ('\u{20e5}', '\u{20e6}'),
-    ('\u{20eb}', '\u{20ef}'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('ℹ', '℩'),
-    ('ℬ', 'ℭ'),
-    ('ℯ', 'ℱ'),
-    ('ℳ', 'ℾ'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('↕', '↙'),
-    ('↜', '↟'),
-    ('↡', '↱'),
-    ('ↀ', 'ↄ'),
-    ('↧', '↧'),
-    ('↩', '↭'),
-    ('↰', '↱'),
-    ('↶', '↷'),
-    ('↌', '⇍'),
-    ('⇐', '⇑'),
-    ('⇓', '⇓'),
-    ('⇕', '⇛'),
-    ('⇝', '⇝'),
-    ('⇀', '⇄'),
-    ('⌈', '⌋'),
-    ('⎮', '⎔'),
-    ('⎷', '⎷'),
-    ('⏐', '⏐'),
-    ('⏱', '⏱'),
-    ('■', '□'),
-    ('▼', '▶'),
-    ('â–Œ', '◀'),
-    ('◆', '◇'),
-    ('◊', '○'),
-    ('●', '◓'),
-    ('◱', '◱'),
-    ('â—€', 'â—€'),
-    ('◧', '◬'),
-    ('★', '☆'),
-    ('♀', '♀'),
-    ('♂', '♂'),
-    ('♠', '♣'),
-    ('♭', '♼'),
-    ('⟅', '⟆'),
-    ('⟩', '⟯'),
-    ('⊃', '⊘'),
-    ('⧘', '⧛'),
-    ('⧌', '⧜'),
-    ('ïčĄ', 'ïčĄ'),
-    ('ïčŁ', 'ïčŁ'),
-    ('ïčš', 'ïčš'),
-    ('', ''),
-    ('', ''),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('𝟎', '𝟿'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-];
-
-pub const OTHER_UPPERCASE: &'static [(char, char)] =
-    &[('Ⅰ', 'Ⅿ'), ('Ⓐ', 'Ⓩ'), ('🄰', '🅉'), ('🅐', 'đŸ…©'), ('🅰', '🆉')];
-
-pub const PATTERN_SYNTAX: &'static [(char, char)] = &[
-    ('!', '/'),
-    (':', '@'),
-    ('[', '^'),
-    ('`', '`'),
-    ('{', '~'),
-    ('¡', '§'),
-    ('©', '©'),
-    ('«', '¬'),
-    ('®', '®'),
-    ('°', '±'),
-    ('¶', '¶'),
-    ('»', '»'),
-    ('¿', '¿'),
-    ('×', '×'),
-    ('÷', '÷'),
-    ('‐', '‧'),
-    ('‰', '‾'),
-    ('⁁', '⁓'),
-    ('⁕', '⁞'),
-    ('←', '\u{245f}'),
-    ('─', '❔'),
-    ('➔', '⯿'),
-    ('⾀', '\u{2e7f}'),
-    ('、', '〃'),
-    ('〈', '〠'),
-    ('〰', '〰'),
-    ('', 'ïŽż'),
-    ('ïč…', 'ïč†'),
-];
-
-pub const PATTERN_WHITE_SPACE: &'static [(char, char)] = &[
-    ('\t', '\r'),
-    (' ', ' '),
-    ('\u{85}', '\u{85}'),
-    ('\u{200e}', '\u{200f}'),
-    ('\u{2028}', '\u{2029}'),
-];
-
-pub const PREPENDED_CONCATENATION_MARK: &'static [(char, char)] = &[
-    ('\u{600}', '\u{605}'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('\u{70f}', '\u{70f}'),
-    ('\u{890}', '\u{891}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('\u{110bd}', '\u{110bd}'),
-    ('\u{110cd}', '\u{110cd}'),
-];
-
-pub const QUOTATION_MARK: &'static [(char, char)] = &[
-    ('"', '"'),
-    ('\'', '\''),
-    ('«', '«'),
-    ('»', '»'),
-    ('‘', '‟'),
-    ('‹', '›'),
-    ('âč‚', 'âč‚'),
-    ('「', '』'),
-    ('〝', '〟'),
-    ('ïč', 'ïč„'),
-    ('', ''),
-    ('', ''),
-    ('ïœą', 'ïœŁ'),
-];
-
-pub const RADICAL: &'static [(char, char)] =
-    &[('âș€', 'âș™'), ('âș›', '⻳'), ('⌀', '⿕')];
-
-pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇩', '🇿')];
-
-pub const SENTENCE_TERMINAL: &'static [(char, char)] = &[
-    ('!', '!'),
-    ('.', '.'),
-    ('?', '?'),
-    ('։', '։'),
-    ('۝', '۟'),
-    ('۔', '۔'),
-    ('܀', '܂'),
-    ('ßč', 'ßč'),
-    ('à ·', 'à ·'),
-    ('à č', 'à č'),
-    ('ࠜ', 'ࠟ'),
-    ('à„€', 'à„„'),
-    ('၊', '။'),
-    ('፱', '፱'),
-    ('፧', 'ፚ'),
-    ('ᙼ', 'ᙼ'),
-    ('᜔', '᜶'),
-    ('។', '៕'),
-    ('᠃', '᠃'),
-    ('᠉', '᠉'),
-    ('á„„', 'á„…'),
-    ('áȘš', 'áȘ«'),
-    ('᭎', '᭏'),
-    ('᭚', '᭛'),
-    ('᭞', '᭟'),
-    ('á­œ', 'á­ż'),
-    ('᰻', 'ᰌ'),
-    ('ᱟ', '᱿'),
-    (' ', ' '),
-    ('‌', '“'),
-    ('⁇', '⁉'),
-    ('âłč', 'âł»'),
-    ('âžź', 'âžź'),
-    ('➌', '➌'),
-    ('âč“', 'âč”'),
-    ('。', '。'),
-    ('ê“ż', 'ê“ż'),
-    ('꘎', '꘏'),
-    ('ê›ł', 'ê›ł'),
-    ('꛷', '꛷'),
-    ('êĄ¶', 'êĄ·'),
-    ('êŁŽ', 'êŁ'),
-    ('ê€Ż', 'ê€Ż'),
-    ('꧈', '꧉'),
-    ('꩝', '꩟'),
-    ('꫰', '꫱'),
-    ('êŻ«', 'êŻ«'),
-    ('ïž’', 'ïž’'),
-    ('ïž•', 'ïž–'),
-    ('ïč’', 'ïč’'),
-    ('ïč–', 'ïč—'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïœĄ', 'ïœĄ'),
-    ('𐩖', '𐩗'),
-    ('đœ•', 'đœ™'),
-    ('đŸ†', 'đŸ‰'),
-    ('𑁇', '𑁈'),
-    ('đ‘‚Ÿ', '𑃁'),
-    ('𑅁', '𑅃'),
-    ('𑇅', '𑇆'),
-    ('𑇍', '𑇍'),
-    ('𑇞', '𑇟'),
-    ('𑈾', 'đ‘ˆč'),
-    ('đ‘ˆ»', 'đ‘ˆŒ'),
-    ('𑊩', '𑊩'),
-    ('𑏔', '𑏕'),
-    ('𑑋', '𑑌'),
-    ('𑗂', '𑗃'),
-    ('𑗉', '𑗗'),
-    ('𑙁', '𑙂'),
-    ('đ‘œŒ', 'đ‘œŸ'),
-    ('đ‘„„', 'đ‘„„'),
-    ('𑄆', '𑄆'),
-    ('đ‘©‚', 'đ‘©ƒ'),
-    ('đ‘Ș›', 'đ‘Șœ'),
-    ('𑱁', '𑱂'),
-    ('đ‘»·', '𑻞'),
-    ('đ‘œƒ', 'đ‘œ„'),
-    ('đ–©ź', 'đ–©Ż'),
-    ('đ–«”', 'đ–«”'),
-    ('đ–Ź·', '𖬾'),
-    ('𖭄', '𖭄'),
-    ('𖔟', '𖔯'),
-    ('đ–ș˜', 'đ–ș˜'),
-    ('đ›ČŸ', 'đ›ČŸ'),
-    ('đȘˆ', 'đȘˆ'),
-];
-
-pub const SOFT_DOTTED: &'static [(char, char)] = &[
-    ('i', 'j'),
-    ('ÄŻ', 'ÄŻ'),
-    ('ɉ', 'ɉ'),
-    ('Éš', 'Éš'),
-    ('ʝ', 'ʝ'),
-    ('ÊČ', 'ÊČ'),
-    ('Ïł', 'Ïł'),
-    ('і', 'і'),
-    ('ј', 'ј'),
-    ('ᔹ', 'ᔹ'),
-    ('ᶖ', 'ᶖ'),
-    ('á¶€', 'á¶€'),
-    ('á¶š', 'á¶š'),
-    ('áž­', 'áž­'),
-    ('ị', 'ị'),
-    ('ⁱ', 'ⁱ'),
-    ('ⅈ', 'ⅉ'),
-    ('ⱌ', 'ⱌ'),
-    ('𝐱', '𝐣'),
-    ('𝑖', '𝑗'),
-    ('𝒊', '𝒋'),
-    ('đ’Ÿ', '𝒿'),
-    ('đ“Č', '𝓳'),
-    ('𝔩', '𝔧'),
-    ('𝕚', '𝕛'),
-    ('𝖎', '𝖏'),
-    ('𝗂', '𝗃'),
-    ('đ—¶', 'đ—·'),
-    ('đ˜Ș', 'đ˜«'),
-    ('𝙞', '𝙟'),
-    ('𝚒', '𝚓'),
-    ('đŒš', 'đŒš'),
-    ('𞁌', '𞁍'),
-    ('𞁹', '𞁹'),
-];
-
-pub const TERMINAL_PUNCTUATION: &'static [(char, char)] = &[
-    ('!', '!'),
-    (',', ','),
-    ('.', '.'),
-    (':', ';'),
-    ('?', '?'),
-    ('ÍŸ', 'ÍŸ'),
-    ('·', '·'),
-    ('։', '։'),
-    ('ڃ', 'ڃ'),
-    ('ی', 'ی'),
-    ('ۛ', 'ۛ'),
-    ('۝', '۟'),
-    ('۔', '۔'),
-    ('܀', '܊'),
-    ('܌', '܌'),
-    ('ßž', 'ßč'),
-    ('à °', 'à ”'),
-    ('à ·', 'à Ÿ'),
-    ('àĄž', 'àĄž'),
-    ('à„€', 'à„„'),
-    ('àčš', 'àč›'),
-    ('àŒˆ', 'àŒˆ'),
-    ('àŒ', 'àŒ’'),
-    ('၊', '။'),
-    ('፡', '፹'),
-    ('ᙼ', 'ᙼ'),
-    ('᛫', '᛭'),
-    ('᜔', '᜶'),
-    ('។', '៖'),
-    ('៚', '៚'),
-    ('᠂', '᠅'),
-    ('᠈', '᠉'),
-    ('á„„', 'á„…'),
-    ('áȘš', 'áȘ«'),
-    ('᭎', '᭏'),
-    ('᭚', '᭛'),
-    ('᭝', '᭟'),
-    ('á­œ', 'á­ż'),
-    ('á°»', 'á°ż'),
-    ('ᱟ', '᱿'),
-    (' ', ' '),
-    ('‌', '“'),
-    ('⁇', '⁉'),
-    ('âłč', 'âł»'),
-    ('âžź', 'âžź'),
-    ('➌', '➌'),
-    ('âč', 'âč'),
-    ('âčŒ', 'âčŒ'),
-    ('âčŽ', 'âč'),
-    ('âč“', 'âč”'),
-    ('、', '。'),
-    ('ꓟ', 'ê“ż'),
-    ('꘍', '꘏'),
-    ('ê›ł', '꛷'),
-    ('êĄ¶', 'êĄ·'),
-    ('êŁŽ', 'êŁ'),
-    ('ê€Ż', 'ê€Ż'),
-    ('꧇', '꧉'),
-    ('꩝', '꩟'),
-    ('꫟', '꫟'),
-    ('꫰', '꫱'),
-    ('êŻ«', 'êŻ«'),
-    ('ïž’', 'ïž’'),
-    ('ïž•', 'ïž–'),
-    ('ïč', 'ïč’'),
-    ('ïč”', 'ïč—'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïœĄ', 'ïœĄ'),
-    ('', ''),
-    ('𐎟', '𐎟'),
-    ('𐏐', '𐏐'),
-    ('𐡗', '𐡗'),
-    ('đ€Ÿ', 'đ€Ÿ'),
-    ('𐩖', '𐩗'),
-    ('𐫰', '𐫔'),
-    ('đŹș', '𐏿'),
-    ('𐼙', '𐼜'),
-    ('đœ•', 'đœ™'),
-    ('đŸ†', 'đŸ‰'),
-    ('𑁇', '𑁍'),
-    ('đ‘‚Ÿ', '𑃁'),
-    ('𑅁', '𑅃'),
-    ('𑇅', '𑇆'),
-    ('𑇍', '𑇍'),
-    ('𑇞', '𑇟'),
-    ('𑈾', 'đ‘ˆŒ'),
-    ('𑊩', '𑊩'),
-    ('𑏔', '𑏕'),
-    ('𑑋', '𑑍'),
-    ('𑑚', '𑑛'),
-    ('𑗂', '𑗅'),
-    ('𑗉', '𑗗'),
-    ('𑙁', '𑙂'),
-    ('đ‘œŒ', 'đ‘œŸ'),
-    ('đ‘„„', 'đ‘„„'),
-    ('𑄆', '𑄆'),
-    ('đ‘©‚', 'đ‘©ƒ'),
-    ('đ‘Ș›', 'đ‘Șœ'),
-    ('đ‘ȘĄ', 'đ‘Șą'),
-    ('𑱁', 'đ‘±ƒ'),
-    ('𑱱', '𑱱'),
-    ('đ‘»·', '𑻞'),
-    ('đ‘œƒ', 'đ‘œ„'),
-    ('𒑰', '𒑮'),
-    ('đ–©ź', 'đ–©Ż'),
-    ('đ–«”', 'đ–«”'),
-    ('đ–Ź·', 'đ–Źč'),
-    ('𖭄', '𖭄'),
-    ('𖔟', '𖔯'),
-    ('đ–ș—', 'đ–ș˜'),
-    ('đ›ČŸ', 'đ›ČŸ'),
-    ('đȘ‡', 'đȘŠ'),
-];
-
-pub const UNIFIED_IDEOGRAPH: &'static [(char, char)] = &[
-    ('㐀', 'ä¶ż'),
-    ('侀', '鿿'),
-    ('', ''),
-    ('ïš‘', 'ïš‘'),
-    ('ïš“', 'ïš”'),
-    ('', ''),
-    ('ïšĄ', 'ïšĄ'),
-    ('ïšŁ', ''),
-    ('ïš§', 'ïš©'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const UPPERCASE: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('À', 'Ö'),
-    ('Ø', 'Þ'),
-    ('Ā', 'Ā'),
-    ('Ă', 'Ă'),
-    ('Ą', 'Ą'),
-    ('Ć', 'Ć'),
-    ('Ĉ', 'Ĉ'),
-    ('Ċ', 'Ċ'),
-    ('Č', 'Č'),
-    ('Ď', 'Ď'),
-    ('Đ', 'Đ'),
-    ('Ē', 'Ē'),
-    ('Ĕ', 'Ĕ'),
-    ('Ė', 'Ė'),
-    ('Ę', 'Ę'),
-    ('Ě', 'Ě'),
-    ('Ĝ', 'Ĝ'),
-    ('Ğ', 'Ğ'),
-    ('Ä ', 'Ä '),
-    ('Äą', 'Äą'),
-    ('Ä€', 'Ä€'),
-    ('ÄŠ', 'ÄŠ'),
-    ('Äš', 'Äš'),
-    ('ÄȘ', 'ÄȘ'),
-    ('ÄŹ', 'ÄŹ'),
-    ('Äź', 'Äź'),
-    ('İ', 'İ'),
-    ('ÄČ', 'ÄČ'),
-    ('ÄŽ', 'ÄŽ'),
-    ('Ķ', 'Ķ'),
-    ('Äč', 'Äč'),
-    ('Ä»', 'Ä»'),
-    ('Ĝ', 'Ĝ'),
-    ('Äż', 'Äż'),
-    ('Ɓ', 'Ɓ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('Ƈ', 'Ƈ'),
-    ('Ê', 'Ê'),
-    ('Ì', 'Ì'),
-    ('Ǝ', 'Ǝ'),
-    ('Ɛ', 'Ɛ'),
-    ('Œ', 'Œ'),
-    ('Ɣ', 'Ɣ'),
-    ('Ɩ', 'Ɩ'),
-    ('Ƙ', 'Ƙ'),
-    ('Ú', 'Ú'),
-    ('Ü', 'Ü'),
-    ('ƞ', 'ƞ'),
-    ('Š', 'Š'),
-    ('Ćą', 'Ćą'),
-    ('Ć€', 'Ć€'),
-    ('ĆŠ', 'ĆŠ'),
-    ('Ćš', 'Ćš'),
-    ('ĆȘ', 'ĆȘ'),
-    ('ĆŹ', 'ĆŹ'),
-    ('Ćź', 'Ćź'),
-    ('ư', 'ư'),
-    ('ĆČ', 'ĆČ'),
-    ('ĆŽ', 'ĆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Ÿ', 'Ćč'),
-    ('Ć»', 'Ć»'),
-    ('Ćœ', 'Ćœ'),
-    ('Ɓ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ƈ'),
-    ('Ɖ', 'Ƌ'),
-    ('Ǝ', 'Ƒ'),
-    ('Ɠ', 'Ɣ'),
-    ('Ɩ', 'Ƙ'),
-    ('Ɯ', 'Ɲ'),
-    ('Ɵ', 'Ơ'),
-    ('Æą', 'Æą'),
-    ('Æ€', 'Æ€'),
-    ('Ɗ', 'Ƨ'),
-    ('Æ©', 'Æ©'),
-    ('ÆŹ', 'ÆŹ'),
-    ('Æź', 'ÆŻ'),
-    ('Ʊ', 'Æł'),
-    ('Æ”', 'Æ”'),
-    ('Æ·', 'Æž'),
-    ('ƌ', 'ƌ'),
-    ('DŽ', 'DŽ'),
-    ('LJ', 'LJ'),
-    ('NJ', 'NJ'),
-    ('Ǎ', 'Ǎ'),
-    ('Ǐ', 'Ǐ'),
-    ('Ǒ', 'Ǒ'),
-    ('Ǔ', 'Ǔ'),
-    ('Ǖ', 'Ǖ'),
-    ('Ǘ', 'Ǘ'),
-    ('Ǚ', 'Ǚ'),
-    ('Ǜ', 'Ǜ'),
-    ('Ǟ', 'Ǟ'),
-    ('Ç ', 'Ç '),
-    ('Çą', 'Çą'),
-    ('Ç€', 'Ç€'),
-    ('ÇŠ', 'ÇŠ'),
-    ('Çš', 'Çš'),
-    ('ÇȘ', 'ÇȘ'),
-    ('ÇŹ', 'ÇŹ'),
-    ('Çź', 'Çź'),
-    ('DZ', 'DZ'),
-    ('ÇŽ', 'ÇŽ'),
-    ('Ƕ', 'Ǟ'),
-    ('Çș', 'Çș'),
-    ('nj', 'nj'),
-    ('ÇŸ', 'ÇŸ'),
-    ('Ȁ', 'Ȁ'),
-    ('Ȃ', 'Ȃ'),
-    ('Ȅ', 'Ȅ'),
-    ('Ȇ', 'Ȇ'),
-    ('Ȉ', 'Ȉ'),
-    ('Ȋ', 'Ȋ'),
-    ('Ȍ', 'Ȍ'),
-    ('Ȏ', 'Ȏ'),
-    ('Ȑ', 'Ȑ'),
-    ('Ȓ', 'Ȓ'),
-    ('Ȕ', 'Ȕ'),
-    ('Ȗ', 'Ȗ'),
-    ('Ș', 'Ș'),
-    ('Ț', 'Ț'),
-    ('Ȝ', 'Ȝ'),
-    ('Ȟ', 'Ȟ'),
-    ('È ', 'È '),
-    ('Èą', 'Èą'),
-    ('È€', 'È€'),
-    ('ÈŠ', 'ÈŠ'),
-    ('Èš', 'Èš'),
-    ('ÈȘ', 'ÈȘ'),
-    ('ÈŹ', 'ÈŹ'),
-    ('Èź', 'Èź'),
-    ('Ȱ', 'Ȱ'),
-    ('ÈČ', 'ÈČ'),
-    ('Èș', 'È»'),
-    ('Ȝ', 'ȟ'),
-    ('Ɂ', 'Ɂ'),
-    ('Ƀ', 'Ɇ'),
-    ('Ɉ', 'Ɉ'),
-    ('Ɋ', 'Ɋ'),
-    ('Ɍ', 'Ɍ'),
-    ('Ɏ', 'Ɏ'),
-    ('Ͱ', 'Ͱ'),
-    ('ÍČ', 'ÍČ'),
-    ('Ͷ', 'Ͷ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ώ'),
-    ('Α', 'Ρ'),
-    ('Σ', 'Ϋ'),
-    ('Ϗ', 'Ϗ'),
-    ('ϒ', 'ϔ'),
-    ('Ϙ', 'Ϙ'),
-    ('Ϛ', 'Ϛ'),
-    ('Ϝ', 'Ϝ'),
-    ('Ϟ', 'Ϟ'),
-    ('Ï ', 'Ï '),
-    ('Ïą', 'Ïą'),
-    ('Ï€', 'Ï€'),
-    ('ÏŠ', 'ÏŠ'),
-    ('Ïš', 'Ïš'),
-    ('ÏȘ', 'ÏȘ'),
-    ('ÏŹ', 'ÏŹ'),
-    ('Ïź', 'Ïź'),
-    ('ÏŽ', 'ÏŽ'),
-    ('Ï·', 'Ï·'),
-    ('Ïč', 'Ïș'),
-    ('Ïœ', 'ĐŻ'),
-    ('Ń ', 'Ń '),
-    ('Ńą', 'Ńą'),
-    ('Ń€', 'Ń€'),
-    ('ŃŠ', 'ŃŠ'),
-    ('Ńš', 'Ńš'),
-    ('ŃȘ', 'ŃȘ'),
-    ('ŃŹ', 'ŃŹ'),
-    ('Ńź', 'Ńź'),
-    ('Ѱ', 'Ѱ'),
-    ('ŃČ', 'ŃČ'),
-    ('ŃŽ', 'ŃŽ'),
-    ('Ѷ', 'Ѷ'),
-    ('Ńž', 'Ńž'),
-    ('Ńș', 'Ńș'),
-    ('ŃŒ', 'ŃŒ'),
-    ('ŃŸ', 'ŃŸ'),
-    ('Ҁ', 'Ҁ'),
-    ('Ҋ', 'Ҋ'),
-    ('Ҍ', 'Ҍ'),
-    ('Ҏ', 'Ҏ'),
-    ('Ґ', 'Ґ'),
-    ('Ғ', 'Ғ'),
-    ('Ҕ', 'Ҕ'),
-    ('Җ', 'Җ'),
-    ('Ҙ', 'Ҙ'),
-    ('Қ', 'Қ'),
-    ('Ҝ', 'Ҝ'),
-    ('Ҟ', 'Ҟ'),
-    ('Ò ', 'Ò '),
-    ('Òą', 'Òą'),
-    ('Ò€', 'Ò€'),
-    ('ÒŠ', 'ÒŠ'),
-    ('Òš', 'Òš'),
-    ('ÒȘ', 'ÒȘ'),
-    ('ÒŹ', 'ÒŹ'),
-    ('Òź', 'Òź'),
-    ('Ò°', 'Ò°'),
-    ('ÒČ', 'ÒČ'),
-    ('ÒŽ', 'ÒŽ'),
-    ('Ò¶', 'Ò¶'),
-    ('Òž', 'Òž'),
-    ('Òș', 'Òș'),
-    ('Ҍ', 'Ҍ'),
-    ('ÒŸ', 'ÒŸ'),
-    ('Ӏ', 'Ӂ'),
-    ('Ӄ', 'Ӄ'),
-    ('Ӆ', 'Ӆ'),
-    ('Ӈ', 'Ӈ'),
-    ('Ӊ', 'Ӊ'),
-    ('Ӌ', 'Ӌ'),
-    ('Ӎ', 'Ӎ'),
-    ('Ӑ', 'Ӑ'),
-    ('Ӓ', 'Ӓ'),
-    ('Ӕ', 'Ӕ'),
-    ('Ӗ', 'Ӗ'),
-    ('Ә', 'Ә'),
-    ('Ӛ', 'Ӛ'),
-    ('Ӝ', 'Ӝ'),
-    ('Ӟ', 'Ӟ'),
-    ('Ó ', 'Ó '),
-    ('Óą', 'Óą'),
-    ('Ó€', 'Ó€'),
-    ('ÓŠ', 'ÓŠ'),
-    ('Óš', 'Óš'),
-    ('ÓȘ', 'ÓȘ'),
-    ('ÓŹ', 'ÓŹ'),
-    ('Óź', 'Óź'),
-    ('Ó°', 'Ó°'),
-    ('ÓČ', 'ÓČ'),
-    ('ÓŽ', 'ÓŽ'),
-    ('Ó¶', 'Ó¶'),
-    ('Óž', 'Óž'),
-    ('Óș', 'Óș'),
-    ('ӌ', 'ӌ'),
-    ('ÓŸ', 'ÓŸ'),
-    ('Ԁ', 'Ԁ'),
-    ('Ԃ', 'Ԃ'),
-    ('Ԅ', 'Ԅ'),
-    ('Ԇ', 'Ԇ'),
-    ('Ԉ', 'Ԉ'),
-    ('Ԋ', 'Ԋ'),
-    ('Ԍ', 'Ԍ'),
-    ('Ԏ', 'Ԏ'),
-    ('Ԑ', 'Ԑ'),
-    ('Ԓ', 'Ԓ'),
-    ('Ԕ', 'Ԕ'),
-    ('Ԗ', 'Ԗ'),
-    ('Ԙ', 'Ԙ'),
-    ('Ԛ', 'Ԛ'),
-    ('Ԝ', 'Ԝ'),
-    ('Ԟ', 'Ԟ'),
-    ('Ô ', 'Ô '),
-    ('Ôą', 'Ôą'),
-    ('Ô€', 'Ô€'),
-    ('ÔŠ', 'ÔŠ'),
-    ('Ôš', 'Ôš'),
-    ('ÔȘ', 'ÔȘ'),
-    ('ÔŹ', 'ÔŹ'),
-    ('Ôź', 'Ôź'),
-    ('Ô±', 'Ֆ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('áȉ', 'áȉ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᾀ', 'ᾀ'),
-    ('ᾂ', 'ᾂ'),
-    ('ᾄ', 'ᾄ'),
-    ('ᾆ', 'ᾆ'),
-    ('ឈ', 'ឈ'),
-    ('ᾊ', 'ᾊ'),
-    ('ᾌ', 'ᾌ'),
-    ('ᾎ', 'ᾎ'),
-    ('ថ', 'ថ'),
-    ('ᾒ', 'ᾒ'),
-    ('ᾔ', 'ᾔ'),
-    ('ᾖ', 'ᾖ'),
-    ('ម', 'ម'),
-    ('ᾚ', 'ᾚ'),
-    ('ᾜ', 'ᾜ'),
-    ('ᾞ', 'ᾞ'),
-    ('áž ', 'áž '),
-    ('ážą', 'ážą'),
-    ('ក', 'ក'),
-    ('ដ', 'ដ'),
-    ('ážš', 'ážš'),
-    ('ážȘ', 'ážȘ'),
-    ('ត', 'ត'),
-    ('ážź', 'ážź'),
-    ('áž°', 'áž°'),
-    ('ážČ', 'ážČ'),
-    ('ណ', 'ណ'),
-    ('áž¶', 'áž¶'),
-    ('ážž', 'ážž'),
-    ('ážș', 'ážș'),
-    ('ឌ', 'ឌ'),
-    ('ស', 'ស'),
-    ('áč€', 'áč€'),
-    ('áč‚', 'áč‚'),
-    ('áč„', 'áč„'),
-    ('áč†', 'áč†'),
-    ('áčˆ', 'áčˆ'),
-    ('áčŠ', 'áčŠ'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč', 'áč'),
-    ('áč’', 'áč’'),
-    ('áč”', 'áč”'),
-    ('áč–', 'áč–'),
-    ('áč˜', 'áč˜'),
-    ('áčš', 'áčš'),
-    ('áčœ', 'áčœ'),
-    ('áčž', 'áčž'),
-    ('áč ', 'áč '),
-    ('áčą', 'áčą'),
-    ('áč€', 'áč€'),
-    ('áčŠ', 'áčŠ'),
-    ('áčš', 'áčš'),
-    ('áčȘ', 'áčȘ'),
-    ('áčŹ', 'áčŹ'),
-    ('áčź', 'áčź'),
-    ('áč°', 'áč°'),
-    ('áčČ', 'áčČ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč¶', 'áč¶'),
-    ('áčž', 'áčž'),
-    ('áčș', 'áčș'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŸ', 'áčŸ'),
-    ('áș€', 'áș€'),
-    ('áș‚', 'áș‚'),
-    ('áș„', 'áș„'),
-    ('áș†', 'áș†'),
-    ('áșˆ', 'áșˆ'),
-    ('áșŠ', 'áșŠ'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș', 'áș'),
-    ('áș’', 'áș’'),
-    ('áș”', 'áș”'),
-    ('áșž', 'áșž'),
-    ('áș ', 'áș '),
-    ('áșą', 'áșą'),
-    ('áș€', 'áș€'),
-    ('áșŠ', 'áșŠ'),
-    ('áșš', 'áșš'),
-    ('áșȘ', 'áșȘ'),
-    ('áșŹ', 'áșŹ'),
-    ('áșź', 'áșź'),
-    ('áș°', 'áș°'),
-    ('áșČ', 'áșČ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș¶', 'áș¶'),
-    ('áșž', 'áșž'),
-    ('áșș', 'áșș'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŸ', 'áșŸ'),
-    ('Ề', 'Ề'),
-    ('Ể', 'Ể'),
-    ('Ễ', 'Ễ'),
-    ('Ệ', 'Ệ'),
-    ('Ỉ', 'Ỉ'),
-    ('Ị', 'Ị'),
-    ('Ọ', 'Ọ'),
-    ('Ỏ', 'Ỏ'),
-    ('Ố', 'Ố'),
-    ('Ồ', 'Ồ'),
-    ('Ổ', 'Ổ'),
-    ('Ỗ', 'Ỗ'),
-    ('Ộ', 'Ộ'),
-    ('Ớ', 'Ớ'),
-    ('Ờ', 'Ờ'),
-    ('Ở', 'Ở'),
-    ('á» ', 'á» '),
-    ('ỹ', 'ỹ'),
-    ('Ề', 'Ề'),
-    ('Ị', 'Ị'),
-    ('Ớ', 'Ớ'),
-    ('á»Ș', 'á»Ș'),
-    ('ỏ', 'ỏ'),
-    ('ở', 'ở'),
-    ('á»°', 'á»°'),
-    ('á»Č', 'á»Č'),
-    ('Ỏ', 'Ỏ'),
-    ('á»¶', 'á»¶'),
-    ('Ở', 'Ở'),
-    ('á»ș', 'á»ș'),
-    ('Ọ', 'Ọ'),
-    ('ở', 'ở'),
-    ('ገ', 'ጏ'),
-    ('ጘ', 'ጝ'),
-    ('ጚ', 'áŒŻ'),
-    ('ጞ', 'áŒż'),
-    ('ᜈ', 'ᜍ'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', 'ᜟ'),
-    ('᜚', 'áœŻ'),
-    ('៞', '៻'),
-    ('Ὲ', 'Ή'),
-    ('Ῐ', 'Ί'),
-    ('Ὶ', '῏'),
-    ('áżž', 'áż»'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℋ', 'ℍ'),
-    ('ℐ', 'ℒ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℰ', 'ℳ'),
-    ('℟', 'ℿ'),
-    ('ⅅ', 'ⅅ'),
-    ('Ⅰ', 'Ⅿ'),
-    ('Ↄ', 'Ↄ'),
-    ('Ⓐ', 'Ⓩ'),
-    ('Ⰰ', 'Ⱟ'),
-    ('â± ', 'â± '),
-    ('ⱹ', 'ⱀ'),
-    ('â±§', 'â±§'),
-    ('Ⱪ', 'Ⱪ'),
-    ('Ⱬ', 'Ⱬ'),
-    ('â±­', 'â±°'),
-    ('â±Č', 'â±Č'),
-    ('â±”', 'â±”'),
-    ('ⱟ', 'âȀ'),
-    ('âȂ', 'âȂ'),
-    ('âȄ', 'âȄ'),
-    ('âȆ', 'âȆ'),
-    ('âȈ', 'âȈ'),
-    ('âȊ', 'âȊ'),
-    ('âȌ', 'âȌ'),
-    ('âȎ', 'âȎ'),
-    ('âȐ', 'âȐ'),
-    ('âȒ', 'âȒ'),
-    ('âȔ', 'âȔ'),
-    ('âȖ', 'âȖ'),
-    ('âȘ', 'âȘ'),
-    ('âȚ', 'âȚ'),
-    ('âȜ', 'âȜ'),
-    ('âȞ', 'âȞ'),
-    ('âČ ', 'âČ '),
-    ('âČą', 'âČą'),
-    ('âČ€', 'âČ€'),
-    ('âČŠ', 'âČŠ'),
-    ('âČš', 'âČš'),
-    ('âČȘ', 'âČȘ'),
-    ('âČŹ', 'âČŹ'),
-    ('âČź', 'âČź'),
-    ('âȰ', 'âȰ'),
-    ('âČČ', 'âČČ'),
-    ('âČŽ', 'âČŽ'),
-    ('âȶ', 'âȶ'),
-    ('âČž', 'âČž'),
-    ('âČș', 'âČș'),
-    ('âČŒ', 'âČŒ'),
-    ('âČŸ', 'âČŸ'),
-    ('Ⳁ', 'Ⳁ'),
-    ('Ⳃ', 'Ⳃ'),
-    ('Ⳅ', 'Ⳅ'),
-    ('Ⳇ', 'Ⳇ'),
-    ('Ⳉ', 'Ⳉ'),
-    ('Ⳋ', 'Ⳋ'),
-    ('Ⳍ', 'Ⳍ'),
-    ('Ⳏ', 'Ⳏ'),
-    ('Ⳑ', 'Ⳑ'),
-    ('Ⳓ', 'Ⳓ'),
-    ('Ⳕ', 'Ⳕ'),
-    ('Ⳗ', 'Ⳗ'),
-    ('Ⳙ', 'Ⳙ'),
-    ('Ⳛ', 'Ⳛ'),
-    ('Ⳝ', 'Ⳝ'),
-    ('Ⳟ', 'Ⳟ'),
-    ('âł ', 'âł '),
-    ('âłą', 'âłą'),
-    ('âł«', 'âł«'),
-    ('âł­', 'âł­'),
-    ('âłČ', 'âłČ'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙃ', 'Ꙃ'),
-    ('Ꙅ', 'Ꙅ'),
-    ('Ꙇ', 'Ꙇ'),
-    ('Ꙉ', 'Ꙉ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙍ', 'Ꙍ'),
-    ('Ꙏ', 'Ꙏ'),
-    ('Ꙑ', 'Ꙑ'),
-    ('Ꙓ', 'Ꙓ'),
-    ('Ꙕ', 'Ꙕ'),
-    ('Ꙗ', 'Ꙗ'),
-    ('Ꙙ', 'Ꙙ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('Ꙝ', 'Ꙝ'),
-    ('Ꙟ', 'Ꙟ'),
-    ('Ꙡ', 'Ꙡ'),
-    ('ê™ą', 'ê™ą'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('ê™Ș', 'ê™Ș'),
-    ('ê™Ź', 'ê™Ź'),
-    ('Ꚁ', 'Ꚁ'),
-    ('Ꚃ', 'Ꚃ'),
-    ('Ꚅ', 'Ꚅ'),
-    ('Ꚇ', 'Ꚇ'),
-    ('Ꚉ', 'Ꚉ'),
-    ('Ꚋ', 'Ꚋ'),
-    ('Ꚍ', 'Ꚍ'),
-    ('Ꚏ', 'Ꚏ'),
-    ('Ꚑ', 'Ꚑ'),
-    ('Ꚓ', 'Ꚓ'),
-    ('Ꚕ', 'Ꚕ'),
-    ('Ꚗ', 'Ꚗ'),
-    ('Ꚙ', 'Ꚙ'),
-    ('Ꚛ', 'Ꚛ'),
-    ('êœą', 'êœą'),
-    ('꜀', '꜀'),
-    ('꜊', '꜊'),
-    ('ꜚ', 'ꜚ'),
-    ('êœȘ', 'êœȘ'),
-    ('êœŹ', 'êœŹ'),
-    ('êœź', 'êœź'),
-    ('êœČ', 'êœČ'),
-    ('꜎', '꜎'),
-    ('Ꜷ', 'Ꜷ'),
-    ('ꜞ', 'ꜞ'),
-    ('êœș', 'êœș'),
-    ('꜌', '꜌'),
-    ('ꜟ', 'ꜟ'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝃ', 'Ꝃ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('Ꝇ', 'Ꝇ'),
-    ('Ꝉ', 'Ꝉ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝍ', 'Ꝍ'),
-    ('Ꝏ', 'Ꝏ'),
-    ('Ꝑ', 'Ꝑ'),
-    ('Ꝓ', 'Ꝓ'),
-    ('Ꝕ', 'Ꝕ'),
-    ('Ꝗ', 'Ꝗ'),
-    ('Ꝙ', 'Ꝙ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('Ꝝ', 'Ꝝ'),
-    ('Ꝟ', 'Ꝟ'),
-    ('Ꝡ', 'Ꝡ'),
-    ('êą', 'êą'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('êȘ', 'êȘ'),
-    ('êŹ', 'êŹ'),
-    ('êź', 'êź'),
-    ('êč', 'êč'),
-    ('Ꝼ', 'Ꝼ'),
-    ('Ꝝ', 'ꝟ'),
-    ('Ꞁ', 'Ꞁ'),
-    ('Ꞃ', 'Ꞃ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('Ꞇ', 'Ꞇ'),
-    ('Ꞌ', 'Ꞌ'),
-    ('Ɥ', 'Ɥ'),
-    ('Ꞑ', 'Ꞑ'),
-    ('Ꞓ', 'Ꞓ'),
-    ('Ꞗ', 'Ꞗ'),
-    ('Ꞙ', 'Ꞙ'),
-    ('Ꞛ', 'Ꞛ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('Ꞟ', 'Ꞟ'),
-    ('Ꞡ', 'Ꞡ'),
-    ('êžą', 'êžą'),
-    ('Ꞁ', 'Ꞁ'),
-    ('꞊', '꞊'),
-    ('Ꞛ', 'Ꞛ'),
-    ('êžȘ', 'êžź'),
-    ('Ʞ', 'ꞎ'),
-    ('Ꞷ', 'Ꞷ'),
-    ('êžž', 'êžž'),
-    ('êžș', 'êžș'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞟ', 'ꞟ'),
-    ('Ꟁ', 'Ꟁ'),
-    ('Ꟃ', 'Ꟃ'),
-    ('Ꞔ', 'Ꟈ'),
-    ('Ꟊ', 'Ꟊ'),
-    ('Ɤ', 'Ꟍ'),
-    ('Ꟑ', 'Ꟑ'),
-    ('Ꟗ', 'Ꟗ'),
-    ('Ꟙ', 'Ꟙ'),
-    ('Ꟛ', 'Ꟛ'),
-    ('Ƛ', 'Ƛ'),
-    ('꟔', '꟔'),
-    ('ïŒĄ', 'ïŒș'),
-    ('𐐀', '𐐧'),
-    ('𐒰', '𐓓'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('đČ€', 'đČČ'),
-    ('𐔐', '𐔄'),
-    ('𑱠', '𑱿'),
-    ('đ–č€', 'đ–čŸ'),
-    ('𝐀', '𝐙'),
-    ('𝐮', '𝑍'),
-    ('𝑹', '𝒁'),
-    ('𝒜', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’”'),
-    ('𝓐', 'đ“©'),
-    ('𝔄', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔾', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕬', '𝖅'),
-    ('𝖠', 'đ–č'),
-    ('𝗔', '𝗭'),
-    ('𝘈', '𝘡'),
-    ('đ˜Œ', '𝙕'),
-    ('𝙰', '𝚉'),
-    ('𝚹', '𝛀'),
-    ('𝛱', 'đ›ș'),
-    ('𝜜', '𝜮'),
-    ('𝝖', '𝝼'),
-    ('𝞐', '𝞹'),
-    ('𝟊', '𝟊'),
-    ('𞀀', '𞀥'),
-    ('🄰', '🅉'),
-    ('🅐', 'đŸ…©'),
-    ('🅰', '🆉'),
-];
-
-pub const VARIATION_SELECTOR: &'static [(char, char)] = &[
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const WHITE_SPACE: &'static [(char, char)] = &[
-    ('\t', '\r'),
-    (' ', ' '),
-    ('\u{85}', '\u{85}'),
-    ('\u{a0}', '\u{a0}'),
-    ('\u{1680}', '\u{1680}'),
-    ('\u{2000}', '\u{200a}'),
-    ('\u{2028}', '\u{2029}'),
-    ('\u{202f}', '\u{202f}'),
-    ('\u{205f}', '\u{205f}'),
-    ('\u{3000}', '\u{3000}'),
-];
-
-pub const XID_CONTINUE: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('A', 'Z'),
-    ('_', '_'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('·', '·'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('\u{300}', 'ÍŽ'),
-    ('Ͷ', 'ͷ'),
-    ('ͻ', '͜'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('\u{483}', '\u{487}'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', 'ՙ'),
-    ('ՠ', 'ֈ'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('\u{610}', '\u{61a}'),
-    ('Ű ', 'Ù©'),
-    ('Ùź', 'ۓ'),
-    ('ە', '\u{6dc}'),
-    ('\u{6df}', '\u{6e8}'),
-    ('\u{6ea}', 'ی'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', '\u{74a}'),
-    ('ʍ', 'Ț±'),
-    ('߀', 'ß”'),
-    ('ßș', 'ßș'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('ࠀ', '\u{82d}'),
-    ('àĄ€', '\u{85b}'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('\u{897}', '\u{8e1}'),
-    ('\u{8e3}', '\u{963}'),
-    ('à„Š', 'à„Ż'),
-    ('à„±', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('\u{9bc}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৎ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', '\u{9e3}'),
-    ('à§Š', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', 'àšƒ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('੊', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('\u{abc}', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('ૐ', 'ૐ'),
-    ('à« ', '\u{ae3}'),
-    ('૊', 'à«Ż'),
-    ('à«č', '\u{aff}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('\u{b3c}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', '\u{b63}'),
-    ('à­Š', 'à­Ż'),
-    ('à­±', 'à­±'),
-    ('\u{b82}', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('àŻ', 'àŻ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('àŻŠ', 'àŻŻ'),
-    ('\u{c00}', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('\u{c3c}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', '\u{c63}'),
-    ('ొ', 'à±Ż'),
-    ('àȀ', 'àȃ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('\u{cbc}', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àł', 'àłž'),
-    ('àł ', '\u{ce3}'),
-    ('àłŠ', 'àłŻ'),
-    ('àł±', 'àłł'),
-    ('\u{d00}', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”Ž'),
-    ('à””', '\u{d57}'),
-    ('à”Ÿ', '\u{d63}'),
-    ('à”Š', 'à”Ż'),
-    ('à”ș', 'à”ż'),
-    ('\u{d81}', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Š', 'à·Ż'),
-    ('à·Č', 'à·ł'),
-    ('àž', '\u{e3a}'),
-    ('àč€', '\u{e4e}'),
-    ('àč', 'àč™'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('\u{ec8}', '\u{ece}'),
-    ('໐', '໙'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('\u{f18}', '\u{f19}'),
-    ('àŒ ', 'àŒ©'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('àŒŸ', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('\u{f71}', '\u{f84}'),
-    ('\u{f86}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('က', '၉'),
-    ('ၐ', '\u{109d}'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('\u{135d}', '\u{135f}'),
-    ('፩', '፱'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', '\u{1715}'),
-    ('ᜟ', '\u{1734}'),
-    ('ᝀ', '\u{1753}'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('\u{1772}', '\u{1773}'),
-    ('ក', '\u{17d3}'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', '\u{17dd}'),
-    ('០', '៩'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '᠙'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('ᄆ', 'á„­'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('᧐', '᧚'),
-    ('Ṁ', '\u{1a1b}'),
-    ('áš ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('áȘ§', 'áȘ§'),
-    ('\u{1ab0}', '\u{1abd}'),
-    ('\u{1abf}', '\u{1ace}'),
-    ('\u{1b00}', 'ᭌ'),
-    ('᭐', '᭙'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', '\u{1bf3}'),
-    ('ᰀ', '\u{1c37}'),
-    ('᱀', '᱉'),
-    ('ᱍ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', 'áłș'),
-    ('ᮀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('\u{200c}', '\u{200d}'),
-    ('‿', '⁀'),
-    ('⁔', '⁔'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('\u{20d0}', '\u{20dc}'),
-    ('\u{20e1}', '\u{20e1}'),
-    ('\u{20e5}', '\u{20f0}'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('℘', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('\u{2d7f}', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('々', '〇'),
-    ('〡', '\u{302f}'),
-    ('〱', '〔'),
-    ('〾', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('\u{3099}', '\u{309a}'),
-    ('ゝ', 'ゟ'),
-    ('ァ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘫ'),
-    ('Ꙁ', '\u{a66f}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('ê™ż', '\u{a6f1}'),
-    ('ꜗ', 'ꜟ'),
-    ('êœą', 'ꞈ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ê §'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('êĄ€', 'êĄł'),
-    ('êą€', '\u{a8c5}'),
-    ('êŁ', 'êŁ™'),
-    ('\u{a8e0}', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', '\u{a92d}'),
-    ('ꀰ', '\u{a953}'),
-    ('ꄠ', 'ꄌ'),
-    ('\u{a980}', '\u{a9c0}'),
-    ('ꧏ', '꧙'),
-    ('ê§ ', 'ê§Ÿ'),
-    ('Ꚁ', '\u{aa36}'),
-    ('ꩀ', 'ꩍ'),
-    ('꩐', '꩙'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ż'),
-    ('ê«Č', '\u{aaf6}'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êŻȘ'),
-    ('êŻŹ', '\u{abed}'),
-    ('êŻ°', 'êŻč'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', 'ﱝ'),
-    ('ﱀ', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·č'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('ïžł', ''),
-    ('ïč', 'ïč'),
-    ('ïč±', 'ïč±'),
-    ('ïčł', 'ïčł'),
-    ('ïč·', 'ïč·'),
-    ('ïčč', 'ïčč'),
-    ('ïč»', 'ïč»'),
-    ('ïčœ', 'ïčœ'),
-    ('ïčż', 'ﻌ'),
-    ('', ''),
-    ('ïŒĄ', 'ïŒș'),
-    ('ïŒż', 'ïŒż'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', '\u{1037a}'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒠', '𐒩'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '\u{10ae6}'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '\u{10d27}'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔄'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('\u{10efc}', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', '\u{10f50}'),
-    ('đœ°', '\u{10f85}'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀀', '\u{11046}'),
-    ('𑁩', '𑁔'),
-    ('\u{1107f}', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('𑃐', '𑃹'),
-    ('𑃰', 'đ‘ƒč'),
-    ('\u{11100}', '\u{11134}'),
-    ('đ‘„¶', '𑄿'),
-    ('𑅄', '𑅇'),
-    ('𑅐', '\u{11173}'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('\u{11180}', '𑇄'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('𑇎', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', '\u{11237}'),
-    ('\u{1123e}', '\u{11241}'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '\u{112ea}'),
-    ('𑋰', 'đ‘‹č'),
-    ('\u{11300}', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('\u{1133b}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('𑍐', '𑍐'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍝', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏓'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('𑐀', '𑑊'),
-    ('𑑐', '𑑙'),
-    ('\u{1145e}', '𑑡'),
-    ('𑒀', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑓐', '𑓙'),
-    ('𑖀', '\u{115b5}'),
-    ('𑖾', '\u{115c0}'),
-    ('𑗘', '\u{115dd}'),
-    ('𑘀', '\u{11640}'),
-    ('𑙄', '𑙄'),
-    ('𑙐', '𑙙'),
-    ('𑚀', '𑚾'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜀', '𑜚'),
-    ('\u{1171d}', '\u{1172b}'),
-    ('𑜰', 'đ‘œč'),
-    ('𑝀', '𑝆'),
-    ('𑠀', '\u{1183a}'),
-    ('𑱠', '𑣩'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{11943}'),
-    ('𑄐', 'đ‘„™'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '\u{119d7}'),
-    ('\u{119da}', '𑧡'),
-    ('𑧣', 'đ‘§€'),
-    ('𑹀', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('𑩐', '\u{11a99}'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '\u{11c36}'),
-    ('\u{11c38}', '𑱀'),
-    ('𑱐', '𑱙'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d47}'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶˜'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘» ', 'đ‘»¶'),
-    ('\u{11f00}', 'đ‘Œ'),
-    ('đ‘Œ’', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f42}'),
-    ('đ‘œ', '\u{11f5a}'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('\u{13440}', '\u{13455}'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', 'đ–„č'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('đ–«€', '𖫉'),
-    ('𖫐', 'đ–«­'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('𖬀', '\u{16b36}'),
-    ('𖭀', '𖭃'),
-    ('𖭐', '𖭙'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–”°', 'đ–”č'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('\u{16f4f}', 'đ–Ÿ‡'),
-    ('\u{16f8f}', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('𜳰', 'đœłč'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('𝟎', '𝟿'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('𞄀', '𞄬'),
-    ('\u{1e130}', 'đž„œ'),
-    ('𞅀', '𞅉'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '\u{1e2ae}'),
-    ('𞋀', 'đž‹č'),
-    ('𞓐', 'đž“č'),
-    ('𞗐', 'đž—ș'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('𞀀', 'đž„‹'),
-    ('𞄐', 'đž„™'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('🯰', 'đŸŻč'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const XID_START: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'ˁ'),
-    ('ˆ', 'ˑ'),
-    ('Ë ', 'Ë€'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('Ͱ', '͎'),
-    ('Ͷ', 'ͷ'),
-    ('ͻ', '͜'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', 'ՙ'),
-    ('ՠ', 'ֈ'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('Ű ', 'ي'),
-    ('Ùź', 'ÙŻ'),
-    ('ٱ', 'ۓ'),
-    ('ە', 'ە'),
-    ('Û„', 'ÛŠ'),
-    ('Ûź', 'ÛŻ'),
-    ('Ûș', 'ÛŒ'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', 'ܐ'),
-    ('ܒ', 'ܯ'),
-    ('ʍ', 'Ț„'),
-    ('Ț±', 'Ț±'),
-    ('ߊ', 'ßȘ'),
-    ('ߎ', 'ߔ'),
-    ('ßș', 'ßș'),
-    ('ࠀ', 'ࠕ'),
-    ('ࠚ', 'ࠚ'),
-    ('à €', 'à €'),
-    ('à š', 'à š'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('àą ', 'àŁ‰'),
-    ('à€„', 'à€č'),
-    ('à€œ', 'à€œ'),
-    ('à„', 'à„'),
-    ('à„˜', 'à„Ą'),
-    ('à„±', 'àŠ€'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', 'àŠœ'),
-    ('ৎ', 'ৎ'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'à§Ą'),
-    ('à§°', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('à©Č', '੎'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', 'àȘœ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à«Ą'),
-    ('à«č', 'à«č'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', 'àŹœ'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­±', 'à­±'),
-    ('àźƒ', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('àŻ', 'àŻ'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('జ', 'జ'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', 'à±Ą'),
-    ('àȀ', 'àȀ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àČœ'),
-    ('àł', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àł±', 'àłČ'),
-    ('àŽ„', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', 'àŽœ'),
-    ('à”Ž', 'à”Ž'),
-    ('à””', 'à”–'),
-    ('à”Ÿ', 'à”Ą'),
-    ('à”ș', 'à”ż'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('àž', 'àž°'),
-    ('àžČ', 'àžČ'),
-    ('àč€', 'àč†'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àș°'),
-    ('àșČ', 'àșČ'),
-    ('àșœ', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('àœ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('àŸˆ', 'àŸŒ'),
-    ('က', 'á€Ș'),
-    ('ဿ', 'ဿ'),
-    ('ၐ', 'ၕ'),
-    ('ၚ', 'ၝ'),
-    ('ၥ', 'ၥ'),
-    ('၄', '၊'),
-    ('ၟ', 'ၰ'),
-    ('ၔ', 'ႁ'),
-    ('ႎ', 'ႎ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', 'ᜑ'),
-    ('ᜟ', 'ᜱ'),
-    ('ᝀ', 'ᝑ'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('ក', 'ឳ'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', 'ៜ'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', 'ᱹ'),
-    ('áąȘ', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('ᄐ', 'ᄭ'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('Ṁ', 'Ṗ'),
-    ('áš ', 'ᩔ'),
-    ('áȘ§', 'áȘ§'),
-    ('ᬅ', 'ᬳ'),
-    ('ᭅ', 'ᭌ'),
-    ('ៃ', '០'),
-    ('៟', '៯'),
-    ('áźș', 'ᯄ'),
-    ('ᰀ', 'ᰣ'),
-    ('ᱍ', 'ᱏ'),
-    ('ᱚ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł¶'),
-    ('áłș', 'áłș'),
-    ('ᮀ', 'á¶ż'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('℘', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('々', '〇'),
-    ('〡', '〩'),
-    ('〱', '〔'),
-    ('〾', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('ゝ', 'ゟ'),
-    ('ァ', 'ăƒș'),
-    ('ăƒŒ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘟ'),
-    ('ê˜Ș', 'ꘫ'),
-    ('Ꙁ', 'ê™ź'),
-    ('ê™ż', 'ꚝ'),
-    ('ꚠ', 'ê›Ż'),
-    ('ꜗ', 'ꜟ'),
-    ('êœą', 'ꞈ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ꠁ'),
-    ('ꠃ', 'ꠅ'),
-    ('ꠇ', 'ꠊ'),
-    ('ꠌ', 'ê ą'),
-    ('êĄ€', 'êĄł'),
-    ('êą‚', 'êął'),
-    ('êŁČ', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', 'êŁŸ'),
-    ('ꀊ', 'ꀄ'),
-    ('ꀰ', 'ꄆ'),
-    ('ꄠ', 'ꄌ'),
-    ('ꊄ', 'êŠČ'),
-    ('ꧏ', 'ꧏ'),
-    ('ê§ ', 'ê§€'),
-    ('ê§Š', 'ê§Ż'),
-    ('ê§ș', 'ê§Ÿ'),
-    ('Ꚁ', 'êšš'),
-    ('ꩀ', 'ꩂ'),
-    ('ꩄ', 'ꩋ'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', 'ê©ș'),
-    ('꩟', 'êȘŻ'),
-    ('êȘ±', 'êȘ±'),
-    ('êȘ”', 'êȘ¶'),
-    ('êȘč', 'êȘœ'),
-    ('ꫀ', 'ꫀ'),
-    ('ꫂ', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ș'),
-    ('ê«Č', '꫎'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êŻą'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ïŹ', 'ïŹ'),
-    ('ïŹŸ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', 'ﱝ'),
-    ('ﱀ', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·č'),
-    ('ïč±', 'ïč±'),
-    ('ïčł', 'ïčł'),
-    ('ïč·', 'ïč·'),
-    ('ïčč', 'ïčč'),
-    ('ïč»', 'ïč»'),
-    ('ïčœ', 'ïčœ'),
-    ('ïčż', 'ﻌ'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', 'đ”'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '𐹀'),
-    ('𐹐', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '𐫀'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '𐮣'),
-    ('𐔊', '𐔄'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('đŒ€', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ°', 'đŸ'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀃', 'đ‘€·'),
-    ('𑁱', 'đ‘Č'),
-    ('𑁔', '𑁔'),
-    ('𑂃', '𑂯'),
-    ('𑃐', '𑃹'),
-    ('𑄃', '𑄩'),
-    ('𑅄', '𑅄'),
-    ('𑅇', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('𑆃', 'đ‘†Č'),
-    ('𑇁', '𑇄'),
-    ('𑇚', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', 'đ‘ˆ«'),
-    ('𑈿', '𑉀'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '𑋞'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', 'đ‘Œœ'),
-    ('𑍐', '𑍐'),
-    ('𑍝', '𑍡'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '𑎷'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏓'),
-    ('𑐀', '𑐮'),
-    ('𑑇', '𑑊'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑒯'),
-    ('𑓄', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑖀', '𑖼'),
-    ('𑗘', '𑗛'),
-    ('𑘀', '𑘯'),
-    ('𑙄', '𑙄'),
-    ('𑚀', 'đ‘šȘ'),
-    ('𑚾', '𑚾'),
-    ('𑜀', '𑜚'),
-    ('𑝀', '𑝆'),
-    ('𑠀', 'đ‘ «'),
-    ('𑱠', '𑣟'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', '𑀯'),
-    ('𑀿', '𑀿'),
-    ('𑄁', '𑄁'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '𑧐'),
-    ('𑧡', '𑧡'),
-    ('𑧣', '𑧣'),
-    ('𑹀', '𑹀'),
-    ('𑹋', 'đ‘šČ'),
-    ('đ‘šș', 'đ‘šș'),
-    ('𑩐', '𑩐'),
-    ('đ‘©œ', 'đ‘Ș‰'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '𑰼'),
-    ('𑱀', '𑱀'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '𑮰'),
-    ('𑔆', '𑔆'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', '𑶉'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘» ', 'đ‘»Č'),
-    ('đ‘Œ‚', 'đ‘Œ‚'),
-    ('đ‘Œ„', 'đ‘Œ'),
-    ('đ‘Œ’', 'đ‘Œł'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '𖄝'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('𖫐', 'đ–«­'),
-    ('𖬀', '𖬯'),
-    ('𖭀', '𖭃'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('đ–œ', 'đ–œ'),
-    ('đ–Ÿ“', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞄀', '𞄬'),
-    ('đž„·', 'đž„œ'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞓐', 'đž“«'),
-    ('𞗐', '𞗭'),
-    ('𞗰', '𞗰'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('𞀀', 'đž„ƒ'),
-    ('đž„‹', 'đž„‹'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_names.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_names.rs
deleted file mode 100644
index a27b491..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_names.rs
+++ /dev/null
@@ -1,281 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate property-names ucd-16.0.0
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const PROPERTY_NAMES: &'static [(&'static str, &'static str)] = &[
-    ("age", "Age"),
-    ("ahex", "ASCII_Hex_Digit"),
-    ("alpha", "Alphabetic"),
-    ("alphabetic", "Alphabetic"),
-    ("asciihexdigit", "ASCII_Hex_Digit"),
-    ("bc", "Bidi_Class"),
-    ("bidic", "Bidi_Control"),
-    ("bidiclass", "Bidi_Class"),
-    ("bidicontrol", "Bidi_Control"),
-    ("bidim", "Bidi_Mirrored"),
-    ("bidimirrored", "Bidi_Mirrored"),
-    ("bidimirroringglyph", "Bidi_Mirroring_Glyph"),
-    ("bidipairedbracket", "Bidi_Paired_Bracket"),
-    ("bidipairedbrackettype", "Bidi_Paired_Bracket_Type"),
-    ("blk", "Block"),
-    ("block", "Block"),
-    ("bmg", "Bidi_Mirroring_Glyph"),
-    ("bpb", "Bidi_Paired_Bracket"),
-    ("bpt", "Bidi_Paired_Bracket_Type"),
-    ("canonicalcombiningclass", "Canonical_Combining_Class"),
-    ("cased", "Cased"),
-    ("casefolding", "Case_Folding"),
-    ("caseignorable", "Case_Ignorable"),
-    ("ccc", "Canonical_Combining_Class"),
-    ("ce", "Composition_Exclusion"),
-    ("cf", "Case_Folding"),
-    ("changeswhencasefolded", "Changes_When_Casefolded"),
-    ("changeswhencasemapped", "Changes_When_Casemapped"),
-    ("changeswhenlowercased", "Changes_When_Lowercased"),
-    ("changeswhennfkccasefolded", "Changes_When_NFKC_Casefolded"),
-    ("changeswhentitlecased", "Changes_When_Titlecased"),
-    ("changeswhenuppercased", "Changes_When_Uppercased"),
-    ("ci", "Case_Ignorable"),
-    ("cjkaccountingnumeric", "kAccountingNumeric"),
-    ("cjkcompatibilityvariant", "kCompatibilityVariant"),
-    ("cjkiicore", "kIICore"),
-    ("cjkirggsource", "kIRG_GSource"),
-    ("cjkirghsource", "kIRG_HSource"),
-    ("cjkirgjsource", "kIRG_JSource"),
-    ("cjkirgkpsource", "kIRG_KPSource"),
-    ("cjkirgksource", "kIRG_KSource"),
-    ("cjkirgmsource", "kIRG_MSource"),
-    ("cjkirgssource", "kIRG_SSource"),
-    ("cjkirgtsource", "kIRG_TSource"),
-    ("cjkirguksource", "kIRG_UKSource"),
-    ("cjkirgusource", "kIRG_USource"),
-    ("cjkirgvsource", "kIRG_VSource"),
-    ("cjkothernumeric", "kOtherNumeric"),
-    ("cjkprimarynumeric", "kPrimaryNumeric"),
-    ("cjkrsunicode", "kRSUnicode"),
-    ("compex", "Full_Composition_Exclusion"),
-    ("compositionexclusion", "Composition_Exclusion"),
-    ("cwcf", "Changes_When_Casefolded"),
-    ("cwcm", "Changes_When_Casemapped"),
-    ("cwkcf", "Changes_When_NFKC_Casefolded"),
-    ("cwl", "Changes_When_Lowercased"),
-    ("cwt", "Changes_When_Titlecased"),
-    ("cwu", "Changes_When_Uppercased"),
-    ("dash", "Dash"),
-    ("decompositionmapping", "Decomposition_Mapping"),
-    ("decompositiontype", "Decomposition_Type"),
-    ("defaultignorablecodepoint", "Default_Ignorable_Code_Point"),
-    ("dep", "Deprecated"),
-    ("deprecated", "Deprecated"),
-    ("di", "Default_Ignorable_Code_Point"),
-    ("dia", "Diacritic"),
-    ("diacritic", "Diacritic"),
-    ("dm", "Decomposition_Mapping"),
-    ("dt", "Decomposition_Type"),
-    ("ea", "East_Asian_Width"),
-    ("eastasianwidth", "East_Asian_Width"),
-    ("ebase", "Emoji_Modifier_Base"),
-    ("ecomp", "Emoji_Component"),
-    ("emod", "Emoji_Modifier"),
-    ("emoji", "Emoji"),
-    ("emojicomponent", "Emoji_Component"),
-    ("emojimodifier", "Emoji_Modifier"),
-    ("emojimodifierbase", "Emoji_Modifier_Base"),
-    ("emojipresentation", "Emoji_Presentation"),
-    ("epres", "Emoji_Presentation"),
-    ("equideo", "Equivalent_Unified_Ideograph"),
-    ("equivalentunifiedideograph", "Equivalent_Unified_Ideograph"),
-    ("expandsonnfc", "Expands_On_NFC"),
-    ("expandsonnfd", "Expands_On_NFD"),
-    ("expandsonnfkc", "Expands_On_NFKC"),
-    ("expandsonnfkd", "Expands_On_NFKD"),
-    ("ext", "Extender"),
-    ("extendedpictographic", "Extended_Pictographic"),
-    ("extender", "Extender"),
-    ("extpict", "Extended_Pictographic"),
-    ("fcnfkc", "FC_NFKC_Closure"),
-    ("fcnfkcclosure", "FC_NFKC_Closure"),
-    ("fullcompositionexclusion", "Full_Composition_Exclusion"),
-    ("gc", "General_Category"),
-    ("gcb", "Grapheme_Cluster_Break"),
-    ("generalcategory", "General_Category"),
-    ("graphemebase", "Grapheme_Base"),
-    ("graphemeclusterbreak", "Grapheme_Cluster_Break"),
-    ("graphemeextend", "Grapheme_Extend"),
-    ("graphemelink", "Grapheme_Link"),
-    ("grbase", "Grapheme_Base"),
-    ("grext", "Grapheme_Extend"),
-    ("grlink", "Grapheme_Link"),
-    ("hangulsyllabletype", "Hangul_Syllable_Type"),
-    ("hex", "Hex_Digit"),
-    ("hexdigit", "Hex_Digit"),
-    ("hst", "Hangul_Syllable_Type"),
-    ("hyphen", "Hyphen"),
-    ("idc", "ID_Continue"),
-    ("idcompatmathcontinue", "ID_Compat_Math_Continue"),
-    ("idcompatmathstart", "ID_Compat_Math_Start"),
-    ("idcontinue", "ID_Continue"),
-    ("ideo", "Ideographic"),
-    ("ideographic", "Ideographic"),
-    ("ids", "ID_Start"),
-    ("idsb", "IDS_Binary_Operator"),
-    ("idsbinaryoperator", "IDS_Binary_Operator"),
-    ("idst", "IDS_Trinary_Operator"),
-    ("idstart", "ID_Start"),
-    ("idstrinaryoperator", "IDS_Trinary_Operator"),
-    ("idsu", "IDS_Unary_Operator"),
-    ("idsunaryoperator", "IDS_Unary_Operator"),
-    ("incb", "Indic_Conjunct_Break"),
-    ("indicconjunctbreak", "Indic_Conjunct_Break"),
-    ("indicpositionalcategory", "Indic_Positional_Category"),
-    ("indicsyllabiccategory", "Indic_Syllabic_Category"),
-    ("inpc", "Indic_Positional_Category"),
-    ("insc", "Indic_Syllabic_Category"),
-    ("isc", "ISO_Comment"),
-    ("jamoshortname", "Jamo_Short_Name"),
-    ("jg", "Joining_Group"),
-    ("joinc", "Join_Control"),
-    ("joincontrol", "Join_Control"),
-    ("joininggroup", "Joining_Group"),
-    ("joiningtype", "Joining_Type"),
-    ("jsn", "Jamo_Short_Name"),
-    ("jt", "Joining_Type"),
-    ("kaccountingnumeric", "kAccountingNumeric"),
-    ("kcompatibilityvariant", "kCompatibilityVariant"),
-    ("kehcat", "kEH_Cat"),
-    ("kehdesc", "kEH_Desc"),
-    ("kehhg", "kEH_HG"),
-    ("kehifao", "kEH_IFAO"),
-    ("kehjsesh", "kEH_JSesh"),
-    ("kehnomirror", "kEH_NoMirror"),
-    ("kehnorotate", "kEH_NoRotate"),
-    ("kiicore", "kIICore"),
-    ("kirggsource", "kIRG_GSource"),
-    ("kirghsource", "kIRG_HSource"),
-    ("kirgjsource", "kIRG_JSource"),
-    ("kirgkpsource", "kIRG_KPSource"),
-    ("kirgksource", "kIRG_KSource"),
-    ("kirgmsource", "kIRG_MSource"),
-    ("kirgssource", "kIRG_SSource"),
-    ("kirgtsource", "kIRG_TSource"),
-    ("kirguksource", "kIRG_UKSource"),
-    ("kirgusource", "kIRG_USource"),
-    ("kirgvsource", "kIRG_VSource"),
-    ("kothernumeric", "kOtherNumeric"),
-    ("kprimarynumeric", "kPrimaryNumeric"),
-    ("krsunicode", "kRSUnicode"),
-    ("lb", "Line_Break"),
-    ("lc", "Lowercase_Mapping"),
-    ("linebreak", "Line_Break"),
-    ("loe", "Logical_Order_Exception"),
-    ("logicalorderexception", "Logical_Order_Exception"),
-    ("lower", "Lowercase"),
-    ("lowercase", "Lowercase"),
-    ("lowercasemapping", "Lowercase_Mapping"),
-    ("math", "Math"),
-    ("mcm", "Modifier_Combining_Mark"),
-    ("modifiercombiningmark", "Modifier_Combining_Mark"),
-    ("na", "Name"),
-    ("na1", "Unicode_1_Name"),
-    ("name", "Name"),
-    ("namealias", "Name_Alias"),
-    ("nchar", "Noncharacter_Code_Point"),
-    ("nfcqc", "NFC_Quick_Check"),
-    ("nfcquickcheck", "NFC_Quick_Check"),
-    ("nfdqc", "NFD_Quick_Check"),
-    ("nfdquickcheck", "NFD_Quick_Check"),
-    ("nfkccasefold", "NFKC_Casefold"),
-    ("nfkccf", "NFKC_Casefold"),
-    ("nfkcqc", "NFKC_Quick_Check"),
-    ("nfkcquickcheck", "NFKC_Quick_Check"),
-    ("nfkcscf", "NFKC_Simple_Casefold"),
-    ("nfkcsimplecasefold", "NFKC_Simple_Casefold"),
-    ("nfkdqc", "NFKD_Quick_Check"),
-    ("nfkdquickcheck", "NFKD_Quick_Check"),
-    ("noncharactercodepoint", "Noncharacter_Code_Point"),
-    ("nt", "Numeric_Type"),
-    ("numerictype", "Numeric_Type"),
-    ("numericvalue", "Numeric_Value"),
-    ("nv", "Numeric_Value"),
-    ("oalpha", "Other_Alphabetic"),
-    ("ocomment", "ISO_Comment"),
-    ("odi", "Other_Default_Ignorable_Code_Point"),
-    ("ogrext", "Other_Grapheme_Extend"),
-    ("oidc", "Other_ID_Continue"),
-    ("oids", "Other_ID_Start"),
-    ("olower", "Other_Lowercase"),
-    ("omath", "Other_Math"),
-    ("otheralphabetic", "Other_Alphabetic"),
-    ("otherdefaultignorablecodepoint", "Other_Default_Ignorable_Code_Point"),
-    ("othergraphemeextend", "Other_Grapheme_Extend"),
-    ("otheridcontinue", "Other_ID_Continue"),
-    ("otheridstart", "Other_ID_Start"),
-    ("otherlowercase", "Other_Lowercase"),
-    ("othermath", "Other_Math"),
-    ("otheruppercase", "Other_Uppercase"),
-    ("oupper", "Other_Uppercase"),
-    ("patsyn", "Pattern_Syntax"),
-    ("patternsyntax", "Pattern_Syntax"),
-    ("patternwhitespace", "Pattern_White_Space"),
-    ("patws", "Pattern_White_Space"),
-    ("pcm", "Prepended_Concatenation_Mark"),
-    ("prependedconcatenationmark", "Prepended_Concatenation_Mark"),
-    ("qmark", "Quotation_Mark"),
-    ("quotationmark", "Quotation_Mark"),
-    ("radical", "Radical"),
-    ("regionalindicator", "Regional_Indicator"),
-    ("ri", "Regional_Indicator"),
-    ("sb", "Sentence_Break"),
-    ("sc", "Script"),
-    ("scf", "Simple_Case_Folding"),
-    ("script", "Script"),
-    ("scriptextensions", "Script_Extensions"),
-    ("scx", "Script_Extensions"),
-    ("sd", "Soft_Dotted"),
-    ("sentencebreak", "Sentence_Break"),
-    ("sentenceterminal", "Sentence_Terminal"),
-    ("sfc", "Simple_Case_Folding"),
-    ("simplecasefolding", "Simple_Case_Folding"),
-    ("simplelowercasemapping", "Simple_Lowercase_Mapping"),
-    ("simpletitlecasemapping", "Simple_Titlecase_Mapping"),
-    ("simpleuppercasemapping", "Simple_Uppercase_Mapping"),
-    ("slc", "Simple_Lowercase_Mapping"),
-    ("softdotted", "Soft_Dotted"),
-    ("space", "White_Space"),
-    ("stc", "Simple_Titlecase_Mapping"),
-    ("sterm", "Sentence_Terminal"),
-    ("suc", "Simple_Uppercase_Mapping"),
-    ("tc", "Titlecase_Mapping"),
-    ("term", "Terminal_Punctuation"),
-    ("terminalpunctuation", "Terminal_Punctuation"),
-    ("titlecasemapping", "Titlecase_Mapping"),
-    ("uc", "Uppercase_Mapping"),
-    ("uideo", "Unified_Ideograph"),
-    ("unicode1name", "Unicode_1_Name"),
-    ("unicoderadicalstroke", "kRSUnicode"),
-    ("unifiedideograph", "Unified_Ideograph"),
-    ("upper", "Uppercase"),
-    ("uppercase", "Uppercase"),
-    ("uppercasemapping", "Uppercase_Mapping"),
-    ("urs", "kRSUnicode"),
-    ("variationselector", "Variation_Selector"),
-    ("verticalorientation", "Vertical_Orientation"),
-    ("vo", "Vertical_Orientation"),
-    ("vs", "Variation_Selector"),
-    ("wb", "Word_Break"),
-    ("whitespace", "White_Space"),
-    ("wordbreak", "Word_Break"),
-    ("wspace", "White_Space"),
-    ("xidc", "XID_Continue"),
-    ("xidcontinue", "XID_Continue"),
-    ("xids", "XID_Start"),
-    ("xidstart", "XID_Start"),
-    ("xonfc", "Expands_On_NFC"),
-    ("xonfd", "Expands_On_NFD"),
-    ("xonfkc", "Expands_On_NFKC"),
-    ("xonfkd", "Expands_On_NFKD"),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_values.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_values.rs
deleted file mode 100644
index 2270d66..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_values.rs
+++ /dev/null
@@ -1,956 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate property-values ucd-16.0.0 --include gc,script,scx,age,gcb,wb,sb
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const PROPERTY_VALUES: &'static [(
-    &'static str,
-    &'static [(&'static str, &'static str)],
-)] = &[
-    (
-        "Age",
-        &[
-            ("1.1", "V1_1"),
-            ("10.0", "V10_0"),
-            ("11.0", "V11_0"),
-            ("12.0", "V12_0"),
-            ("12.1", "V12_1"),
-            ("13.0", "V13_0"),
-            ("14.0", "V14_0"),
-            ("15.0", "V15_0"),
-            ("15.1", "V15_1"),
-            ("16.0", "V16_0"),
-            ("2.0", "V2_0"),
-            ("2.1", "V2_1"),
-            ("3.0", "V3_0"),
-            ("3.1", "V3_1"),
-            ("3.2", "V3_2"),
-            ("4.0", "V4_0"),
-            ("4.1", "V4_1"),
-            ("5.0", "V5_0"),
-            ("5.1", "V5_1"),
-            ("5.2", "V5_2"),
-            ("6.0", "V6_0"),
-            ("6.1", "V6_1"),
-            ("6.2", "V6_2"),
-            ("6.3", "V6_3"),
-            ("7.0", "V7_0"),
-            ("8.0", "V8_0"),
-            ("9.0", "V9_0"),
-            ("na", "Unassigned"),
-            ("unassigned", "Unassigned"),
-            ("v100", "V10_0"),
-            ("v11", "V1_1"),
-            ("v110", "V11_0"),
-            ("v120", "V12_0"),
-            ("v121", "V12_1"),
-            ("v130", "V13_0"),
-            ("v140", "V14_0"),
-            ("v150", "V15_0"),
-            ("v151", "V15_1"),
-            ("v160", "V16_0"),
-            ("v20", "V2_0"),
-            ("v21", "V2_1"),
-            ("v30", "V3_0"),
-            ("v31", "V3_1"),
-            ("v32", "V3_2"),
-            ("v40", "V4_0"),
-            ("v41", "V4_1"),
-            ("v50", "V5_0"),
-            ("v51", "V5_1"),
-            ("v52", "V5_2"),
-            ("v60", "V6_0"),
-            ("v61", "V6_1"),
-            ("v62", "V6_2"),
-            ("v63", "V6_3"),
-            ("v70", "V7_0"),
-            ("v80", "V8_0"),
-            ("v90", "V9_0"),
-        ],
-    ),
-    (
-        "General_Category",
-        &[
-            ("c", "Other"),
-            ("casedletter", "Cased_Letter"),
-            ("cc", "Control"),
-            ("cf", "Format"),
-            ("closepunctuation", "Close_Punctuation"),
-            ("cn", "Unassigned"),
-            ("cntrl", "Control"),
-            ("co", "Private_Use"),
-            ("combiningmark", "Mark"),
-            ("connectorpunctuation", "Connector_Punctuation"),
-            ("control", "Control"),
-            ("cs", "Surrogate"),
-            ("currencysymbol", "Currency_Symbol"),
-            ("dashpunctuation", "Dash_Punctuation"),
-            ("decimalnumber", "Decimal_Number"),
-            ("digit", "Decimal_Number"),
-            ("enclosingmark", "Enclosing_Mark"),
-            ("finalpunctuation", "Final_Punctuation"),
-            ("format", "Format"),
-            ("initialpunctuation", "Initial_Punctuation"),
-            ("l", "Letter"),
-            ("lc", "Cased_Letter"),
-            ("letter", "Letter"),
-            ("letternumber", "Letter_Number"),
-            ("lineseparator", "Line_Separator"),
-            ("ll", "Lowercase_Letter"),
-            ("lm", "Modifier_Letter"),
-            ("lo", "Other_Letter"),
-            ("lowercaseletter", "Lowercase_Letter"),
-            ("lt", "Titlecase_Letter"),
-            ("lu", "Uppercase_Letter"),
-            ("m", "Mark"),
-            ("mark", "Mark"),
-            ("mathsymbol", "Math_Symbol"),
-            ("mc", "Spacing_Mark"),
-            ("me", "Enclosing_Mark"),
-            ("mn", "Nonspacing_Mark"),
-            ("modifierletter", "Modifier_Letter"),
-            ("modifiersymbol", "Modifier_Symbol"),
-            ("n", "Number"),
-            ("nd", "Decimal_Number"),
-            ("nl", "Letter_Number"),
-            ("no", "Other_Number"),
-            ("nonspacingmark", "Nonspacing_Mark"),
-            ("number", "Number"),
-            ("openpunctuation", "Open_Punctuation"),
-            ("other", "Other"),
-            ("otherletter", "Other_Letter"),
-            ("othernumber", "Other_Number"),
-            ("otherpunctuation", "Other_Punctuation"),
-            ("othersymbol", "Other_Symbol"),
-            ("p", "Punctuation"),
-            ("paragraphseparator", "Paragraph_Separator"),
-            ("pc", "Connector_Punctuation"),
-            ("pd", "Dash_Punctuation"),
-            ("pe", "Close_Punctuation"),
-            ("pf", "Final_Punctuation"),
-            ("pi", "Initial_Punctuation"),
-            ("po", "Other_Punctuation"),
-            ("privateuse", "Private_Use"),
-            ("ps", "Open_Punctuation"),
-            ("punct", "Punctuation"),
-            ("punctuation", "Punctuation"),
-            ("s", "Symbol"),
-            ("sc", "Currency_Symbol"),
-            ("separator", "Separator"),
-            ("sk", "Modifier_Symbol"),
-            ("sm", "Math_Symbol"),
-            ("so", "Other_Symbol"),
-            ("spaceseparator", "Space_Separator"),
-            ("spacingmark", "Spacing_Mark"),
-            ("surrogate", "Surrogate"),
-            ("symbol", "Symbol"),
-            ("titlecaseletter", "Titlecase_Letter"),
-            ("unassigned", "Unassigned"),
-            ("uppercaseletter", "Uppercase_Letter"),
-            ("z", "Separator"),
-            ("zl", "Line_Separator"),
-            ("zp", "Paragraph_Separator"),
-            ("zs", "Space_Separator"),
-        ],
-    ),
-    (
-        "Grapheme_Cluster_Break",
-        &[
-            ("cn", "Control"),
-            ("control", "Control"),
-            ("cr", "CR"),
-            ("eb", "E_Base"),
-            ("ebase", "E_Base"),
-            ("ebasegaz", "E_Base_GAZ"),
-            ("ebg", "E_Base_GAZ"),
-            ("em", "E_Modifier"),
-            ("emodifier", "E_Modifier"),
-            ("ex", "Extend"),
-            ("extend", "Extend"),
-            ("gaz", "Glue_After_Zwj"),
-            ("glueafterzwj", "Glue_After_Zwj"),
-            ("l", "L"),
-            ("lf", "LF"),
-            ("lv", "LV"),
-            ("lvt", "LVT"),
-            ("other", "Other"),
-            ("pp", "Prepend"),
-            ("prepend", "Prepend"),
-            ("regionalindicator", "Regional_Indicator"),
-            ("ri", "Regional_Indicator"),
-            ("sm", "SpacingMark"),
-            ("spacingmark", "SpacingMark"),
-            ("t", "T"),
-            ("v", "V"),
-            ("xx", "Other"),
-            ("zwj", "ZWJ"),
-        ],
-    ),
-    (
-        "Script",
-        &[
-            ("adlam", "Adlam"),
-            ("adlm", "Adlam"),
-            ("aghb", "Caucasian_Albanian"),
-            ("ahom", "Ahom"),
-            ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"),
-            ("arab", "Arabic"),
-            ("arabic", "Arabic"),
-            ("armenian", "Armenian"),
-            ("armi", "Imperial_Aramaic"),
-            ("armn", "Armenian"),
-            ("avestan", "Avestan"),
-            ("avst", "Avestan"),
-            ("bali", "Balinese"),
-            ("balinese", "Balinese"),
-            ("bamu", "Bamum"),
-            ("bamum", "Bamum"),
-            ("bass", "Bassa_Vah"),
-            ("bassavah", "Bassa_Vah"),
-            ("batak", "Batak"),
-            ("batk", "Batak"),
-            ("beng", "Bengali"),
-            ("bengali", "Bengali"),
-            ("bhaiksuki", "Bhaiksuki"),
-            ("bhks", "Bhaiksuki"),
-            ("bopo", "Bopomofo"),
-            ("bopomofo", "Bopomofo"),
-            ("brah", "Brahmi"),
-            ("brahmi", "Brahmi"),
-            ("brai", "Braille"),
-            ("braille", "Braille"),
-            ("bugi", "Buginese"),
-            ("buginese", "Buginese"),
-            ("buhd", "Buhid"),
-            ("buhid", "Buhid"),
-            ("cakm", "Chakma"),
-            ("canadianaboriginal", "Canadian_Aboriginal"),
-            ("cans", "Canadian_Aboriginal"),
-            ("cari", "Carian"),
-            ("carian", "Carian"),
-            ("caucasianalbanian", "Caucasian_Albanian"),
-            ("chakma", "Chakma"),
-            ("cham", "Cham"),
-            ("cher", "Cherokee"),
-            ("cherokee", "Cherokee"),
-            ("chorasmian", "Chorasmian"),
-            ("chrs", "Chorasmian"),
-            ("common", "Common"),
-            ("copt", "Coptic"),
-            ("coptic", "Coptic"),
-            ("cpmn", "Cypro_Minoan"),
-            ("cprt", "Cypriot"),
-            ("cuneiform", "Cuneiform"),
-            ("cypriot", "Cypriot"),
-            ("cyprominoan", "Cypro_Minoan"),
-            ("cyrillic", "Cyrillic"),
-            ("cyrl", "Cyrillic"),
-            ("deseret", "Deseret"),
-            ("deva", "Devanagari"),
-            ("devanagari", "Devanagari"),
-            ("diak", "Dives_Akuru"),
-            ("divesakuru", "Dives_Akuru"),
-            ("dogr", "Dogra"),
-            ("dogra", "Dogra"),
-            ("dsrt", "Deseret"),
-            ("dupl", "Duployan"),
-            ("duployan", "Duployan"),
-            ("egyp", "Egyptian_Hieroglyphs"),
-            ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"),
-            ("elba", "Elbasan"),
-            ("elbasan", "Elbasan"),
-            ("elym", "Elymaic"),
-            ("elymaic", "Elymaic"),
-            ("ethi", "Ethiopic"),
-            ("ethiopic", "Ethiopic"),
-            ("gara", "Garay"),
-            ("garay", "Garay"),
-            ("geor", "Georgian"),
-            ("georgian", "Georgian"),
-            ("glag", "Glagolitic"),
-            ("glagolitic", "Glagolitic"),
-            ("gong", "Gunjala_Gondi"),
-            ("gonm", "Masaram_Gondi"),
-            ("goth", "Gothic"),
-            ("gothic", "Gothic"),
-            ("gran", "Grantha"),
-            ("grantha", "Grantha"),
-            ("greek", "Greek"),
-            ("grek", "Greek"),
-            ("gujarati", "Gujarati"),
-            ("gujr", "Gujarati"),
-            ("gukh", "Gurung_Khema"),
-            ("gunjalagondi", "Gunjala_Gondi"),
-            ("gurmukhi", "Gurmukhi"),
-            ("guru", "Gurmukhi"),
-            ("gurungkhema", "Gurung_Khema"),
-            ("han", "Han"),
-            ("hang", "Hangul"),
-            ("hangul", "Hangul"),
-            ("hani", "Han"),
-            ("hanifirohingya", "Hanifi_Rohingya"),
-            ("hano", "Hanunoo"),
-            ("hanunoo", "Hanunoo"),
-            ("hatr", "Hatran"),
-            ("hatran", "Hatran"),
-            ("hebr", "Hebrew"),
-            ("hebrew", "Hebrew"),
-            ("hira", "Hiragana"),
-            ("hiragana", "Hiragana"),
-            ("hluw", "Anatolian_Hieroglyphs"),
-            ("hmng", "Pahawh_Hmong"),
-            ("hmnp", "Nyiakeng_Puachue_Hmong"),
-            ("hrkt", "Katakana_Or_Hiragana"),
-            ("hung", "Old_Hungarian"),
-            ("imperialaramaic", "Imperial_Aramaic"),
-            ("inherited", "Inherited"),
-            ("inscriptionalpahlavi", "Inscriptional_Pahlavi"),
-            ("inscriptionalparthian", "Inscriptional_Parthian"),
-            ("ital", "Old_Italic"),
-            ("java", "Javanese"),
-            ("javanese", "Javanese"),
-            ("kaithi", "Kaithi"),
-            ("kali", "Kayah_Li"),
-            ("kana", "Katakana"),
-            ("kannada", "Kannada"),
-            ("katakana", "Katakana"),
-            ("katakanaorhiragana", "Katakana_Or_Hiragana"),
-            ("kawi", "Kawi"),
-            ("kayahli", "Kayah_Li"),
-            ("khar", "Kharoshthi"),
-            ("kharoshthi", "Kharoshthi"),
-            ("khitansmallscript", "Khitan_Small_Script"),
-            ("khmer", "Khmer"),
-            ("khmr", "Khmer"),
-            ("khoj", "Khojki"),
-            ("khojki", "Khojki"),
-            ("khudawadi", "Khudawadi"),
-            ("kiratrai", "Kirat_Rai"),
-            ("kits", "Khitan_Small_Script"),
-            ("knda", "Kannada"),
-            ("krai", "Kirat_Rai"),
-            ("kthi", "Kaithi"),
-            ("lana", "Tai_Tham"),
-            ("lao", "Lao"),
-            ("laoo", "Lao"),
-            ("latin", "Latin"),
-            ("latn", "Latin"),
-            ("lepc", "Lepcha"),
-            ("lepcha", "Lepcha"),
-            ("limb", "Limbu"),
-            ("limbu", "Limbu"),
-            ("lina", "Linear_A"),
-            ("linb", "Linear_B"),
-            ("lineara", "Linear_A"),
-            ("linearb", "Linear_B"),
-            ("lisu", "Lisu"),
-            ("lyci", "Lycian"),
-            ("lycian", "Lycian"),
-            ("lydi", "Lydian"),
-            ("lydian", "Lydian"),
-            ("mahajani", "Mahajani"),
-            ("mahj", "Mahajani"),
-            ("maka", "Makasar"),
-            ("makasar", "Makasar"),
-            ("malayalam", "Malayalam"),
-            ("mand", "Mandaic"),
-            ("mandaic", "Mandaic"),
-            ("mani", "Manichaean"),
-            ("manichaean", "Manichaean"),
-            ("marc", "Marchen"),
-            ("marchen", "Marchen"),
-            ("masaramgondi", "Masaram_Gondi"),
-            ("medefaidrin", "Medefaidrin"),
-            ("medf", "Medefaidrin"),
-            ("meeteimayek", "Meetei_Mayek"),
-            ("mend", "Mende_Kikakui"),
-            ("mendekikakui", "Mende_Kikakui"),
-            ("merc", "Meroitic_Cursive"),
-            ("mero", "Meroitic_Hieroglyphs"),
-            ("meroiticcursive", "Meroitic_Cursive"),
-            ("meroitichieroglyphs", "Meroitic_Hieroglyphs"),
-            ("miao", "Miao"),
-            ("mlym", "Malayalam"),
-            ("modi", "Modi"),
-            ("mong", "Mongolian"),
-            ("mongolian", "Mongolian"),
-            ("mro", "Mro"),
-            ("mroo", "Mro"),
-            ("mtei", "Meetei_Mayek"),
-            ("mult", "Multani"),
-            ("multani", "Multani"),
-            ("myanmar", "Myanmar"),
-            ("mymr", "Myanmar"),
-            ("nabataean", "Nabataean"),
-            ("nagm", "Nag_Mundari"),
-            ("nagmundari", "Nag_Mundari"),
-            ("nand", "Nandinagari"),
-            ("nandinagari", "Nandinagari"),
-            ("narb", "Old_North_Arabian"),
-            ("nbat", "Nabataean"),
-            ("newa", "Newa"),
-            ("newtailue", "New_Tai_Lue"),
-            ("nko", "Nko"),
-            ("nkoo", "Nko"),
-            ("nshu", "Nushu"),
-            ("nushu", "Nushu"),
-            ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"),
-            ("ogam", "Ogham"),
-            ("ogham", "Ogham"),
-            ("olchiki", "Ol_Chiki"),
-            ("olck", "Ol_Chiki"),
-            ("oldhungarian", "Old_Hungarian"),
-            ("olditalic", "Old_Italic"),
-            ("oldnortharabian", "Old_North_Arabian"),
-            ("oldpermic", "Old_Permic"),
-            ("oldpersian", "Old_Persian"),
-            ("oldsogdian", "Old_Sogdian"),
-            ("oldsoutharabian", "Old_South_Arabian"),
-            ("oldturkic", "Old_Turkic"),
-            ("olduyghur", "Old_Uyghur"),
-            ("olonal", "Ol_Onal"),
-            ("onao", "Ol_Onal"),
-            ("oriya", "Oriya"),
-            ("orkh", "Old_Turkic"),
-            ("orya", "Oriya"),
-            ("osage", "Osage"),
-            ("osge", "Osage"),
-            ("osma", "Osmanya"),
-            ("osmanya", "Osmanya"),
-            ("ougr", "Old_Uyghur"),
-            ("pahawhhmong", "Pahawh_Hmong"),
-            ("palm", "Palmyrene"),
-            ("palmyrene", "Palmyrene"),
-            ("pauc", "Pau_Cin_Hau"),
-            ("paucinhau", "Pau_Cin_Hau"),
-            ("perm", "Old_Permic"),
-            ("phag", "Phags_Pa"),
-            ("phagspa", "Phags_Pa"),
-            ("phli", "Inscriptional_Pahlavi"),
-            ("phlp", "Psalter_Pahlavi"),
-            ("phnx", "Phoenician"),
-            ("phoenician", "Phoenician"),
-            ("plrd", "Miao"),
-            ("prti", "Inscriptional_Parthian"),
-            ("psalterpahlavi", "Psalter_Pahlavi"),
-            ("qaac", "Coptic"),
-            ("qaai", "Inherited"),
-            ("rejang", "Rejang"),
-            ("rjng", "Rejang"),
-            ("rohg", "Hanifi_Rohingya"),
-            ("runic", "Runic"),
-            ("runr", "Runic"),
-            ("samaritan", "Samaritan"),
-            ("samr", "Samaritan"),
-            ("sarb", "Old_South_Arabian"),
-            ("saur", "Saurashtra"),
-            ("saurashtra", "Saurashtra"),
-            ("sgnw", "SignWriting"),
-            ("sharada", "Sharada"),
-            ("shavian", "Shavian"),
-            ("shaw", "Shavian"),
-            ("shrd", "Sharada"),
-            ("sidd", "Siddham"),
-            ("siddham", "Siddham"),
-            ("signwriting", "SignWriting"),
-            ("sind", "Khudawadi"),
-            ("sinh", "Sinhala"),
-            ("sinhala", "Sinhala"),
-            ("sogd", "Sogdian"),
-            ("sogdian", "Sogdian"),
-            ("sogo", "Old_Sogdian"),
-            ("sora", "Sora_Sompeng"),
-            ("sorasompeng", "Sora_Sompeng"),
-            ("soyo", "Soyombo"),
-            ("soyombo", "Soyombo"),
-            ("sund", "Sundanese"),
-            ("sundanese", "Sundanese"),
-            ("sunu", "Sunuwar"),
-            ("sunuwar", "Sunuwar"),
-            ("sylo", "Syloti_Nagri"),
-            ("sylotinagri", "Syloti_Nagri"),
-            ("syrc", "Syriac"),
-            ("syriac", "Syriac"),
-            ("tagalog", "Tagalog"),
-            ("tagb", "Tagbanwa"),
-            ("tagbanwa", "Tagbanwa"),
-            ("taile", "Tai_Le"),
-            ("taitham", "Tai_Tham"),
-            ("taiviet", "Tai_Viet"),
-            ("takr", "Takri"),
-            ("takri", "Takri"),
-            ("tale", "Tai_Le"),
-            ("talu", "New_Tai_Lue"),
-            ("tamil", "Tamil"),
-            ("taml", "Tamil"),
-            ("tang", "Tangut"),
-            ("tangsa", "Tangsa"),
-            ("tangut", "Tangut"),
-            ("tavt", "Tai_Viet"),
-            ("telu", "Telugu"),
-            ("telugu", "Telugu"),
-            ("tfng", "Tifinagh"),
-            ("tglg", "Tagalog"),
-            ("thaa", "Thaana"),
-            ("thaana", "Thaana"),
-            ("thai", "Thai"),
-            ("tibetan", "Tibetan"),
-            ("tibt", "Tibetan"),
-            ("tifinagh", "Tifinagh"),
-            ("tirh", "Tirhuta"),
-            ("tirhuta", "Tirhuta"),
-            ("tnsa", "Tangsa"),
-            ("todhri", "Todhri"),
-            ("todr", "Todhri"),
-            ("toto", "Toto"),
-            ("tulutigalari", "Tulu_Tigalari"),
-            ("tutg", "Tulu_Tigalari"),
-            ("ugar", "Ugaritic"),
-            ("ugaritic", "Ugaritic"),
-            ("unknown", "Unknown"),
-            ("vai", "Vai"),
-            ("vaii", "Vai"),
-            ("vith", "Vithkuqi"),
-            ("vithkuqi", "Vithkuqi"),
-            ("wancho", "Wancho"),
-            ("wara", "Warang_Citi"),
-            ("warangciti", "Warang_Citi"),
-            ("wcho", "Wancho"),
-            ("xpeo", "Old_Persian"),
-            ("xsux", "Cuneiform"),
-            ("yezi", "Yezidi"),
-            ("yezidi", "Yezidi"),
-            ("yi", "Yi"),
-            ("yiii", "Yi"),
-            ("zanabazarsquare", "Zanabazar_Square"),
-            ("zanb", "Zanabazar_Square"),
-            ("zinh", "Inherited"),
-            ("zyyy", "Common"),
-            ("zzzz", "Unknown"),
-        ],
-    ),
-    (
-        "Script_Extensions",
-        &[
-            ("adlam", "Adlam"),
-            ("adlm", "Adlam"),
-            ("aghb", "Caucasian_Albanian"),
-            ("ahom", "Ahom"),
-            ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"),
-            ("arab", "Arabic"),
-            ("arabic", "Arabic"),
-            ("armenian", "Armenian"),
-            ("armi", "Imperial_Aramaic"),
-            ("armn", "Armenian"),
-            ("avestan", "Avestan"),
-            ("avst", "Avestan"),
-            ("bali", "Balinese"),
-            ("balinese", "Balinese"),
-            ("bamu", "Bamum"),
-            ("bamum", "Bamum"),
-            ("bass", "Bassa_Vah"),
-            ("bassavah", "Bassa_Vah"),
-            ("batak", "Batak"),
-            ("batk", "Batak"),
-            ("beng", "Bengali"),
-            ("bengali", "Bengali"),
-            ("bhaiksuki", "Bhaiksuki"),
-            ("bhks", "Bhaiksuki"),
-            ("bopo", "Bopomofo"),
-            ("bopomofo", "Bopomofo"),
-            ("brah", "Brahmi"),
-            ("brahmi", "Brahmi"),
-            ("brai", "Braille"),
-            ("braille", "Braille"),
-            ("bugi", "Buginese"),
-            ("buginese", "Buginese"),
-            ("buhd", "Buhid"),
-            ("buhid", "Buhid"),
-            ("cakm", "Chakma"),
-            ("canadianaboriginal", "Canadian_Aboriginal"),
-            ("cans", "Canadian_Aboriginal"),
-            ("cari", "Carian"),
-            ("carian", "Carian"),
-            ("caucasianalbanian", "Caucasian_Albanian"),
-            ("chakma", "Chakma"),
-            ("cham", "Cham"),
-            ("cher", "Cherokee"),
-            ("cherokee", "Cherokee"),
-            ("chorasmian", "Chorasmian"),
-            ("chrs", "Chorasmian"),
-            ("common", "Common"),
-            ("copt", "Coptic"),
-            ("coptic", "Coptic"),
-            ("cpmn", "Cypro_Minoan"),
-            ("cprt", "Cypriot"),
-            ("cuneiform", "Cuneiform"),
-            ("cypriot", "Cypriot"),
-            ("cyprominoan", "Cypro_Minoan"),
-            ("cyrillic", "Cyrillic"),
-            ("cyrl", "Cyrillic"),
-            ("deseret", "Deseret"),
-            ("deva", "Devanagari"),
-            ("devanagari", "Devanagari"),
-            ("diak", "Dives_Akuru"),
-            ("divesakuru", "Dives_Akuru"),
-            ("dogr", "Dogra"),
-            ("dogra", "Dogra"),
-            ("dsrt", "Deseret"),
-            ("dupl", "Duployan"),
-            ("duployan", "Duployan"),
-            ("egyp", "Egyptian_Hieroglyphs"),
-            ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"),
-            ("elba", "Elbasan"),
-            ("elbasan", "Elbasan"),
-            ("elym", "Elymaic"),
-            ("elymaic", "Elymaic"),
-            ("ethi", "Ethiopic"),
-            ("ethiopic", "Ethiopic"),
-            ("gara", "Garay"),
-            ("garay", "Garay"),
-            ("geor", "Georgian"),
-            ("georgian", "Georgian"),
-            ("glag", "Glagolitic"),
-            ("glagolitic", "Glagolitic"),
-            ("gong", "Gunjala_Gondi"),
-            ("gonm", "Masaram_Gondi"),
-            ("goth", "Gothic"),
-            ("gothic", "Gothic"),
-            ("gran", "Grantha"),
-            ("grantha", "Grantha"),
-            ("greek", "Greek"),
-            ("grek", "Greek"),
-            ("gujarati", "Gujarati"),
-            ("gujr", "Gujarati"),
-            ("gukh", "Gurung_Khema"),
-            ("gunjalagondi", "Gunjala_Gondi"),
-            ("gurmukhi", "Gurmukhi"),
-            ("guru", "Gurmukhi"),
-            ("gurungkhema", "Gurung_Khema"),
-            ("han", "Han"),
-            ("hang", "Hangul"),
-            ("hangul", "Hangul"),
-            ("hani", "Han"),
-            ("hanifirohingya", "Hanifi_Rohingya"),
-            ("hano", "Hanunoo"),
-            ("hanunoo", "Hanunoo"),
-            ("hatr", "Hatran"),
-            ("hatran", "Hatran"),
-            ("hebr", "Hebrew"),
-            ("hebrew", "Hebrew"),
-            ("hira", "Hiragana"),
-            ("hiragana", "Hiragana"),
-            ("hluw", "Anatolian_Hieroglyphs"),
-            ("hmng", "Pahawh_Hmong"),
-            ("hmnp", "Nyiakeng_Puachue_Hmong"),
-            ("hrkt", "Katakana_Or_Hiragana"),
-            ("hung", "Old_Hungarian"),
-            ("imperialaramaic", "Imperial_Aramaic"),
-            ("inherited", "Inherited"),
-            ("inscriptionalpahlavi", "Inscriptional_Pahlavi"),
-            ("inscriptionalparthian", "Inscriptional_Parthian"),
-            ("ital", "Old_Italic"),
-            ("java", "Javanese"),
-            ("javanese", "Javanese"),
-            ("kaithi", "Kaithi"),
-            ("kali", "Kayah_Li"),
-            ("kana", "Katakana"),
-            ("kannada", "Kannada"),
-            ("katakana", "Katakana"),
-            ("katakanaorhiragana", "Katakana_Or_Hiragana"),
-            ("kawi", "Kawi"),
-            ("kayahli", "Kayah_Li"),
-            ("khar", "Kharoshthi"),
-            ("kharoshthi", "Kharoshthi"),
-            ("khitansmallscript", "Khitan_Small_Script"),
-            ("khmer", "Khmer"),
-            ("khmr", "Khmer"),
-            ("khoj", "Khojki"),
-            ("khojki", "Khojki"),
-            ("khudawadi", "Khudawadi"),
-            ("kiratrai", "Kirat_Rai"),
-            ("kits", "Khitan_Small_Script"),
-            ("knda", "Kannada"),
-            ("krai", "Kirat_Rai"),
-            ("kthi", "Kaithi"),
-            ("lana", "Tai_Tham"),
-            ("lao", "Lao"),
-            ("laoo", "Lao"),
-            ("latin", "Latin"),
-            ("latn", "Latin"),
-            ("lepc", "Lepcha"),
-            ("lepcha", "Lepcha"),
-            ("limb", "Limbu"),
-            ("limbu", "Limbu"),
-            ("lina", "Linear_A"),
-            ("linb", "Linear_B"),
-            ("lineara", "Linear_A"),
-            ("linearb", "Linear_B"),
-            ("lisu", "Lisu"),
-            ("lyci", "Lycian"),
-            ("lycian", "Lycian"),
-            ("lydi", "Lydian"),
-            ("lydian", "Lydian"),
-            ("mahajani", "Mahajani"),
-            ("mahj", "Mahajani"),
-            ("maka", "Makasar"),
-            ("makasar", "Makasar"),
-            ("malayalam", "Malayalam"),
-            ("mand", "Mandaic"),
-            ("mandaic", "Mandaic"),
-            ("mani", "Manichaean"),
-            ("manichaean", "Manichaean"),
-            ("marc", "Marchen"),
-            ("marchen", "Marchen"),
-            ("masaramgondi", "Masaram_Gondi"),
-            ("medefaidrin", "Medefaidrin"),
-            ("medf", "Medefaidrin"),
-            ("meeteimayek", "Meetei_Mayek"),
-            ("mend", "Mende_Kikakui"),
-            ("mendekikakui", "Mende_Kikakui"),
-            ("merc", "Meroitic_Cursive"),
-            ("mero", "Meroitic_Hieroglyphs"),
-            ("meroiticcursive", "Meroitic_Cursive"),
-            ("meroitichieroglyphs", "Meroitic_Hieroglyphs"),
-            ("miao", "Miao"),
-            ("mlym", "Malayalam"),
-            ("modi", "Modi"),
-            ("mong", "Mongolian"),
-            ("mongolian", "Mongolian"),
-            ("mro", "Mro"),
-            ("mroo", "Mro"),
-            ("mtei", "Meetei_Mayek"),
-            ("mult", "Multani"),
-            ("multani", "Multani"),
-            ("myanmar", "Myanmar"),
-            ("mymr", "Myanmar"),
-            ("nabataean", "Nabataean"),
-            ("nagm", "Nag_Mundari"),
-            ("nagmundari", "Nag_Mundari"),
-            ("nand", "Nandinagari"),
-            ("nandinagari", "Nandinagari"),
-            ("narb", "Old_North_Arabian"),
-            ("nbat", "Nabataean"),
-            ("newa", "Newa"),
-            ("newtailue", "New_Tai_Lue"),
-            ("nko", "Nko"),
-            ("nkoo", "Nko"),
-            ("nshu", "Nushu"),
-            ("nushu", "Nushu"),
-            ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"),
-            ("ogam", "Ogham"),
-            ("ogham", "Ogham"),
-            ("olchiki", "Ol_Chiki"),
-            ("olck", "Ol_Chiki"),
-            ("oldhungarian", "Old_Hungarian"),
-            ("olditalic", "Old_Italic"),
-            ("oldnortharabian", "Old_North_Arabian"),
-            ("oldpermic", "Old_Permic"),
-            ("oldpersian", "Old_Persian"),
-            ("oldsogdian", "Old_Sogdian"),
-            ("oldsoutharabian", "Old_South_Arabian"),
-            ("oldturkic", "Old_Turkic"),
-            ("olduyghur", "Old_Uyghur"),
-            ("olonal", "Ol_Onal"),
-            ("onao", "Ol_Onal"),
-            ("oriya", "Oriya"),
-            ("orkh", "Old_Turkic"),
-            ("orya", "Oriya"),
-            ("osage", "Osage"),
-            ("osge", "Osage"),
-            ("osma", "Osmanya"),
-            ("osmanya", "Osmanya"),
-            ("ougr", "Old_Uyghur"),
-            ("pahawhhmong", "Pahawh_Hmong"),
-            ("palm", "Palmyrene"),
-            ("palmyrene", "Palmyrene"),
-            ("pauc", "Pau_Cin_Hau"),
-            ("paucinhau", "Pau_Cin_Hau"),
-            ("perm", "Old_Permic"),
-            ("phag", "Phags_Pa"),
-            ("phagspa", "Phags_Pa"),
-            ("phli", "Inscriptional_Pahlavi"),
-            ("phlp", "Psalter_Pahlavi"),
-            ("phnx", "Phoenician"),
-            ("phoenician", "Phoenician"),
-            ("plrd", "Miao"),
-            ("prti", "Inscriptional_Parthian"),
-            ("psalterpahlavi", "Psalter_Pahlavi"),
-            ("qaac", "Coptic"),
-            ("qaai", "Inherited"),
-            ("rejang", "Rejang"),
-            ("rjng", "Rejang"),
-            ("rohg", "Hanifi_Rohingya"),
-            ("runic", "Runic"),
-            ("runr", "Runic"),
-            ("samaritan", "Samaritan"),
-            ("samr", "Samaritan"),
-            ("sarb", "Old_South_Arabian"),
-            ("saur", "Saurashtra"),
-            ("saurashtra", "Saurashtra"),
-            ("sgnw", "SignWriting"),
-            ("sharada", "Sharada"),
-            ("shavian", "Shavian"),
-            ("shaw", "Shavian"),
-            ("shrd", "Sharada"),
-            ("sidd", "Siddham"),
-            ("siddham", "Siddham"),
-            ("signwriting", "SignWriting"),
-            ("sind", "Khudawadi"),
-            ("sinh", "Sinhala"),
-            ("sinhala", "Sinhala"),
-            ("sogd", "Sogdian"),
-            ("sogdian", "Sogdian"),
-            ("sogo", "Old_Sogdian"),
-            ("sora", "Sora_Sompeng"),
-            ("sorasompeng", "Sora_Sompeng"),
-            ("soyo", "Soyombo"),
-            ("soyombo", "Soyombo"),
-            ("sund", "Sundanese"),
-            ("sundanese", "Sundanese"),
-            ("sunu", "Sunuwar"),
-            ("sunuwar", "Sunuwar"),
-            ("sylo", "Syloti_Nagri"),
-            ("sylotinagri", "Syloti_Nagri"),
-            ("syrc", "Syriac"),
-            ("syriac", "Syriac"),
-            ("tagalog", "Tagalog"),
-            ("tagb", "Tagbanwa"),
-            ("tagbanwa", "Tagbanwa"),
-            ("taile", "Tai_Le"),
-            ("taitham", "Tai_Tham"),
-            ("taiviet", "Tai_Viet"),
-            ("takr", "Takri"),
-            ("takri", "Takri"),
-            ("tale", "Tai_Le"),
-            ("talu", "New_Tai_Lue"),
-            ("tamil", "Tamil"),
-            ("taml", "Tamil"),
-            ("tang", "Tangut"),
-            ("tangsa", "Tangsa"),
-            ("tangut", "Tangut"),
-            ("tavt", "Tai_Viet"),
-            ("telu", "Telugu"),
-            ("telugu", "Telugu"),
-            ("tfng", "Tifinagh"),
-            ("tglg", "Tagalog"),
-            ("thaa", "Thaana"),
-            ("thaana", "Thaana"),
-            ("thai", "Thai"),
-            ("tibetan", "Tibetan"),
-            ("tibt", "Tibetan"),
-            ("tifinagh", "Tifinagh"),
-            ("tirh", "Tirhuta"),
-            ("tirhuta", "Tirhuta"),
-            ("tnsa", "Tangsa"),
-            ("todhri", "Todhri"),
-            ("todr", "Todhri"),
-            ("toto", "Toto"),
-            ("tulutigalari", "Tulu_Tigalari"),
-            ("tutg", "Tulu_Tigalari"),
-            ("ugar", "Ugaritic"),
-            ("ugaritic", "Ugaritic"),
-            ("unknown", "Unknown"),
-            ("vai", "Vai"),
-            ("vaii", "Vai"),
-            ("vith", "Vithkuqi"),
-            ("vithkuqi", "Vithkuqi"),
-            ("wancho", "Wancho"),
-            ("wara", "Warang_Citi"),
-            ("warangciti", "Warang_Citi"),
-            ("wcho", "Wancho"),
-            ("xpeo", "Old_Persian"),
-            ("xsux", "Cuneiform"),
-            ("yezi", "Yezidi"),
-            ("yezidi", "Yezidi"),
-            ("yi", "Yi"),
-            ("yiii", "Yi"),
-            ("zanabazarsquare", "Zanabazar_Square"),
-            ("zanb", "Zanabazar_Square"),
-            ("zinh", "Inherited"),
-            ("zyyy", "Common"),
-            ("zzzz", "Unknown"),
-        ],
-    ),
-    (
-        "Sentence_Break",
-        &[
-            ("at", "ATerm"),
-            ("aterm", "ATerm"),
-            ("cl", "Close"),
-            ("close", "Close"),
-            ("cr", "CR"),
-            ("ex", "Extend"),
-            ("extend", "Extend"),
-            ("fo", "Format"),
-            ("format", "Format"),
-            ("le", "OLetter"),
-            ("lf", "LF"),
-            ("lo", "Lower"),
-            ("lower", "Lower"),
-            ("nu", "Numeric"),
-            ("numeric", "Numeric"),
-            ("oletter", "OLetter"),
-            ("other", "Other"),
-            ("sc", "SContinue"),
-            ("scontinue", "SContinue"),
-            ("se", "Sep"),
-            ("sep", "Sep"),
-            ("sp", "Sp"),
-            ("st", "STerm"),
-            ("sterm", "STerm"),
-            ("up", "Upper"),
-            ("upper", "Upper"),
-            ("xx", "Other"),
-        ],
-    ),
-    (
-        "Word_Break",
-        &[
-            ("aletter", "ALetter"),
-            ("cr", "CR"),
-            ("doublequote", "Double_Quote"),
-            ("dq", "Double_Quote"),
-            ("eb", "E_Base"),
-            ("ebase", "E_Base"),
-            ("ebasegaz", "E_Base_GAZ"),
-            ("ebg", "E_Base_GAZ"),
-            ("em", "E_Modifier"),
-            ("emodifier", "E_Modifier"),
-            ("ex", "ExtendNumLet"),
-            ("extend", "Extend"),
-            ("extendnumlet", "ExtendNumLet"),
-            ("fo", "Format"),
-            ("format", "Format"),
-            ("gaz", "Glue_After_Zwj"),
-            ("glueafterzwj", "Glue_After_Zwj"),
-            ("hebrewletter", "Hebrew_Letter"),
-            ("hl", "Hebrew_Letter"),
-            ("ka", "Katakana"),
-            ("katakana", "Katakana"),
-            ("le", "ALetter"),
-            ("lf", "LF"),
-            ("mb", "MidNumLet"),
-            ("midletter", "MidLetter"),
-            ("midnum", "MidNum"),
-            ("midnumlet", "MidNumLet"),
-            ("ml", "MidLetter"),
-            ("mn", "MidNum"),
-            ("newline", "Newline"),
-            ("nl", "Newline"),
-            ("nu", "Numeric"),
-            ("numeric", "Numeric"),
-            ("other", "Other"),
-            ("regionalindicator", "Regional_Indicator"),
-            ("ri", "Regional_Indicator"),
-            ("singlequote", "Single_Quote"),
-            ("sq", "Single_Quote"),
-            ("wsegspace", "WSegSpace"),
-            ("xx", "Other"),
-            ("zwj", "ZWJ"),
-        ],
-    ),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script.rs
deleted file mode 100644
index 3e437ca..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script.rs
+++ /dev/null
@@ -1,1300 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate script ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("Adlam", ADLAM),
-    ("Ahom", AHOM),
-    ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS),
-    ("Arabic", ARABIC),
-    ("Armenian", ARMENIAN),
-    ("Avestan", AVESTAN),
-    ("Balinese", BALINESE),
-    ("Bamum", BAMUM),
-    ("Bassa_Vah", BASSA_VAH),
-    ("Batak", BATAK),
-    ("Bengali", BENGALI),
-    ("Bhaiksuki", BHAIKSUKI),
-    ("Bopomofo", BOPOMOFO),
-    ("Brahmi", BRAHMI),
-    ("Braille", BRAILLE),
-    ("Buginese", BUGINESE),
-    ("Buhid", BUHID),
-    ("Canadian_Aboriginal", CANADIAN_ABORIGINAL),
-    ("Carian", CARIAN),
-    ("Caucasian_Albanian", CAUCASIAN_ALBANIAN),
-    ("Chakma", CHAKMA),
-    ("Cham", CHAM),
-    ("Cherokee", CHEROKEE),
-    ("Chorasmian", CHORASMIAN),
-    ("Common", COMMON),
-    ("Coptic", COPTIC),
-    ("Cuneiform", CUNEIFORM),
-    ("Cypriot", CYPRIOT),
-    ("Cypro_Minoan", CYPRO_MINOAN),
-    ("Cyrillic", CYRILLIC),
-    ("Deseret", DESERET),
-    ("Devanagari", DEVANAGARI),
-    ("Dives_Akuru", DIVES_AKURU),
-    ("Dogra", DOGRA),
-    ("Duployan", DUPLOYAN),
-    ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS),
-    ("Elbasan", ELBASAN),
-    ("Elymaic", ELYMAIC),
-    ("Ethiopic", ETHIOPIC),
-    ("Garay", GARAY),
-    ("Georgian", GEORGIAN),
-    ("Glagolitic", GLAGOLITIC),
-    ("Gothic", GOTHIC),
-    ("Grantha", GRANTHA),
-    ("Greek", GREEK),
-    ("Gujarati", GUJARATI),
-    ("Gunjala_Gondi", GUNJALA_GONDI),
-    ("Gurmukhi", GURMUKHI),
-    ("Gurung_Khema", GURUNG_KHEMA),
-    ("Han", HAN),
-    ("Hangul", HANGUL),
-    ("Hanifi_Rohingya", HANIFI_ROHINGYA),
-    ("Hanunoo", HANUNOO),
-    ("Hatran", HATRAN),
-    ("Hebrew", HEBREW),
-    ("Hiragana", HIRAGANA),
-    ("Imperial_Aramaic", IMPERIAL_ARAMAIC),
-    ("Inherited", INHERITED),
-    ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI),
-    ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN),
-    ("Javanese", JAVANESE),
-    ("Kaithi", KAITHI),
-    ("Kannada", KANNADA),
-    ("Katakana", KATAKANA),
-    ("Kawi", KAWI),
-    ("Kayah_Li", KAYAH_LI),
-    ("Kharoshthi", KHAROSHTHI),
-    ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT),
-    ("Khmer", KHMER),
-    ("Khojki", KHOJKI),
-    ("Khudawadi", KHUDAWADI),
-    ("Kirat_Rai", KIRAT_RAI),
-    ("Lao", LAO),
-    ("Latin", LATIN),
-    ("Lepcha", LEPCHA),
-    ("Limbu", LIMBU),
-    ("Linear_A", LINEAR_A),
-    ("Linear_B", LINEAR_B),
-    ("Lisu", LISU),
-    ("Lycian", LYCIAN),
-    ("Lydian", LYDIAN),
-    ("Mahajani", MAHAJANI),
-    ("Makasar", MAKASAR),
-    ("Malayalam", MALAYALAM),
-    ("Mandaic", MANDAIC),
-    ("Manichaean", MANICHAEAN),
-    ("Marchen", MARCHEN),
-    ("Masaram_Gondi", MASARAM_GONDI),
-    ("Medefaidrin", MEDEFAIDRIN),
-    ("Meetei_Mayek", MEETEI_MAYEK),
-    ("Mende_Kikakui", MENDE_KIKAKUI),
-    ("Meroitic_Cursive", MEROITIC_CURSIVE),
-    ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS),
-    ("Miao", MIAO),
-    ("Modi", MODI),
-    ("Mongolian", MONGOLIAN),
-    ("Mro", MRO),
-    ("Multani", MULTANI),
-    ("Myanmar", MYANMAR),
-    ("Nabataean", NABATAEAN),
-    ("Nag_Mundari", NAG_MUNDARI),
-    ("Nandinagari", NANDINAGARI),
-    ("New_Tai_Lue", NEW_TAI_LUE),
-    ("Newa", NEWA),
-    ("Nko", NKO),
-    ("Nushu", NUSHU),
-    ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG),
-    ("Ogham", OGHAM),
-    ("Ol_Chiki", OL_CHIKI),
-    ("Ol_Onal", OL_ONAL),
-    ("Old_Hungarian", OLD_HUNGARIAN),
-    ("Old_Italic", OLD_ITALIC),
-    ("Old_North_Arabian", OLD_NORTH_ARABIAN),
-    ("Old_Permic", OLD_PERMIC),
-    ("Old_Persian", OLD_PERSIAN),
-    ("Old_Sogdian", OLD_SOGDIAN),
-    ("Old_South_Arabian", OLD_SOUTH_ARABIAN),
-    ("Old_Turkic", OLD_TURKIC),
-    ("Old_Uyghur", OLD_UYGHUR),
-    ("Oriya", ORIYA),
-    ("Osage", OSAGE),
-    ("Osmanya", OSMANYA),
-    ("Pahawh_Hmong", PAHAWH_HMONG),
-    ("Palmyrene", PALMYRENE),
-    ("Pau_Cin_Hau", PAU_CIN_HAU),
-    ("Phags_Pa", PHAGS_PA),
-    ("Phoenician", PHOENICIAN),
-    ("Psalter_Pahlavi", PSALTER_PAHLAVI),
-    ("Rejang", REJANG),
-    ("Runic", RUNIC),
-    ("Samaritan", SAMARITAN),
-    ("Saurashtra", SAURASHTRA),
-    ("Sharada", SHARADA),
-    ("Shavian", SHAVIAN),
-    ("Siddham", SIDDHAM),
-    ("SignWriting", SIGNWRITING),
-    ("Sinhala", SINHALA),
-    ("Sogdian", SOGDIAN),
-    ("Sora_Sompeng", SORA_SOMPENG),
-    ("Soyombo", SOYOMBO),
-    ("Sundanese", SUNDANESE),
-    ("Sunuwar", SUNUWAR),
-    ("Syloti_Nagri", SYLOTI_NAGRI),
-    ("Syriac", SYRIAC),
-    ("Tagalog", TAGALOG),
-    ("Tagbanwa", TAGBANWA),
-    ("Tai_Le", TAI_LE),
-    ("Tai_Tham", TAI_THAM),
-    ("Tai_Viet", TAI_VIET),
-    ("Takri", TAKRI),
-    ("Tamil", TAMIL),
-    ("Tangsa", TANGSA),
-    ("Tangut", TANGUT),
-    ("Telugu", TELUGU),
-    ("Thaana", THAANA),
-    ("Thai", THAI),
-    ("Tibetan", TIBETAN),
-    ("Tifinagh", TIFINAGH),
-    ("Tirhuta", TIRHUTA),
-    ("Todhri", TODHRI),
-    ("Toto", TOTO),
-    ("Tulu_Tigalari", TULU_TIGALARI),
-    ("Ugaritic", UGARITIC),
-    ("Vai", VAI),
-    ("Vithkuqi", VITHKUQI),
-    ("Wancho", WANCHO),
-    ("Warang_Citi", WARANG_CITI),
-    ("Yezidi", YEZIDI),
-    ("Yi", YI),
-    ("Zanabazar_Square", ZANABAZAR_SQUARE),
-];
-
-pub const ADLAM: &'static [(char, char)] =
-    &[('𞀀', 'đž„‹'), ('𞄐', 'đž„™'), ('𞄞', 'đž„Ÿ')];
-
-pub const AHOM: &'static [(char, char)] =
-    &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')];
-
-pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')];
-
-pub const ARABIC: &'static [(char, char)] = &[
-    ('\u{600}', '\u{604}'),
-    ('ۆ', 'ۋ'),
-    ('ۍ', '\u{61a}'),
-    ('\u{61c}', '۞'),
-    ('Ű ', 'Űż'),
-    ('ف', 'ي'),
-    ('\u{656}', 'ÙŻ'),
-    ('Ù±', '\u{6dc}'),
-    ('۞', 'Ûż'),
-    ('ʐ', 'ʿ'),
-    ('àĄ°', 'àąŽ'),
-    ('\u{890}', '\u{891}'),
-    ('\u{897}', '\u{8e1}'),
-    ('\u{8e3}', '\u{8ff}'),
-    ('ﭐ', 'ïŻ‚'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('﷏', '﷏'),
-    ('ï·°', 'ï·ż'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('đč ', 'đčŸ'),
-    ('𐻂', '𐻄'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('đž»°', 'đž»±'),
-];
-
-pub const ARMENIAN: &'static [(char, char)] =
-    &[('Ô±', 'Ֆ'), ('ՙ', '֊'), ('֍', '֏'), ('ïŹ“', 'ïŹ—')];
-
-pub const AVESTAN: &'static [(char, char)] = &[('𐬀', '𐏔'), ('đŹč', '𐏿')];
-
-pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')];
-
-pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖹾')];
-
-pub const BASSA_VAH: &'static [(char, char)] =
-    &[('𖫐', 'đ–«­'), ('\u{16af0}', 'đ–«”')];
-
-pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('áŻŒ', '᯿')];
-
-pub const BENGALI: &'static [(char, char)] = &[
-    ('àŠ€', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('\u{9bc}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৎ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', '\u{9e3}'),
-    ('à§Š', '\u{9fe}'),
-];
-
-pub const BHAIKSUKI: &'static [(char, char)] =
-    &[('𑰀', '𑰈'), ('𑰊', '\u{11c36}'), ('\u{11c38}', '𑱅'), ('𑱐', '𑱏')];
-
-pub const BOPOMOFO: &'static [(char, char)] =
-    &[('ËȘ', 'Ë«'), ('ㄅ', 'ㄯ'), ('ㆠ', 'ㆿ')];
-
-pub const BRAHMI: &'static [(char, char)] =
-    &[('𑀀', '𑁍'), ('𑁒', '𑁔'), ('\u{1107f}', '\u{1107f}')];
-
-pub const BRAILLE: &'static [(char, char)] = &[('⠀', '⣿')];
-
-pub const BUGINESE: &'static [(char, char)] = &[('Ṁ', '\u{1a1b}'), ('Ṟ', 'ṟ')];
-
-pub const BUHID: &'static [(char, char)] = &[('ᝀ', '\u{1753}')];
-
-pub const CANADIAN_ABORIGINAL: &'static [(char, char)] =
-    &[('᐀', 'ᙿ'), ('áą°', 'ᣔ'), ('đ‘Ș°', 'đ‘Șż')];
-
-pub const CARIAN: &'static [(char, char)] = &[('𐊠', '𐋐')];
-
-pub const CAUCASIAN_ALBANIAN: &'static [(char, char)] =
-    &[('𐔰', '𐕣'), ('𐕯', '𐕯')];
-
-pub const CHAKMA: &'static [(char, char)] =
-    &[('\u{11100}', '\u{11134}'), ('đ‘„¶', '𑅇')];
-
-pub const CHAM: &'static [(char, char)] =
-    &[('Ꚁ', '\u{aa36}'), ('ꩀ', 'ꩍ'), ('꩐', '꩙'), ('꩜', '꩟')];
-
-pub const CHEROKEE: &'static [(char, char)] =
-    &[('Ꭰ', 'Ꮤ'), ('Ꮮ', 'Ꮬ'), ('ê­°', 'êźż')];
-
-pub const CHORASMIAN: &'static [(char, char)] = &[('đŸ°', '𐿋')];
-
-pub const COMMON: &'static [(char, char)] = &[
-    ('\0', '@'),
-    ('[', '`'),
-    ('{', '©'),
-    ('«', '¹'),
-    ('»', '¿'),
-    ('×', '×'),
-    ('÷', '÷'),
-    ('Êč', '˟'),
-    ('Ë„', 'Ë©'),
-    ('ËŹ', 'Ëż'),
-    ('ÍŽ', 'ÍŽ'),
-    ('ÍŸ', 'ÍŸ'),
-    ('΅', '΅'),
-    ('·', '·'),
-    ('\u{605}', '\u{605}'),
-    ('ی', 'ی'),
-    ('ۛ', 'ۛ'),
-    ('۟', '۟'),
-    ('ـ', 'ـ'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('à„€', 'à„„'),
-    ('àžż', 'àžż'),
-    ('àż•', 'àż˜'),
-    ('჻', '჻'),
-    ('᛫', '᛭'),
-    ('᜔', '᜶'),
-    ('᠂', '᠃'),
-    ('᠅', '᠅'),
-    ('᳓', '᳓'),
-    ('᳥', '᳥'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł·'),
-    ('áłș', 'áłș'),
-    ('\u{2000}', '\u{200b}'),
-    ('\u{200e}', '\u{2064}'),
-    ('\u{2066}', '⁰'),
-    ('⁎', ' '),
-    ('₀', '₎'),
-    ('₠', '⃀'),
-    ('℀', 'â„„'),
-    ('℧', '℩'),
-    ('ℬ', 'ℱ'),
-    ('ℳ', '⅍'),
-    ('⅏', '⅟'),
-    ('↉', '↋'),
-    ('←', '␩'),
-    ('⑀', '⑊'),
-    ('①', '⟿'),
-    (' ', 'â­ł'),
-    ('â­¶', '⼕'),
-    ('⼗', '⯿'),
-    ('⾀', 'âč'),
-    ('⿰', '〄'),
-    ('〆', '〆'),
-    ('〈', '〠'),
-    ('〰', '〷'),
-    ('ă€Œ', '〿'),
-    ('゛', '゜'),
-    ('゠', '゠'),
-    ('・', 'ăƒŒ'),
-    ('㆐', '㆟'),
-    ('㇀', '㇄'),
-    ('㇯', '㇯'),
-    ('㈠', '㉟'),
-    ('㉿', '㋏'),
-    ('㋿', '㋿'),
-    ('㍘', '㏿'),
-    ('䷀', 'ä·ż'),
-    ('꜀', 'êœĄ'),
-    ('ꞈ', '꞊'),
-    ('ê °', 'ê č'),
-    ('ê€ź', 'ê€ź'),
-    ('ꧏ', 'ꧏ'),
-    ('꭛', '꭛'),
-    ('ê­Ș', 'ê­«'),
-    ('', 'ïŽż'),
-    ('', 'ïž™'),
-    ('ïž°', 'ïč’'),
-    ('ïč”', 'ïčŠ'),
-    ('ïčš', 'ïč«'),
-    ('\u{feff}', '\u{feff}'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('ïż ', 'ïżŠ'),
-    ('ïżš', 'ïżź'),
-    ('\u{fff9}', 'ïżœ'),
-    ('𐄀', '𐄂'),
-    ('𐄇', '𐄳'),
-    ('𐄷', '𐄿'),
-    ('𐆐', '𐆜'),
-    ('𐇐', 'đ‡Œ'),
-    ('𐋡', '𐋻'),
-    ('\u{1bca0}', '\u{1bca3}'),
-    ('𜰀', 'đœłč'),
-    ('𜮀', 'đœșł'),
-    ('đœœ', '𜿃'),
-    ('𝀀', 'đƒ”'),
-    ('𝄀', '𝄩'),
-    ('đ„©', '\u{1d166}'),
-    ('đ…Ș', '\u{1d17a}'),
-    ('𝆃', '𝆄'),
-    ('𝆌', 'đ†©'),
-    ('𝆺𝅥', 'đ‡Ș'),
-    ('𝋀', '𝋓'),
-    ('𝋠', '𝋳'),
-    ('𝌀', '𝍖'),
-    ('𝍠', '𝍾'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝟋'),
-    ('𝟎', '𝟿'),
-    ('đž±±', 'đžČŽ'),
-    ('𞮁', 'đžŽœ'),
-    ('🀀', 'đŸ€«'),
-    ('🀰', '🂓'),
-    ('🂠', '🂼'),
-    ('đŸ‚±', '🂿'),
-    ('🃁', '🃏'),
-    ('🃑', 'đŸƒ”'),
-    ('🄀', '🆭'),
-    ('🇩', '🇿'),
-    ('🈁', '🈂'),
-    ('🈐', 'đŸˆ»'),
-    ('🉀', '🉈'),
-    ('🉐', '🉑'),
-    ('🉠', 'đŸ‰„'),
-    ('🌀', '🛗'),
-    ('🛜', '🛬'),
-    ('🛰', 'đŸ›Œ'),
-    ('🜀', 'đŸ¶'),
-    ('đŸ»', '🟙'),
-    ('🟠', 'đŸŸ«'),
-    ('🟰', '🟰'),
-    ('🠀', '🠋'),
-    ('🠐', '🡇'),
-    ('🡐', '🡙'),
-    ('🡠', '🱇'),
-    ('🱐', '🱭'),
-    ('🱰', 'đŸą»'),
-    ('🣀', '🣁'),
-    ('đŸ€€', 'đŸ©“'),
-    ('đŸ© ', 'đŸ©­'),
-    ('đŸ©°', 'đŸ©Œ'),
-    ('đŸȘ€', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸ«†'),
-    ('đŸ«Ž', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«©'),
-    ('đŸ«°', 'đŸ«ž'),
-    ('🬀', '🼒'),
-    ('🼔', 'đŸŻč'),
-    ('\u{e0001}', '\u{e0001}'),
-    ('\u{e0020}', '\u{e007f}'),
-];
-
-pub const COPTIC: &'static [(char, char)] =
-    &[('Ïą', 'ÏŻ'), ('âȀ', 'âłł'), ('âłč', 'âłż')];
-
-pub const CUNEIFORM: &'static [(char, char)] =
-    &[('𒀀', '𒎙'), ('𒐀', '𒑼'), ('𒑰', '𒑮'), ('𒒀', '𒕃')];
-
-pub const CYPRIOT: &'static [(char, char)] =
-    &[('𐠀', '𐠅'), ('𐠈', '𐠈'), ('𐠊', '𐠔'), ('𐠷', '𐠞'), ('đ Œ', 'đ Œ'), ('𐠿', '𐠿')];
-
-pub const CYPRO_MINOAN: &'static [(char, char)] = &[('đ’Ÿ', 'đ’żČ')];
-
-pub const CYRILLIC: &'static [(char, char)] = &[
-    ('Ѐ', '\u{484}'),
-    ('\u{487}', 'ÔŻ'),
-    ('áȀ', 'áȊ'),
-    ('Ꭻ', 'Ꭻ'),
-    ('ᔞ', 'ᔞ'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('Ꙁ', '\u{a69f}'),
-    ('\u{fe2e}', '\u{fe2f}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-];
-
-pub const DESERET: &'static [(char, char)] = &[('𐐀', '𐑏')];
-
-pub const DEVANAGARI: &'static [(char, char)] = &[
-    ('\u{900}', 'à„'),
-    ('\u{955}', '\u{963}'),
-    ('à„Š', 'à„ż'),
-    ('\u{a8e0}', '\u{a8ff}'),
-    ('𑬀', '𑬉'),
-];
-
-pub const DIVES_AKURU: &'static [(char, char)] = &[
-    ('đ‘€€', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '𑄆'),
-    ('𑄐', 'đ‘„™'),
-];
-
-pub const DOGRA: &'static [(char, char)] = &[('𑠀', 'đ‘ »')];
-
-pub const DUPLOYAN: &'static [(char, char)] =
-    &[('𛰀', 'đ›±Ș'), ('đ›±°', 'đ›±Œ'), ('đ›Č€', 'đ›Čˆ'), ('đ›Č', 'đ›Č™'), ('đ›Čœ', 'đ›ČŸ')];
-
-pub const EGYPTIAN_HIEROGLYPHS: &'static [(char, char)] =
-    &[('𓀀', '\u{13455}'), ('𓑠', 'đ”ș')];
-
-pub const ELBASAN: &'static [(char, char)] = &[('𐔀', '𐔧')];
-
-pub const ELYMAIC: &'static [(char, char)] = &[('𐿠', '𐿶')];
-
-pub const ETHIOPIC: &'static [(char, char)] = &[
-    ('ሀ', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('\u{135d}', 'ፌ'),
-    ('ᎀ', '᎙'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-];
-
-pub const GARAY: &'static [(char, char)] =
-    &[('𐔀', '𐔄'), ('\u{10d69}', '𐶅'), ('𐶎', 'đ¶')];
-
-pub const GEORGIAN: &'static [(char, char)] = &[
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ჿ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-];
-
-pub const GLAGOLITIC: &'static [(char, char)] = &[
-    ('Ⰰ', 'ⱟ'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-];
-
-pub const GOTHIC: &'static [(char, char)] = &[('𐌰', '𐍊')];
-
-pub const GRANTHA: &'static [(char, char)] = &[
-    ('\u{11300}', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('\u{1133c}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('𑍐', '𑍐'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍝', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-];
-
-pub const GREEK: &'static [(char, char)] = &[
-    ('Ͱ', 'ͳ'),
-    ('Í”', 'Í·'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('΄', '΄'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'ÏĄ'),
-    ('ϰ', 'Ïż'),
-    ('ᎊ', 'áŽȘ'),
-    ('ᔝ', 'ᔥ'),
-    ('ᔊ', 'á”Ș'),
-    ('á¶ż', 'á¶ż'),
-    ('ጀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', 'ῄ'),
-    ('ῆ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('῝', '`'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŸ'),
-    ('℩', '℩'),
-    ('ê­„', 'ê­„'),
-    ('𐅀', '𐆎'),
-    ('𐆠', '𐆠'),
-    ('𝈀', '𝉅'),
-];
-
-pub const GUJARATI: &'static [(char, char)] = &[
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('\u{abc}', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('ૐ', 'ૐ'),
-    ('à« ', '\u{ae3}'),
-    ('૊', '૱'),
-    ('à«č', '\u{aff}'),
-];
-
-pub const GUNJALA_GONDI: &'static [(char, char)] = &[
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶˜'),
-    ('đ‘¶ ', 'đ‘¶©'),
-];
-
-pub const GURMUKHI: &'static [(char, char)] = &[
-    ('\u{a01}', 'àšƒ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('੊', '੶'),
-];
-
-pub const GURUNG_KHEMA: &'static [(char, char)] = &[('𖄀', 'đ–„č')];
-
-pub const HAN: &'static [(char, char)] = &[
-    ('âș€', 'âș™'),
-    ('âș›', '⻳'),
-    ('⌀', '⿕'),
-    ('々', '々'),
-    ('〇', '〇'),
-    ('〡', '〩'),
-    ('〾', '〻'),
-    ('㐀', 'ä¶ż'),
-    ('侀', '鿿'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('𖿱', '𖿣'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const HANGUL: &'static [(char, char)] = &[
-    ('ᄀ', 'ᇿ'),
-    ('\u{302e}', '\u{302f}'),
-    ('ㄱ', 'ㆎ'),
-    ('㈀', '㈞'),
-    ('㉠', 'ă‰Ÿ'),
-    ('ꄠ', 'ꄌ'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-];
-
-pub const HANIFI_ROHINGYA: &'static [(char, char)] =
-    &[('𐮀', '\u{10d27}'), ('𐎰', 'đŽč')];
-
-pub const HANUNOO: &'static [(char, char)] = &[('ᜠ', '\u{1734}')];
-
-pub const HATRAN: &'static [(char, char)] =
-    &[('𐣠', 'đŁČ'), ('𐣎', '𐣔'), ('𐣻', '𐣿')];
-
-pub const HEBREW: &'static [(char, char)] = &[
-    ('\u{591}', '\u{5c7}'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚŽ'),
-    ('ïŹ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ﭏ'),
-];
-
-pub const HIRAGANA: &'static [(char, char)] = &[
-    ('ぁ', 'ゖ'),
-    ('ゝ', 'ゟ'),
-    ('𛀁', '𛄟'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('🈀', '🈀'),
-];
-
-pub const IMPERIAL_ARAMAIC: &'static [(char, char)] =
-    &[('𐡀', '𐡕'), ('𐡗', '𐡟')];
-
-pub const INHERITED: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{485}', '\u{486}'),
-    ('\u{64b}', '\u{655}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{951}', '\u{954}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce0}'),
-    ('\u{1ce2}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{200c}', '\u{200d}'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('\u{302a}', '\u{302d}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2d}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{1133b}', '\u{1133b}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d167}', '\u{1d169}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const INSCRIPTIONAL_PAHLAVI: &'static [(char, char)] =
-    &[('𐭠', 'đ­Č'), ('𐭞', '𐭿')];
-
-pub const INSCRIPTIONAL_PARTHIAN: &'static [(char, char)] =
-    &[('𐭀', '𐭕'), ('𐭘', '𐭟')];
-
-pub const JAVANESE: &'static [(char, char)] =
-    &[('\u{a980}', '꧍'), ('꧐', '꧙'), ('꧞', '꧟')];
-
-pub const KAITHI: &'static [(char, char)] =
-    &[('\u{11080}', '\u{110c2}'), ('\u{110cd}', '\u{110cd}')];
-
-pub const KANNADA: &'static [(char, char)] = &[
-    ('àȀ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('\u{cbc}', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àł', 'àłž'),
-    ('àł ', '\u{ce3}'),
-    ('àłŠ', 'àłŻ'),
-    ('àł±', 'àłł'),
-];
-
-pub const KATAKANA: &'static [(char, char)] = &[
-    ('ァ', 'ăƒș'),
-    ('ăƒœ', 'ヿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㋐', 'ă‹Ÿ'),
-    ('㌀', '㍗'),
-    ('', 'ïœŻ'),
-    ('', ''),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛀀'),
-    ('𛄠', '𛄱'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-];
-
-pub const KAWI: &'static [(char, char)] =
-    &[('\u{11f00}', 'đ‘Œ'), ('đ‘Œ’', '\u{11f3a}'), ('đ‘ŒŸ', '\u{11f5a}')];
-
-pub const KAYAH_LI: &'static [(char, char)] = &[('ꀀ', '\u{a92d}'), ('ê€Ż', 'ê€Ż')];
-
-pub const KHAROSHTHI: &'static [(char, char)] = &[
-    ('𐹀', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '𐩈'),
-    ('𐩐', '𐩘'),
-];
-
-pub const KHITAN_SMALL_SCRIPT: &'static [(char, char)] =
-    &[('\u{16fe4}', '\u{16fe4}'), ('𘬀', '𘳕'), ('𘳿', '𘳿')];
-
-pub const KHMER: &'static [(char, char)] =
-    &[('ក', '\u{17dd}'), ('០', '៩'), ('៰', 'áŸč'), ('á§ ', 'á§ż')];
-
-pub const KHOJKI: &'static [(char, char)] = &[('𑈀', '𑈑'), ('𑈓', '\u{11241}')];
-
-pub const KHUDAWADI: &'static [(char, char)] =
-    &[('𑊰', '\u{112ea}'), ('𑋰', 'đ‘‹č')];
-
-pub const KIRAT_RAI: &'static [(char, char)] = &[('𖔀', 'đ–”č')];
-
-pub const LAO: &'static [(char, char)] = &[
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('\u{ec8}', '\u{ece}'),
-    ('໐', '໙'),
-    ('ໜ', 'ໟ'),
-];
-
-pub const LATIN: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'Êž'),
-    ('Ë ', 'Ë€'),
-    ('ᮀ', 'ᎄ'),
-    ('ᎏ', 'ᔜ'),
-    ('ᔹ', 'ᔄ'),
-    ('ᔫ', 'ᔷ'),
-    ('á”č', 'á¶Ÿ'),
-    ('ᾀ', 'ỿ'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('â„Ș', 'Å'),
-    ('â„Č', 'â„Č'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⱡ', 'Ɀ'),
-    ('êœą', 'ꞇ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'êŸż'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ê­€'),
-    ('ê­Š', 'ê­©'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-];
-
-pub const LEPCHA: &'static [(char, char)] =
-    &[('ᰀ', '\u{1c37}'), ('᰻', '᱉'), ('ᱍ', 'ᱏ')];
-
-pub const LIMBU: &'static [(char, char)] = &[
-    ('က', 'သ'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('á„€', 'á„€'),
-    ('á„„', 'ᄏ'),
-];
-
-pub const LINEAR_A: &'static [(char, char)] =
-    &[('𐘀', 'đœ¶'), ('𐝀', '𐝕'), ('𐝠', '𐝧')];
-
-pub const LINEAR_B: &'static [(char, char)] = &[
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-];
-
-pub const LISU: &'static [(char, char)] = &[('ꓐ', 'ê“ż'), ('đ‘Ÿ°', 'đ‘Ÿ°')];
-
-pub const LYCIAN: &'static [(char, char)] = &[('𐊀', '𐊜')];
-
-pub const LYDIAN: &'static [(char, char)] = &[('𐀠', 'đ€č'), ('𐀿', '𐀿')];
-
-pub const MAHAJANI: &'static [(char, char)] = &[('𑅐', 'đ‘…¶')];
-
-pub const MAKASAR: &'static [(char, char)] = &[('đ‘» ', '𑻞')];
-
-pub const MALAYALAM: &'static [(char, char)] = &[
-    ('\u{d00}', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”'),
-    ('à””', '\u{d63}'),
-    ('à”Š', 'à”ż'),
-];
-
-pub const MANDAIC: &'static [(char, char)] = &[('àĄ€', '\u{85b}'), ('àĄž', 'àĄž')];
-
-pub const MANICHAEAN: &'static [(char, char)] =
-    &[('𐫀', '\u{10ae6}'), ('𐫫', '𐫶')];
-
-pub const MARCHEN: &'static [(char, char)] =
-    &[('𑱰', 'đ‘ȏ'), ('\u{11c92}', '\u{11ca7}'), ('đ‘Č©', '\u{11cb6}')];
-
-pub const MASARAM_GONDI: &'static [(char, char)] = &[
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d47}'),
-    ('𑔐', 'đ‘”™'),
-];
-
-pub const MEDEFAIDRIN: &'static [(char, char)] = &[('đ–č€', 'đ–șš')];
-
-pub const MEETEI_MAYEK: &'static [(char, char)] =
-    &[('ê« ', '\u{aaf6}'), ('êŻ€', '\u{abed}'), ('êŻ°', 'êŻč')];
-
-pub const MENDE_KIKAKUI: &'static [(char, char)] =
-    &[('𞠀', '𞣄'), ('𞣇', '\u{1e8d6}')];
-
-pub const MEROITIC_CURSIVE: &'static [(char, char)] =
-    &[('𐊠', '𐊷'), ('đŠŒ', '𐧏'), ('𐧒', '𐧿')];
-
-pub const MEROITIC_HIEROGLYPHS: &'static [(char, char)] = &[('𐩀', '𐩟')];
-
-pub const MIAO: &'static [(char, char)] =
-    &[('đ–Œ€', 'đ–œŠ'), ('\u{16f4f}', 'đ–Ÿ‡'), ('\u{16f8f}', 'đ–ŸŸ')];
-
-pub const MODI: &'static [(char, char)] = &[('𑘀', '𑙄'), ('𑙐', '𑙙')];
-
-pub const MONGOLIAN: &'static [(char, char)] =
-    &[('᠀', '᠁'), ('᠄', '᠄'), ('᠆', '᠙'), ('á  ', 'ᥞ'), ('᱀', 'áąȘ'), ('𑙠', '𑙬')];
-
-pub const MRO: &'static [(char, char)] = &[('đ–©€', 'đ–©ž'), ('đ–© ', 'đ–©©'), ('đ–©ź', 'đ–©Ż')];
-
-pub const MULTANI: &'static [(char, char)] =
-    &[('𑊀', '𑊆'), ('𑊈', '𑊈'), ('𑊊', '𑊍'), ('𑊏', '𑊝'), ('𑊟', '𑊩')];
-
-pub const MYANMAR: &'static [(char, char)] =
-    &[('က', '႟'), ('ê§ ', 'ê§Ÿ'), ('ê© ', 'ê©ż'), ('𑛐', '𑛣')];
-
-pub const NABATAEAN: &'static [(char, char)] = &[('𐱀', '𐱞'), ('𐱧', '𐱯')];
-
-pub const NAG_MUNDARI: &'static [(char, char)] = &[('𞓐', 'đž“č')];
-
-pub const NANDINAGARI: &'static [(char, char)] =
-    &[('𑩠', '𑩧'), ('đ‘ŠȘ', '\u{119d7}'), ('\u{119da}', 'đ‘§€')];
-
-pub const NEW_TAI_LUE: &'static [(char, char)] =
-    &[('ᩀ', 'ካ'), ('ኰ', 'ᧉ'), ('᧐', '᧚'), ('᧞', '᧟')];
-
-pub const NEWA: &'static [(char, char)] = &[('𑐀', '𑑛'), ('𑑝', '𑑡')];
-
-pub const NKO: &'static [(char, char)] = &[('߀', 'ßș'), ('\u{7fd}', 'ßż')];
-
-pub const NUSHU: &'static [(char, char)] = &[('𖿡', '𖿡'), ('𛅰', '𛋻')];
-
-pub const NYIAKENG_PUACHUE_HMONG: &'static [(char, char)] =
-    &[('𞄀', '𞄬'), ('\u{1e130}', 'đž„œ'), ('𞅀', '𞅉'), ('𞅎', '𞅏')];
-
-pub const OGHAM: &'static [(char, char)] = &[('\u{1680}', '᚜')];
-
-pub const OL_CHIKI: &'static [(char, char)] = &[('᱐', '᱿')];
-
-pub const OL_ONAL: &'static [(char, char)] = &[('𞗐', 'đž—ș'), ('𞗿', '𞗿')];
-
-pub const OLD_HUNGARIAN: &'static [(char, char)] =
-    &[('đČ€', 'đČČ'), ('𐳀', 'đłČ'), ('đłș', '𐳿')];
-
-pub const OLD_ITALIC: &'static [(char, char)] = &[('𐌀', '𐌣'), ('𐌭', '𐌯')];
-
-pub const OLD_NORTH_ARABIAN: &'static [(char, char)] = &[('đȘ€', 'đȘŸ')];
-
-pub const OLD_PERMIC: &'static [(char, char)] = &[('𐍐', '\u{1037a}')];
-
-pub const OLD_PERSIAN: &'static [(char, char)] = &[('𐎠', '𐏃'), ('𐏈', '𐏕')];
-
-pub const OLD_SOGDIAN: &'static [(char, char)] = &[('đŒ€', 'đŒ§')];
-
-pub const OLD_SOUTH_ARABIAN: &'static [(char, char)] = &[('𐩠', '𐩿')];
-
-pub const OLD_TURKIC: &'static [(char, char)] = &[('𐰀', '𐱈')];
-
-pub const OLD_UYGHUR: &'static [(char, char)] = &[('đœ°', 'đŸ‰')];
-
-pub const ORIYA: &'static [(char, char)] = &[
-    ('\u{b01}', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('\u{b3c}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', '\u{b63}'),
-    ('à­Š', 'à­·'),
-];
-
-pub const OSAGE: &'static [(char, char)] = &[('𐒰', '𐓓'), ('𐓘', '𐓻')];
-
-pub const OSMANYA: &'static [(char, char)] = &[('𐒀', '𐒝'), ('𐒠', '𐒩')];
-
-pub const PAHAWH_HMONG: &'static [(char, char)] =
-    &[('𖬀', '𖭅'), ('𖭐', '𖭙'), ('𖭛', '𖭡'), ('𖭣', 'đ–­·'), ('đ–­œ', '𖼏')];
-
-pub const PALMYRENE: &'static [(char, char)] = &[('𐥠', '𐥿')];
-
-pub const PAU_CIN_HAU: &'static [(char, char)] = &[('đ‘«€', 'đ‘«ž')];
-
-pub const PHAGS_PA: &'static [(char, char)] = &[('êĄ€', 'êĄ·')];
-
-pub const PHOENICIAN: &'static [(char, char)] = &[('𐀀', '𐀛'), ('đ€Ÿ', 'đ€Ÿ')];
-
-pub const PSALTER_PAHLAVI: &'static [(char, char)] =
-    &[('𐼀', '𐼑'), ('𐼙', '𐼜'), ('𐟩', '𐟯')];
-
-pub const REJANG: &'static [(char, char)] = &[('ꀰ', '\u{a953}'), ('ꄟ', 'ꄟ')];
-
-pub const RUNIC: &'static [(char, char)] = &[('ᚠ', 'á›Ș'), ('᛼', '᛾')];
-
-pub const SAMARITAN: &'static [(char, char)] = &[('ࠀ', '\u{82d}'), ('à °', 'à Ÿ')];
-
-pub const SAURASHTRA: &'static [(char, char)] =
-    &[('êą€', '\u{a8c5}'), ('êŁŽ', 'êŁ™')];
-
-pub const SHARADA: &'static [(char, char)] = &[('\u{11180}', '𑇟')];
-
-pub const SHAVIAN: &'static [(char, char)] = &[('𐑐', '𐑿')];
-
-pub const SIDDHAM: &'static [(char, char)] =
-    &[('𑖀', '\u{115b5}'), ('𑖾', '\u{115dd}')];
-
-pub const SIGNWRITING: &'static [(char, char)] =
-    &[('𝠀', 'đȘ‹'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}')];
-
-pub const SINHALA: &'static [(char, char)] = &[
-    ('\u{d81}', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Š', 'à·Ż'),
-    ('à·Č', 'à·Ž'),
-    ('𑇡', '𑇮'),
-];
-
-pub const SOGDIAN: &'static [(char, char)] = &[('đŒ°', 'đœ™')];
-
-pub const SORA_SOMPENG: &'static [(char, char)] = &[('𑃐', '𑃹'), ('𑃰', 'đ‘ƒč')];
-
-pub const SOYOMBO: &'static [(char, char)] = &[('𑩐', 'đ‘Șą')];
-
-pub const SUNDANESE: &'static [(char, char)] =
-    &[('\u{1b80}', 'Ἷ'), ('᳀', '᳇')];
-
-pub const SUNUWAR: &'static [(char, char)] = &[('𑯀', '𑯡'), ('𑯰', 'đ‘Żč')];
-
-pub const SYLOTI_NAGRI: &'static [(char, char)] = &[('ꠀ', '\u{a82c}')];
-
-pub const SYRIAC: &'static [(char, char)] =
-    &[('܀', '܍'), ('\u{70f}', '\u{74a}'), ('ʍ', 'ʏ'), ('àĄ ', 'àĄȘ')];
-
-pub const TAGALOG: &'static [(char, char)] = &[('ᜀ', '\u{1715}'), ('ᜟ', 'ᜟ')];
-
-pub const TAGBANWA: &'static [(char, char)] =
-    &[('ᝠ', 'ᝬ'), ('᝼', 'ᝰ'), ('\u{1772}', '\u{1773}')];
-
-pub const TAI_LE: &'static [(char, char)] = &[('ᄐ', 'ᄭ'), ('ᄰ', 'ᄎ')];
-
-pub const TAI_THAM: &'static [(char, char)] = &[
-    ('áš ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('áȘ ', 'áȘ­'),
-];
-
-pub const TAI_VIET: &'static [(char, char)] = &[('êȘ€', 'ꫂ'), ('ꫛ', '꫟')];
-
-pub const TAKRI: &'static [(char, char)] = &[('𑚀', 'đ‘šč'), ('𑛀', '𑛉')];
-
-pub const TAMIL: &'static [(char, char)] = &[
-    ('\u{b82}', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('àŻ', 'àŻ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('àŻŠ', 'àŻș'),
-    ('𑿀', '𑿱'),
-    ('𑿿', '𑿿'),
-];
-
-pub const TANGSA: &'static [(char, char)] = &[('đ–©°', 'đ–ȘŸ'), ('đ–«€', '𖫉')];
-
-pub const TANGUT: &'static [(char, char)] =
-    &[('𖿠', '𖿠'), ('𗀀', 'đ˜Ÿ·'), ('𘠀', '𘫿'), ('𘮀', '𘎈')];
-
-pub const TELUGU: &'static [(char, char)] = &[
-    ('\u{c00}', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('\u{c3c}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', '\u{c63}'),
-    ('ొ', 'à±Ż'),
-    ('à±·', 'à±ż'),
-];
-
-pub const THAANA: &'static [(char, char)] = &[('Ț€', 'Ț±')];
-
-pub const THAI: &'static [(char, char)] = &[('àž', '\u{e3a}'), ('àč€', 'àč›')];
-
-pub const TIBETAN: &'static [(char, char)] = &[
-    ('àŒ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('\u{f71}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('àŸŸ', 'àżŒ'),
-    ('àżŽ', 'àż”'),
-    ('àż™', 'àżš'),
-];
-
-pub const TIFINAGH: &'static [(char, char)] =
-    &[('⎰', '┧'), ('┯', '┰'), ('\u{2d7f}', '\u{2d7f}')];
-
-pub const TIRHUTA: &'static [(char, char)] = &[('𑒀', '𑓇'), ('𑓐', '𑓙')];
-
-pub const TODHRI: &'static [(char, char)] = &[('𐗀', '𐗳')];
-
-pub const TOTO: &'static [(char, char)] = &[('𞊐', '\u{1e2ae}')];
-
-pub const TULU_TIGALARI: &'static [(char, char)] = &[
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏕'),
-    ('𑏗', '𑏘'),
-    ('\u{113e1}', '\u{113e2}'),
-];
-
-pub const UGARITIC: &'static [(char, char)] = &[('𐎀', '𐎝'), ('𐎟', '𐎟')];
-
-pub const VAI: &'static [(char, char)] = &[('ꔀ', 'ꘫ')];
-
-pub const VITHKUQI: &'static [(char, char)] = &[
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-];
-
-pub const WANCHO: &'static [(char, char)] = &[('𞋀', 'đž‹č'), ('𞋿', '𞋿')];
-
-pub const WARANG_CITI: &'static [(char, char)] = &[('𑱠', 'đ‘ŁČ'), ('𑣿', '𑣿')];
-
-pub const YEZIDI: &'static [(char, char)] =
-    &[('đș€', 'đș©'), ('\u{10eab}', 'đș­'), ('đș°', 'đș±')];
-
-pub const YI: &'static [(char, char)] = &[('ꀀ', 'ꒌ'), ('꒐', '꓆')];
-
-pub const ZANABAZAR_SQUARE: &'static [(char, char)] = &[('𑹀', '\u{11a47}')];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script_extension.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script_extension.rs
deleted file mode 100644
index e3f492e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script_extension.rs
+++ /dev/null
@@ -1,1718 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate script-extension ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("Adlam", ADLAM),
-    ("Ahom", AHOM),
-    ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS),
-    ("Arabic", ARABIC),
-    ("Armenian", ARMENIAN),
-    ("Avestan", AVESTAN),
-    ("Balinese", BALINESE),
-    ("Bamum", BAMUM),
-    ("Bassa_Vah", BASSA_VAH),
-    ("Batak", BATAK),
-    ("Bengali", BENGALI),
-    ("Bhaiksuki", BHAIKSUKI),
-    ("Bopomofo", BOPOMOFO),
-    ("Brahmi", BRAHMI),
-    ("Braille", BRAILLE),
-    ("Buginese", BUGINESE),
-    ("Buhid", BUHID),
-    ("Canadian_Aboriginal", CANADIAN_ABORIGINAL),
-    ("Carian", CARIAN),
-    ("Caucasian_Albanian", CAUCASIAN_ALBANIAN),
-    ("Chakma", CHAKMA),
-    ("Cham", CHAM),
-    ("Cherokee", CHEROKEE),
-    ("Chorasmian", CHORASMIAN),
-    ("Common", COMMON),
-    ("Coptic", COPTIC),
-    ("Cuneiform", CUNEIFORM),
-    ("Cypriot", CYPRIOT),
-    ("Cypro_Minoan", CYPRO_MINOAN),
-    ("Cyrillic", CYRILLIC),
-    ("Deseret", DESERET),
-    ("Devanagari", DEVANAGARI),
-    ("Dives_Akuru", DIVES_AKURU),
-    ("Dogra", DOGRA),
-    ("Duployan", DUPLOYAN),
-    ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS),
-    ("Elbasan", ELBASAN),
-    ("Elymaic", ELYMAIC),
-    ("Ethiopic", ETHIOPIC),
-    ("Garay", GARAY),
-    ("Georgian", GEORGIAN),
-    ("Glagolitic", GLAGOLITIC),
-    ("Gothic", GOTHIC),
-    ("Grantha", GRANTHA),
-    ("Greek", GREEK),
-    ("Gujarati", GUJARATI),
-    ("Gunjala_Gondi", GUNJALA_GONDI),
-    ("Gurmukhi", GURMUKHI),
-    ("Gurung_Khema", GURUNG_KHEMA),
-    ("Han", HAN),
-    ("Hangul", HANGUL),
-    ("Hanifi_Rohingya", HANIFI_ROHINGYA),
-    ("Hanunoo", HANUNOO),
-    ("Hatran", HATRAN),
-    ("Hebrew", HEBREW),
-    ("Hiragana", HIRAGANA),
-    ("Imperial_Aramaic", IMPERIAL_ARAMAIC),
-    ("Inherited", INHERITED),
-    ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI),
-    ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN),
-    ("Javanese", JAVANESE),
-    ("Kaithi", KAITHI),
-    ("Kannada", KANNADA),
-    ("Katakana", KATAKANA),
-    ("Kawi", KAWI),
-    ("Kayah_Li", KAYAH_LI),
-    ("Kharoshthi", KHAROSHTHI),
-    ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT),
-    ("Khmer", KHMER),
-    ("Khojki", KHOJKI),
-    ("Khudawadi", KHUDAWADI),
-    ("Kirat_Rai", KIRAT_RAI),
-    ("Lao", LAO),
-    ("Latin", LATIN),
-    ("Lepcha", LEPCHA),
-    ("Limbu", LIMBU),
-    ("Linear_A", LINEAR_A),
-    ("Linear_B", LINEAR_B),
-    ("Lisu", LISU),
-    ("Lycian", LYCIAN),
-    ("Lydian", LYDIAN),
-    ("Mahajani", MAHAJANI),
-    ("Makasar", MAKASAR),
-    ("Malayalam", MALAYALAM),
-    ("Mandaic", MANDAIC),
-    ("Manichaean", MANICHAEAN),
-    ("Marchen", MARCHEN),
-    ("Masaram_Gondi", MASARAM_GONDI),
-    ("Medefaidrin", MEDEFAIDRIN),
-    ("Meetei_Mayek", MEETEI_MAYEK),
-    ("Mende_Kikakui", MENDE_KIKAKUI),
-    ("Meroitic_Cursive", MEROITIC_CURSIVE),
-    ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS),
-    ("Miao", MIAO),
-    ("Modi", MODI),
-    ("Mongolian", MONGOLIAN),
-    ("Mro", MRO),
-    ("Multani", MULTANI),
-    ("Myanmar", MYANMAR),
-    ("Nabataean", NABATAEAN),
-    ("Nag_Mundari", NAG_MUNDARI),
-    ("Nandinagari", NANDINAGARI),
-    ("New_Tai_Lue", NEW_TAI_LUE),
-    ("Newa", NEWA),
-    ("Nko", NKO),
-    ("Nushu", NUSHU),
-    ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG),
-    ("Ogham", OGHAM),
-    ("Ol_Chiki", OL_CHIKI),
-    ("Ol_Onal", OL_ONAL),
-    ("Old_Hungarian", OLD_HUNGARIAN),
-    ("Old_Italic", OLD_ITALIC),
-    ("Old_North_Arabian", OLD_NORTH_ARABIAN),
-    ("Old_Permic", OLD_PERMIC),
-    ("Old_Persian", OLD_PERSIAN),
-    ("Old_Sogdian", OLD_SOGDIAN),
-    ("Old_South_Arabian", OLD_SOUTH_ARABIAN),
-    ("Old_Turkic", OLD_TURKIC),
-    ("Old_Uyghur", OLD_UYGHUR),
-    ("Oriya", ORIYA),
-    ("Osage", OSAGE),
-    ("Osmanya", OSMANYA),
-    ("Pahawh_Hmong", PAHAWH_HMONG),
-    ("Palmyrene", PALMYRENE),
-    ("Pau_Cin_Hau", PAU_CIN_HAU),
-    ("Phags_Pa", PHAGS_PA),
-    ("Phoenician", PHOENICIAN),
-    ("Psalter_Pahlavi", PSALTER_PAHLAVI),
-    ("Rejang", REJANG),
-    ("Runic", RUNIC),
-    ("Samaritan", SAMARITAN),
-    ("Saurashtra", SAURASHTRA),
-    ("Sharada", SHARADA),
-    ("Shavian", SHAVIAN),
-    ("Siddham", SIDDHAM),
-    ("SignWriting", SIGNWRITING),
-    ("Sinhala", SINHALA),
-    ("Sogdian", SOGDIAN),
-    ("Sora_Sompeng", SORA_SOMPENG),
-    ("Soyombo", SOYOMBO),
-    ("Sundanese", SUNDANESE),
-    ("Sunuwar", SUNUWAR),
-    ("Syloti_Nagri", SYLOTI_NAGRI),
-    ("Syriac", SYRIAC),
-    ("Tagalog", TAGALOG),
-    ("Tagbanwa", TAGBANWA),
-    ("Tai_Le", TAI_LE),
-    ("Tai_Tham", TAI_THAM),
-    ("Tai_Viet", TAI_VIET),
-    ("Takri", TAKRI),
-    ("Tamil", TAMIL),
-    ("Tangsa", TANGSA),
-    ("Tangut", TANGUT),
-    ("Telugu", TELUGU),
-    ("Thaana", THAANA),
-    ("Thai", THAI),
-    ("Tibetan", TIBETAN),
-    ("Tifinagh", TIFINAGH),
-    ("Tirhuta", TIRHUTA),
-    ("Todhri", TODHRI),
-    ("Toto", TOTO),
-    ("Tulu_Tigalari", TULU_TIGALARI),
-    ("Ugaritic", UGARITIC),
-    ("Vai", VAI),
-    ("Vithkuqi", VITHKUQI),
-    ("Wancho", WANCHO),
-    ("Warang_Citi", WARANG_CITI),
-    ("Yezidi", YEZIDI),
-    ("Yi", YI),
-    ("Zanabazar_Square", ZANABAZAR_SQUARE),
-];
-
-pub const ADLAM: &'static [(char, char)] = &[
-    ('۟', '۟'),
-    ('ـ', 'ـ'),
-    ('⁏', '⁏'),
-    ('âč', 'âč'),
-    ('𞀀', 'đž„‹'),
-    ('𞄐', 'đž„™'),
-    ('𞄞', 'đž„Ÿ'),
-];
-
-pub const AHOM: &'static [(char, char)] =
-    &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')];
-
-pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')];
-
-pub const ARABIC: &'static [(char, char)] = &[
-    ('\u{600}', '\u{604}'),
-    ('ۆ', '\u{6dc}'),
-    ('۞', 'Ûż'),
-    ('ʐ', 'ʿ'),
-    ('àĄ°', 'àąŽ'),
-    ('\u{890}', '\u{891}'),
-    ('\u{897}', '\u{8e1}'),
-    ('\u{8e3}', '\u{8ff}'),
-    ('⁏', '⁏'),
-    ('âč', 'âč'),
-    ('ﭐ', 'ïŻ‚'),
-    ('ïŻ“', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('﷏', '﷏'),
-    ('ï·°', 'ï·ż'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('\u{102e0}', '𐋻'),
-    ('đč ', 'đčŸ'),
-    ('𐻂', '𐻄'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('đž»°', 'đž»±'),
-];
-
-pub const ARMENIAN: &'static [(char, char)] =
-    &[('\u{308}', '\u{308}'), ('Ô±', 'Ֆ'), ('ՙ', '֊'), ('֍', '֏'), ('ïŹ“', 'ïŹ—')];
-
-pub const AVESTAN: &'static [(char, char)] =
-    &[('·', '·'), ('âž°', 'âž±'), ('𐬀', '𐏔'), ('đŹč', '𐏿')];
-
-pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')];
-
-pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖹾')];
-
-pub const BASSA_VAH: &'static [(char, char)] =
-    &[('𖫐', 'đ–«­'), ('\u{16af0}', 'đ–«”')];
-
-pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('áŻŒ', '᯿')];
-
-pub const BENGALI: &'static [(char, char)] = &[
-    ('ʌ', 'ʌ'),
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('àŠ€', 'àŠƒ'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('\u{9bc}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', 'ৎ'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('ড়', 'ঢ়'),
-    ('য়', '\u{9e3}'),
-    ('à§Š', '\u{9fe}'),
-    ('\u{1cd0}', '\u{1cd0}'),
-    ('\u{1cd2}', '\u{1cd2}'),
-    ('\u{1cd5}', '\u{1cd6}'),
-    ('\u{1cd8}', '\u{1cd8}'),
-    ('᳥', '᳥'),
-    ('áłȘ', 'áłȘ'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('áłČ', 'áłČ'),
-    ('áł”', 'áł·'),
-    ('\u{a8f1}', '\u{a8f1}'),
-];
-
-pub const BHAIKSUKI: &'static [(char, char)] =
-    &[('𑰀', '𑰈'), ('𑰊', '\u{11c36}'), ('\u{11c38}', '𑱅'), ('𑱐', '𑱏')];
-
-pub const BOPOMOFO: &'static [(char, char)] = &[
-    ('ˇ', 'ˇ'),
-    ('ˉ', 'ˋ'),
-    ('˙', '˙'),
-    ('ËȘ', 'Ë«'),
-    ('、', '〃'),
-    ('〈', '】'),
-    ('〓', '〟'),
-    ('\u{302a}', '\u{302d}'),
-    ('〰', '〰'),
-    ('〷', '〷'),
-    ('・', '・'),
-    ('ㄅ', 'ㄯ'),
-    ('ㆠ', 'ㆿ'),
-    ('ïč…', 'ïč†'),
-    ('ïœĄ', ''),
-];
-
-pub const BRAHMI: &'static [(char, char)] =
-    &[('𑀀', '𑁍'), ('𑁒', '𑁔'), ('\u{1107f}', '\u{1107f}')];
-
-pub const BRAILLE: &'static [(char, char)] = &[('⠀', '⣿')];
-
-pub const BUGINESE: &'static [(char, char)] =
-    &[('Ṁ', '\u{1a1b}'), ('᚞', 'ṟ'), ('ꧏ', 'ꧏ')];
-
-pub const BUHID: &'static [(char, char)] = &[('᜔', '᜶'), ('ᝀ', '\u{1753}')];
-
-pub const CANADIAN_ABORIGINAL: &'static [(char, char)] =
-    &[('᐀', 'ᙿ'), ('áą°', 'ᣔ'), ('đ‘Ș°', 'đ‘Șż')];
-
-pub const CARIAN: &'static [(char, char)] =
-    &[('·', '·'), ('⁚', '⁚'), ('⁝', '⁝'), ('âž±', 'âž±'), ('𐊠', '𐋐')];
-
-pub const CAUCASIAN_ALBANIAN: &'static [(char, char)] = &[
-    ('\u{304}', '\u{304}'),
-    ('\u{331}', '\u{331}'),
-    ('\u{35e}', '\u{35e}'),
-    ('𐔰', '𐕣'),
-    ('𐕯', '𐕯'),
-];
-
-pub const CHAKMA: &'static [(char, char)] =
-    &[('à§Š', 'à§Ż'), ('၀', '၉'), ('\u{11100}', '\u{11134}'), ('đ‘„¶', '𑅇')];
-
-pub const CHAM: &'static [(char, char)] =
-    &[('Ꚁ', '\u{aa36}'), ('ꩀ', 'ꩍ'), ('꩐', '꩙'), ('꩜', '꩟')];
-
-pub const CHEROKEE: &'static [(char, char)] = &[
-    ('\u{300}', '\u{302}'),
-    ('\u{304}', '\u{304}'),
-    ('\u{30b}', '\u{30c}'),
-    ('\u{323}', '\u{324}'),
-    ('\u{330}', '\u{331}'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ê­°', 'êźż'),
-];
-
-pub const CHORASMIAN: &'static [(char, char)] = &[('đŸ°', '𐿋')];
-
-pub const COMMON: &'static [(char, char)] = &[
-    ('\0', '@'),
-    ('[', '`'),
-    ('{', '©'),
-    ('«', '¶'),
-    ('¸', '¹'),
-    ('»', '¿'),
-    ('×', '×'),
-    ('÷', '÷'),
-    ('Êč', 'Ê»'),
-    ('Êœ', 'ˆ'),
-    ('ˈ', 'ˈ'),
-    ('ˌ', 'ˌ'),
-    ('ˎ', '˖'),
-    ('˘', '˘'),
-    ('˚', '˟'),
-    ('Ë„', 'Ë©'),
-    ('ËŹ', 'Ëż'),
-    ('ÍŸ', 'ÍŸ'),
-    ('΅', '΅'),
-    ('·', '·'),
-    ('\u{605}', '\u{605}'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('àžż', 'àžż'),
-    ('àż•', 'àż˜'),
-    ('\u{2000}', '\u{200b}'),
-    ('\u{200e}', '\u{202e}'),
-    ('‰', '⁎'),
-    ('⁐', '⁙'),
-    ('⁛', '⁜'),
-    ('⁞', '\u{2064}'),
-    ('\u{2066}', '⁰'),
-    ('⁎', ' '),
-    ('₀', '₎'),
-    ('₠', '⃀'),
-    ('℀', 'â„„'),
-    ('℧', '℩'),
-    ('ℬ', 'ℱ'),
-    ('ℳ', '⅍'),
-    ('⅏', '⅟'),
-    ('↉', '↋'),
-    ('←', '␩'),
-    ('⑀', '⑊'),
-    ('①', '⟿'),
-    (' ', 'â­ł'),
-    ('â­¶', '⼕'),
-    ('⼗', '⯿'),
-    ('⾀', '⾖'),
-    ('➘', '➯'),
-    ('âžČ', 'âž»'),
-    ('âžœ', 'âč€'),
-    ('âč‚', 'âč‚'),
-    ('âč„', 'âč'),
-    ('\u{3000}', '\u{3000}'),
-    ('〄', '〄'),
-    ('〒', '〒'),
-    ('〠', '〠'),
-    ('〶', '〶'),
-    ('㉈', '㉟'),
-    ('㉿', '㉿'),
-    ('㊱', '㊿'),
-    ('㋌', '㋏'),
-    ('ă±', 'ăș'),
-    ('㎀', '㏟'),
-    ('㏿', '㏿'),
-    ('䷀', 'ä·ż'),
-    ('꜈', 'êœĄ'),
-    ('ꞈ', '꞊'),
-    ('꭛', '꭛'),
-    ('ê­Ș', 'ê­«'),
-    ('', 'ïž™'),
-    ('ïž°', 'ïč„'),
-    ('ïč‡', 'ïč’'),
-    ('ïč”', 'ïčŠ'),
-    ('ïčš', 'ïč«'),
-    ('\u{feff}', '\u{feff}'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïż ', 'ïżŠ'),
-    ('ïżš', 'ïżź'),
-    ('\u{fff9}', 'ïżœ'),
-    ('𐆐', '𐆜'),
-    ('𐇐', 'đ‡Œ'),
-    ('𜰀', 'đœłč'),
-    ('𜮀', 'đœșł'),
-    ('đœœ', '𜿃'),
-    ('𝀀', 'đƒ”'),
-    ('𝄀', '𝄩'),
-    ('đ„©', '\u{1d166}'),
-    ('đ…Ș', '\u{1d17a}'),
-    ('𝆃', '𝆄'),
-    ('𝆌', 'đ†©'),
-    ('𝆺𝅥', 'đ‡Ș'),
-    ('𝋀', '𝋓'),
-    ('𝋠', '𝋳'),
-    ('𝌀', '𝍖'),
-    ('đČ', '𝍾'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝟋'),
-    ('𝟎', '𝟿'),
-    ('đž±±', 'đžČŽ'),
-    ('𞮁', 'đžŽœ'),
-    ('🀀', 'đŸ€«'),
-    ('🀰', '🂓'),
-    ('🂠', '🂼'),
-    ('đŸ‚±', '🂿'),
-    ('🃁', '🃏'),
-    ('🃑', 'đŸƒ”'),
-    ('🄀', '🆭'),
-    ('🇩', '🇿'),
-    ('🈁', '🈂'),
-    ('🈐', 'đŸˆ»'),
-    ('🉀', '🉈'),
-    ('🉠', 'đŸ‰„'),
-    ('🌀', '🛗'),
-    ('🛜', '🛬'),
-    ('🛰', 'đŸ›Œ'),
-    ('🜀', 'đŸ¶'),
-    ('đŸ»', '🟙'),
-    ('🟠', 'đŸŸ«'),
-    ('🟰', '🟰'),
-    ('🠀', '🠋'),
-    ('🠐', '🡇'),
-    ('🡐', '🡙'),
-    ('🡠', '🱇'),
-    ('🱐', '🱭'),
-    ('🱰', 'đŸą»'),
-    ('🣀', '🣁'),
-    ('đŸ€€', 'đŸ©“'),
-    ('đŸ© ', 'đŸ©­'),
-    ('đŸ©°', 'đŸ©Œ'),
-    ('đŸȘ€', 'đŸȘ‰'),
-    ('đŸȘ', 'đŸ«†'),
-    ('đŸ«Ž', 'đŸ«œ'),
-    ('đŸ«Ÿ', 'đŸ«©'),
-    ('đŸ«°', 'đŸ«ž'),
-    ('🬀', '🼒'),
-    ('🼔', 'đŸŻč'),
-    ('\u{e0001}', '\u{e0001}'),
-    ('\u{e0020}', '\u{e007f}'),
-];
-
-pub const COPTIC: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('\u{300}', '\u{300}'),
-    ('\u{304}', '\u{305}'),
-    ('\u{307}', '\u{307}'),
-    ('ÍŽ', 'Í”'),
-    ('Ïą', 'ÏŻ'),
-    ('âȀ', 'âłł'),
-    ('âłč', 'âłż'),
-    ('⾗', '⾗'),
-    ('\u{102e0}', '𐋻'),
-];
-
-pub const CUNEIFORM: &'static [(char, char)] =
-    &[('𒀀', '𒎙'), ('𒐀', '𒑼'), ('𒑰', '𒑮'), ('𒒀', '𒕃')];
-
-pub const CYPRIOT: &'static [(char, char)] = &[
-    ('𐄀', '𐄂'),
-    ('𐄇', '𐄳'),
-    ('𐄷', '𐄿'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐠿'),
-];
-
-pub const CYPRO_MINOAN: &'static [(char, char)] = &[('𐄀', '𐄁'), ('đ’Ÿ', 'đ’żČ')];
-
-pub const CYRILLIC: &'static [(char, char)] = &[
-    ('ʌ', 'ʌ'),
-    ('\u{300}', '\u{302}'),
-    ('\u{304}', '\u{304}'),
-    ('\u{306}', '\u{306}'),
-    ('\u{308}', '\u{308}'),
-    ('\u{30b}', '\u{30b}'),
-    ('\u{311}', '\u{311}'),
-    ('Ѐ', 'ԯ'),
-    ('áȀ', 'áȊ'),
-    ('Ꭻ', 'Ꭻ'),
-    ('ᔞ', 'ᔞ'),
-    ('\u{1df8}', '\u{1df8}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('âčƒ', 'âčƒ'),
-    ('Ꙁ', '\u{a69f}'),
-    ('\u{fe2e}', '\u{fe2f}'),
-    ('𞀰', '𞁭'),
-    ('\u{1e08f}', '\u{1e08f}'),
-];
-
-pub const DESERET: &'static [(char, char)] = &[('𐐀', '𐑏')];
-
-pub const DEVANAGARI: &'static [(char, char)] = &[
-    ('ʌ', 'ʌ'),
-    ('\u{900}', '\u{952}'),
-    ('\u{955}', 'à„ż'),
-    ('\u{1cd0}', 'áł¶'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{20f0}', '\u{20f0}'),
-    ('ê °', 'ê č'),
-    ('\u{a8e0}', '\u{a8ff}'),
-    ('𑬀', '𑬉'),
-];
-
-pub const DIVES_AKURU: &'static [(char, char)] = &[
-    ('đ‘€€', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '𑄆'),
-    ('𑄐', 'đ‘„™'),
-];
-
-pub const DOGRA: &'static [(char, char)] =
-    &[('à„€', 'à„Ż'), ('ê °', 'ê č'), ('𑠀', 'đ‘ »')];
-
-pub const DUPLOYAN: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('\u{307}', '\u{308}'),
-    ('\u{30a}', '\u{30a}'),
-    ('\u{323}', '\u{324}'),
-    ('➌', '➌'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('đ›Čœ', '\u{1bca3}'),
-];
-
-pub const EGYPTIAN_HIEROGLYPHS: &'static [(char, char)] =
-    &[('𓀀', '\u{13455}'), ('𓑠', 'đ”ș')];
-
-pub const ELBASAN: &'static [(char, char)] =
-    &[('·', '·'), ('\u{305}', '\u{305}'), ('𐔀', '𐔧')];
-
-pub const ELYMAIC: &'static [(char, char)] = &[('𐿠', '𐿶')];
-
-pub const ETHIOPIC: &'static [(char, char)] = &[
-    ('\u{30e}', '\u{30e}'),
-    ('ሀ', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('\u{135d}', 'ፌ'),
-    ('ᎀ', '᎙'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-];
-
-pub const GARAY: &'static [(char, char)] = &[
-    ('ی', 'ی'),
-    ('ۛ', 'ۛ'),
-    ('۟', '۟'),
-    ('𐔀', '𐔄'),
-    ('\u{10d69}', '𐶅'),
-    ('𐶎', 'đ¶'),
-];
-
-pub const GEORGIAN: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('։', '։'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'ჿ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('⁚', '⁚'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('âž±', 'âž±'),
-];
-
-pub const GLAGOLITIC: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('\u{303}', '\u{303}'),
-    ('\u{305}', '\u{305}'),
-    ('\u{484}', '\u{484}'),
-    ('\u{487}', '\u{487}'),
-    ('։', '։'),
-    ('჻', '჻'),
-    ('⁚', '⁚'),
-    ('Ⰰ', 'ⱟ'),
-    ('âčƒ', 'âčƒ'),
-    ('\u{a66f}', '\u{a66f}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-];
-
-pub const GOTHIC: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('\u{304}', '\u{305}'),
-    ('\u{308}', '\u{308}'),
-    ('\u{331}', '\u{331}'),
-    ('𐌰', '𐍊'),
-];
-
-pub const GRANTHA: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('àŻŠ', 'àŻł'),
-    ('\u{1cd0}', '\u{1cd0}'),
-    ('\u{1cd2}', '᳓'),
-    ('áłČ', '\u{1cf4}'),
-    ('\u{1cf8}', '\u{1cf9}'),
-    ('\u{20f0}', '\u{20f0}'),
-    ('\u{11300}', '𑌃'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('\u{1133b}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('𑍐', '𑍐'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍝', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('𑿐', '𑿑'),
-    ('𑿓', '𑿓'),
-];
-
-pub const GREEK: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('\u{300}', '\u{301}'),
-    ('\u{304}', '\u{304}'),
-    ('\u{306}', '\u{306}'),
-    ('\u{308}', '\u{308}'),
-    ('\u{313}', '\u{313}'),
-    ('\u{342}', '\u{342}'),
-    ('\u{345}', '\u{345}'),
-    ('Ͱ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('΄', '΄'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'ÏĄ'),
-    ('ϰ', 'Ïż'),
-    ('ᎊ', 'áŽȘ'),
-    ('ᔝ', 'ᔥ'),
-    ('ᔊ', 'á”Ș'),
-    ('á¶ż', '\u{1dc1}'),
-    ('ጀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', 'ῄ'),
-    ('ῆ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('῝', '`'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŸ'),
-    ('⁝', '⁝'),
-    ('℩', '℩'),
-    ('ê­„', 'ê­„'),
-    ('𐅀', '𐆎'),
-    ('𐆠', '𐆠'),
-    ('𝈀', '𝉅'),
-];
-
-pub const GUJARATI: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('\u{a81}', 'àȘƒ'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('\u{abc}', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('ૐ', 'ૐ'),
-    ('à« ', '\u{ae3}'),
-    ('૊', '૱'),
-    ('à«č', '\u{aff}'),
-    ('ê °', 'ê č'),
-];
-
-pub const GUNJALA_GONDI: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('à„€', 'à„„'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', 'đ‘¶˜'),
-    ('đ‘¶ ', 'đ‘¶©'),
-];
-
-pub const GURMUKHI: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('\u{a01}', 'àšƒ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('੊', '੶'),
-    ('ê °', 'ê č'),
-];
-
-pub const GURUNG_KHEMA: &'static [(char, char)] = &[('à„„', 'à„„'), ('𖄀', 'đ–„č')];
-
-pub const HAN: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('âș€', 'âș™'),
-    ('âș›', '⻳'),
-    ('⌀', '⿕'),
-    ('âż°', 'âżż'),
-    ('、', '〃'),
-    ('々', '】'),
-    ('〓', '〟'),
-    ('〡', '\u{302d}'),
-    ('〰', '〰'),
-    ('〷', '〿'),
-    ('・', '・'),
-    ('㆐', '㆟'),
-    ('㇀', '㇄'),
-    ('㇯', '㇯'),
-    ('㈠', '㉇'),
-    ('㊀', '㊰'),
-    ('㋀', '㋋'),
-    ('㋿', '㋿'),
-    ('㍘', '㍰'),
-    ('ă»', '㍿'),
-    ('㏠', 'ăŸ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', '鿿'),
-    ('꜀', '꜇'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïč…', 'ïč†'),
-    ('ïœĄ', ''),
-    ('𖿱', '𖿣'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('𝍠', 'đ±'),
-    ('🉐', '🉑'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const HANGUL: &'static [(char, char)] = &[
-    ('ᄀ', 'ᇿ'),
-    ('、', '〃'),
-    ('〈', '】'),
-    ('〓', '〟'),
-    ('\u{302e}', '〰'),
-    ('〷', '〷'),
-    ('・', '・'),
-    ('ㄱ', 'ㆎ'),
-    ('㈀', '㈞'),
-    ('㉠', 'ă‰Ÿ'),
-    ('ꄠ', 'ꄌ'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('ïč…', 'ïč†'),
-    ('ïœĄ', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-];
-
-pub const HANIFI_ROHINGYA: &'static [(char, char)] = &[
-    ('ی', 'ی'),
-    ('ۛ', 'ۛ'),
-    ('۟', '۟'),
-    ('ـ', 'ـ'),
-    ('۔', '۔'),
-    ('𐮀', '\u{10d27}'),
-    ('𐎰', 'đŽč'),
-];
-
-pub const HANUNOO: &'static [(char, char)] = &[('ᜠ', '᜶')];
-
-pub const HATRAN: &'static [(char, char)] =
-    &[('𐣠', 'đŁČ'), ('𐣎', '𐣔'), ('𐣻', '𐣿')];
-
-pub const HEBREW: &'static [(char, char)] = &[
-    ('\u{307}', '\u{308}'),
-    ('\u{591}', '\u{5c7}'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚŽ'),
-    ('ïŹ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ﭏ'),
-];
-
-pub const HIRAGANA: &'static [(char, char)] = &[
-    ('、', '〃'),
-    ('〈', '】'),
-    ('〓', '〟'),
-    ('〰', '〔'),
-    ('〷', '〷'),
-    ('ă€Œ', 'ă€œ'),
-    ('ぁ', 'ゖ'),
-    ('\u{3099}', '゠'),
-    ('・', 'ăƒŒ'),
-    ('ïč…', 'ïč†'),
-    ('ïœĄ', ''),
-    ('', ''),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('𛀁', '𛄟'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('🈀', '🈀'),
-];
-
-pub const IMPERIAL_ARAMAIC: &'static [(char, char)] =
-    &[('𐡀', '𐡕'), ('𐡗', '𐡟')];
-
-pub const INHERITED: &'static [(char, char)] = &[
-    ('\u{30f}', '\u{30f}'),
-    ('\u{312}', '\u{312}'),
-    ('\u{314}', '\u{31f}'),
-    ('\u{321}', '\u{322}'),
-    ('\u{326}', '\u{32c}'),
-    ('\u{32f}', '\u{32f}'),
-    ('\u{332}', '\u{341}'),
-    ('\u{343}', '\u{344}'),
-    ('\u{346}', '\u{357}'),
-    ('\u{359}', '\u{35d}'),
-    ('\u{35f}', '\u{362}'),
-    ('\u{953}', '\u{954}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1dc2}', '\u{1df7}'),
-    ('\u{1df9}', '\u{1df9}'),
-    ('\u{1dfb}', '\u{1dff}'),
-    ('\u{200c}', '\u{200d}'),
-    ('\u{20d0}', '\u{20ef}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2d}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d167}', '\u{1d169}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const INSCRIPTIONAL_PAHLAVI: &'static [(char, char)] =
-    &[('𐭠', 'đ­Č'), ('𐭞', '𐭿')];
-
-pub const INSCRIPTIONAL_PARTHIAN: &'static [(char, char)] =
-    &[('𐭀', '𐭕'), ('𐭘', '𐭟')];
-
-pub const JAVANESE: &'static [(char, char)] =
-    &[('\u{a980}', '꧍'), ('ꧏ', '꧙'), ('꧞', '꧟')];
-
-pub const KAITHI: &'static [(char, char)] = &[
-    ('à„Š', 'à„Ż'),
-    ('âž±', 'âž±'),
-    ('ê °', 'ê č'),
-    ('\u{11080}', '\u{110c2}'),
-    ('\u{110cd}', '\u{110cd}'),
-];
-
-pub const KANNADA: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('àȀ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('\u{cbc}', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('àł', 'àłž'),
-    ('àł ', '\u{ce3}'),
-    ('àłŠ', 'àłŻ'),
-    ('àł±', 'àłł'),
-    ('\u{1cd0}', '\u{1cd0}'),
-    ('\u{1cd2}', '᳓'),
-    ('\u{1cda}', '\u{1cda}'),
-    ('áłČ', 'áłČ'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('ê °', 'ê ”'),
-];
-
-pub const KATAKANA: &'static [(char, char)] = &[
-    ('\u{305}', '\u{305}'),
-    ('\u{323}', '\u{323}'),
-    ('、', '〃'),
-    ('〈', '】'),
-    ('〓', '〟'),
-    ('〰', '〔'),
-    ('〷', '〷'),
-    ('ă€Œ', 'ă€œ'),
-    ('\u{3099}', '゜'),
-    ('゠', 'ヿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㋐', 'ă‹Ÿ'),
-    ('㌀', '㍗'),
-    ('ïč…', 'ïč†'),
-    ('ïœĄ', '\u{ff9f}'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛀀'),
-    ('𛄠', '𛄱'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-];
-
-pub const KAWI: &'static [(char, char)] =
-    &[('\u{11f00}', 'đ‘Œ'), ('đ‘Œ’', '\u{11f3a}'), ('đ‘ŒŸ', '\u{11f5a}')];
-
-pub const KAYAH_LI: &'static [(char, char)] = &[('ꀀ', 'ê€Ż')];
-
-pub const KHAROSHTHI: &'static [(char, char)] = &[
-    ('𐹀', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '𐩈'),
-    ('𐩐', '𐩘'),
-];
-
-pub const KHITAN_SMALL_SCRIPT: &'static [(char, char)] =
-    &[('\u{16fe4}', '\u{16fe4}'), ('𘬀', '𘳕'), ('𘳿', '𘳿')];
-
-pub const KHMER: &'static [(char, char)] =
-    &[('ក', '\u{17dd}'), ('០', '៩'), ('៰', 'áŸč'), ('á§ ', 'á§ż')];
-
-pub const KHOJKI: &'static [(char, char)] =
-    &[('૊', 'à«Ż'), ('ê °', 'ê č'), ('𑈀', '𑈑'), ('𑈓', '\u{11241}')];
-
-pub const KHUDAWADI: &'static [(char, char)] =
-    &[('à„€', 'à„„'), ('ê °', 'ê č'), ('𑊰', '\u{112ea}'), ('𑋰', 'đ‘‹č')];
-
-pub const KIRAT_RAI: &'static [(char, char)] = &[('𖔀', 'đ–”č')];
-
-pub const LAO: &'static [(char, char)] = &[
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('\u{ec8}', '\u{ece}'),
-    ('໐', '໙'),
-    ('ໜ', 'ໟ'),
-];
-
-pub const LATIN: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('·', '·'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', 'Êž'),
-    ('ʌ', 'ʌ'),
-    ('ˇ', 'ˇ'),
-    ('ˉ', 'ˋ'),
-    ('ˍ', 'ˍ'),
-    ('˗', '˗'),
-    ('˙', '˙'),
-    ('Ë ', 'Ë€'),
-    ('\u{300}', '\u{30e}'),
-    ('\u{310}', '\u{311}'),
-    ('\u{313}', '\u{313}'),
-    ('\u{320}', '\u{320}'),
-    ('\u{323}', '\u{325}'),
-    ('\u{32d}', '\u{32e}'),
-    ('\u{330}', '\u{331}'),
-    ('\u{358}', '\u{358}'),
-    ('\u{35e}', '\u{35e}'),
-    ('\u{363}', '\u{36f}'),
-    ('\u{485}', '\u{486}'),
-    ('\u{951}', '\u{952}'),
-    ('჻', '჻'),
-    ('ᮀ', 'ᎄ'),
-    ('ᎏ', 'ᔜ'),
-    ('ᔹ', 'ᔄ'),
-    ('ᔫ', 'ᔷ'),
-    ('á”č', 'á¶Ÿ'),
-    ('\u{1df8}', '\u{1df8}'),
-    ('ᾀ', 'ỿ'),
-    ('\u{202f}', '\u{202f}'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('\u{20f0}', '\u{20f0}'),
-    ('â„Ș', 'Å'),
-    ('â„Č', 'â„Č'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⱡ', 'Ɀ'),
-    ('⾗', '⾗'),
-    ('꜀', '꜇'),
-    ('êœą', 'ꞇ'),
-    ('Ꞌ', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'êŸż'),
-    ('ê€ź', 'ê€ź'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ê­€'),
-    ('ê­Š', 'ê­©'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-];
-
-pub const LEPCHA: &'static [(char, char)] =
-    &[('ᰀ', '\u{1c37}'), ('᰻', '᱉'), ('ᱍ', 'ᱏ')];
-
-pub const LIMBU: &'static [(char, char)] = &[
-    ('à„„', 'à„„'),
-    ('က', 'သ'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('á„€', 'á„€'),
-    ('á„„', 'ᄏ'),
-];
-
-pub const LINEAR_A: &'static [(char, char)] =
-    &[('𐄇', '𐄳'), ('𐘀', 'đœ¶'), ('𐝀', '𐝕'), ('𐝠', '𐝧')];
-
-pub const LINEAR_B: &'static [(char, char)] = &[
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐄀', '𐄂'),
-    ('𐄇', '𐄳'),
-    ('𐄷', '𐄿'),
-];
-
-pub const LISU: &'static [(char, char)] =
-    &[('ÊŒ', 'ÊŒ'), ('ˍ', 'ˍ'), ('《', '》'), ('ꓐ', 'ê“ż'), ('đ‘Ÿ°', 'đ‘Ÿ°')];
-
-pub const LYCIAN: &'static [(char, char)] = &[('⁚', '⁚'), ('𐊀', '𐊜')];
-
-pub const LYDIAN: &'static [(char, char)] =
-    &[('·', '·'), ('âž±', 'âž±'), ('𐀠', 'đ€č'), ('𐀿', '𐀿')];
-
-pub const MAHAJANI: &'static [(char, char)] =
-    &[('·', '·'), ('à„€', 'à„Ż'), ('ê °', 'ê č'), ('𑅐', 'đ‘…¶')];
-
-pub const MAKASAR: &'static [(char, char)] = &[('đ‘» ', '𑻞')];
-
-pub const MALAYALAM: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('\u{d00}', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', 'à”'),
-    ('à””', '\u{d63}'),
-    ('à”Š', 'à”ż'),
-    ('\u{1cda}', '\u{1cda}'),
-    ('áłČ', 'áłČ'),
-    ('ê °', 'ê Č'),
-];
-
-pub const MANDAIC: &'static [(char, char)] =
-    &[('ـ', 'ـ'), ('àĄ€', '\u{85b}'), ('àĄž', 'àĄž')];
-
-pub const MANICHAEAN: &'static [(char, char)] =
-    &[('ـ', 'ـ'), ('𐫀', '\u{10ae6}'), ('𐫫', '𐫶')];
-
-pub const MARCHEN: &'static [(char, char)] =
-    &[('𑱰', 'đ‘ȏ'), ('\u{11c92}', '\u{11ca7}'), ('đ‘Č©', '\u{11cb6}')];
-
-pub const MASARAM_GONDI: &'static [(char, char)] = &[
-    ('à„€', 'à„„'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d47}'),
-    ('𑔐', 'đ‘”™'),
-];
-
-pub const MEDEFAIDRIN: &'static [(char, char)] = &[('đ–č€', 'đ–șš')];
-
-pub const MEETEI_MAYEK: &'static [(char, char)] =
-    &[('ê« ', '\u{aaf6}'), ('êŻ€', '\u{abed}'), ('êŻ°', 'êŻč')];
-
-pub const MENDE_KIKAKUI: &'static [(char, char)] =
-    &[('𞠀', '𞣄'), ('𞣇', '\u{1e8d6}')];
-
-pub const MEROITIC_CURSIVE: &'static [(char, char)] =
-    &[('𐊠', '𐊷'), ('đŠŒ', '𐧏'), ('𐧒', '𐧿')];
-
-pub const MEROITIC_HIEROGLYPHS: &'static [(char, char)] =
-    &[('⁝', '⁝'), ('𐩀', '𐩟')];
-
-pub const MIAO: &'static [(char, char)] =
-    &[('đ–Œ€', 'đ–œŠ'), ('\u{16f4f}', 'đ–Ÿ‡'), ('\u{16f8f}', 'đ–ŸŸ')];
-
-pub const MODI: &'static [(char, char)] =
-    &[('ê °', 'ê č'), ('𑘀', '𑙄'), ('𑙐', '𑙙')];
-
-pub const MONGOLIAN: &'static [(char, char)] = &[
-    ('᠀', '᠙'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', 'áąȘ'),
-    ('\u{202f}', '\u{202f}'),
-    ('、', '。'),
-    ('〈', '》'),
-    ('𑙠', '𑙬'),
-];
-
-pub const MRO: &'static [(char, char)] = &[('đ–©€', 'đ–©ž'), ('đ–© ', 'đ–©©'), ('đ–©ź', 'đ–©Ż')];
-
-pub const MULTANI: &'static [(char, char)] =
-    &[('੊', 'à©Ż'), ('𑊀', '𑊆'), ('𑊈', '𑊈'), ('𑊊', '𑊍'), ('𑊏', '𑊝'), ('𑊟', '𑊩')];
-
-pub const MYANMAR: &'static [(char, char)] =
-    &[('က', '႟'), ('ê€ź', 'ê€ź'), ('ê§ ', 'ê§Ÿ'), ('ê© ', 'ê©ż'), ('𑛐', '𑛣')];
-
-pub const NABATAEAN: &'static [(char, char)] = &[('𐱀', '𐱞'), ('𐱧', '𐱯')];
-
-pub const NAG_MUNDARI: &'static [(char, char)] = &[('𞓐', 'đž“č')];
-
-pub const NANDINAGARI: &'static [(char, char)] = &[
-    ('à„€', 'à„„'),
-    ('àłŠ', 'àłŻ'),
-    ('áł©', 'áł©'),
-    ('áłČ', 'áłČ'),
-    ('áłș', 'áłș'),
-    ('ê °', 'ê ”'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '\u{119d7}'),
-    ('\u{119da}', 'đ‘§€'),
-];
-
-pub const NEW_TAI_LUE: &'static [(char, char)] =
-    &[('ᩀ', 'ካ'), ('ኰ', 'ᧉ'), ('᧐', '᧚'), ('᧞', '᧟')];
-
-pub const NEWA: &'static [(char, char)] = &[('𑐀', '𑑛'), ('𑑝', '𑑡')];
-
-pub const NKO: &'static [(char, char)] = &[
-    ('ی', 'ی'),
-    ('ۛ', 'ۛ'),
-    ('۟', '۟'),
-    ('߀', 'ßș'),
-    ('\u{7fd}', 'ßż'),
-    ('', 'ïŽż'),
-];
-
-pub const NUSHU: &'static [(char, char)] = &[('𖿡', '𖿡'), ('𛅰', '𛋻')];
-
-pub const NYIAKENG_PUACHUE_HMONG: &'static [(char, char)] =
-    &[('𞄀', '𞄬'), ('\u{1e130}', 'đž„œ'), ('𞅀', '𞅉'), ('𞅎', '𞅏')];
-
-pub const OGHAM: &'static [(char, char)] = &[('\u{1680}', '᚜')];
-
-pub const OL_CHIKI: &'static [(char, char)] = &[('᱐', '᱿')];
-
-pub const OL_ONAL: &'static [(char, char)] =
-    &[('à„€', 'à„„'), ('𞗐', 'đž—ș'), ('𞗿', '𞗿')];
-
-pub const OLD_HUNGARIAN: &'static [(char, char)] = &[
-    ('⁚', '⁚'),
-    ('⁝', '⁝'),
-    ('âž±', 'âž±'),
-    ('âč', 'âč'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('đłș', '𐳿'),
-];
-
-pub const OLD_ITALIC: &'static [(char, char)] = &[('𐌀', '𐌣'), ('𐌭', '𐌯')];
-
-pub const OLD_NORTH_ARABIAN: &'static [(char, char)] = &[('đȘ€', 'đȘŸ')];
-
-pub const OLD_PERMIC: &'static [(char, char)] = &[
-    ('·', '·'),
-    ('\u{300}', '\u{300}'),
-    ('\u{306}', '\u{308}'),
-    ('\u{313}', '\u{313}'),
-    ('\u{483}', '\u{483}'),
-    ('𐍐', '\u{1037a}'),
-];
-
-pub const OLD_PERSIAN: &'static [(char, char)] = &[('𐎠', '𐏃'), ('𐏈', '𐏕')];
-
-pub const OLD_SOGDIAN: &'static [(char, char)] = &[('đŒ€', 'đŒ§')];
-
-pub const OLD_SOUTH_ARABIAN: &'static [(char, char)] = &[('𐩠', '𐩿')];
-
-pub const OLD_TURKIC: &'static [(char, char)] =
-    &[('⁚', '⁚'), ('âž°', 'âž°'), ('𐰀', '𐱈')];
-
-pub const OLD_UYGHUR: &'static [(char, char)] =
-    &[('ـ', 'ـ'), ('đ«Č', 'đ«Č'), ('đœ°', 'đŸ‰')];
-
-pub const ORIYA: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('\u{b01}', 'àŹƒ'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('\u{b3c}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', '\u{b63}'),
-    ('à­Š', 'à­·'),
-    ('\u{1cda}', '\u{1cda}'),
-    ('áłČ', 'áłČ'),
-];
-
-pub const OSAGE: &'static [(char, char)] = &[
-    ('\u{301}', '\u{301}'),
-    ('\u{304}', '\u{304}'),
-    ('\u{30b}', '\u{30b}'),
-    ('\u{358}', '\u{358}'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-];
-
-pub const OSMANYA: &'static [(char, char)] = &[('𐒀', '𐒝'), ('𐒠', '𐒩')];
-
-pub const PAHAWH_HMONG: &'static [(char, char)] =
-    &[('𖬀', '𖭅'), ('𖭐', '𖭙'), ('𖭛', '𖭡'), ('𖭣', 'đ–­·'), ('đ–­œ', '𖼏')];
-
-pub const PALMYRENE: &'static [(char, char)] = &[('𐥠', '𐥿')];
-
-pub const PAU_CIN_HAU: &'static [(char, char)] = &[('đ‘«€', 'đ‘«ž')];
-
-pub const PHAGS_PA: &'static [(char, char)] = &[
-    ('᠂', '᠃'),
-    ('᠅', '᠅'),
-    ('\u{202f}', '\u{202f}'),
-    ('。', '。'),
-    ('êĄ€', 'êĄ·'),
-];
-
-pub const PHOENICIAN: &'static [(char, char)] = &[('𐀀', '𐀛'), ('đ€Ÿ', 'đ€Ÿ')];
-
-pub const PSALTER_PAHLAVI: &'static [(char, char)] =
-    &[('ـ', 'ـ'), ('𐼀', '𐼑'), ('𐼙', '𐼜'), ('𐟩', '𐟯')];
-
-pub const REJANG: &'static [(char, char)] = &[('ꀰ', '\u{a953}'), ('ꄟ', 'ꄟ')];
-
-pub const RUNIC: &'static [(char, char)] = &[('ᚠ', '᛾')];
-
-pub const SAMARITAN: &'static [(char, char)] =
-    &[('ࠀ', '\u{82d}'), ('à °', 'à Ÿ'), ('âž±', 'âž±')];
-
-pub const SAURASHTRA: &'static [(char, char)] =
-    &[('êą€', '\u{a8c5}'), ('êŁŽ', 'êŁ™')];
-
-pub const SHARADA: &'static [(char, char)] = &[
-    ('\u{951}', '\u{951}'),
-    ('\u{1cd7}', '\u{1cd7}'),
-    ('\u{1cd9}', '\u{1cd9}'),
-    ('\u{1cdc}', '\u{1cdd}'),
-    ('\u{1ce0}', '\u{1ce0}'),
-    ('ê °', 'ê ”'),
-    ('ê ž', 'ê ž'),
-    ('\u{11180}', '𑇟'),
-];
-
-pub const SHAVIAN: &'static [(char, char)] = &[('·', '·'), ('𐑐', '𐑿')];
-
-pub const SIDDHAM: &'static [(char, char)] =
-    &[('𑖀', '\u{115b5}'), ('𑖾', '\u{115dd}')];
-
-pub const SIGNWRITING: &'static [(char, char)] =
-    &[('𝠀', 'đȘ‹'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}')];
-
-pub const SINHALA: &'static [(char, char)] = &[
-    ('à„€', 'à„„'),
-    ('\u{d81}', 'ඃ'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Š', 'à·Ż'),
-    ('à·Č', 'à·Ž'),
-    ('áłČ', 'áłČ'),
-    ('𑇡', '𑇮'),
-];
-
-pub const SOGDIAN: &'static [(char, char)] = &[('ـ', 'ـ'), ('đŒ°', 'đœ™')];
-
-pub const SORA_SOMPENG: &'static [(char, char)] = &[('𑃐', '𑃹'), ('𑃰', 'đ‘ƒč')];
-
-pub const SOYOMBO: &'static [(char, char)] = &[('𑩐', 'đ‘Șą')];
-
-pub const SUNDANESE: &'static [(char, char)] =
-    &[('\u{1b80}', 'Ἷ'), ('᳀', '᳇')];
-
-pub const SUNUWAR: &'static [(char, char)] = &[
-    ('\u{300}', '\u{301}'),
-    ('\u{303}', '\u{303}'),
-    ('\u{30d}', '\u{30d}'),
-    ('\u{310}', '\u{310}'),
-    ('\u{32d}', '\u{32d}'),
-    ('\u{331}', '\u{331}'),
-    ('𑯀', '𑯡'),
-    ('𑯰', 'đ‘Żč'),
-];
-
-pub const SYLOTI_NAGRI: &'static [(char, char)] =
-    &[('à„€', 'à„„'), ('à§Š', 'à§Ż'), ('ꠀ', '\u{a82c}')];
-
-pub const SYRIAC: &'static [(char, char)] = &[
-    ('\u{303}', '\u{304}'),
-    ('\u{307}', '\u{308}'),
-    ('\u{30a}', '\u{30a}'),
-    ('\u{320}', '\u{320}'),
-    ('\u{323}', '\u{325}'),
-    ('\u{32d}', '\u{32e}'),
-    ('\u{330}', '\u{330}'),
-    ('ی', 'ی'),
-    ('ۛ', '\u{61c}'),
-    ('۟', '۟'),
-    ('ـ', 'ـ'),
-    ('\u{64b}', '\u{655}'),
-    ('\u{670}', '\u{670}'),
-    ('܀', '܍'),
-    ('\u{70f}', '\u{74a}'),
-    ('ʍ', 'ʏ'),
-    ('àĄ ', 'àĄȘ'),
-    ('\u{1df8}', '\u{1df8}'),
-    ('\u{1dfa}', '\u{1dfa}'),
-];
-
-pub const TAGALOG: &'static [(char, char)] =
-    &[('ᜀ', '\u{1715}'), ('ᜟ', 'ᜟ'), ('᜔', '᜶')];
-
-pub const TAGBANWA: &'static [(char, char)] =
-    &[('᜔', '᜶'), ('ᝠ', 'ᝬ'), ('᝼', 'ᝰ'), ('\u{1772}', '\u{1773}')];
-
-pub const TAI_LE: &'static [(char, char)] = &[
-    ('\u{300}', '\u{301}'),
-    ('\u{307}', '\u{308}'),
-    ('\u{30c}', '\u{30c}'),
-    ('၀', '၉'),
-    ('ᄐ', 'ᄭ'),
-    ('ᄰ', 'ᄎ'),
-];
-
-pub const TAI_THAM: &'static [(char, char)] = &[
-    ('áš ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('áȘ ', 'áȘ­'),
-];
-
-pub const TAI_VIET: &'static [(char, char)] = &[('êȘ€', 'ꫂ'), ('ꫛ', '꫟')];
-
-pub const TAKRI: &'static [(char, char)] =
-    &[('à„€', 'à„„'), ('ê °', 'ê č'), ('𑚀', 'đ‘šč'), ('𑛀', '𑛉')];
-
-pub const TAMIL: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('\u{b82}', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('àŻ', 'àŻ'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('àŻŠ', 'àŻș'),
-    ('\u{1cda}', '\u{1cda}'),
-    ('êŁł', 'êŁł'),
-    ('\u{11301}', '\u{11301}'),
-    ('𑌃', '𑌃'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('𑿀', '𑿱'),
-    ('𑿿', '𑿿'),
-];
-
-pub const TANGSA: &'static [(char, char)] = &[('đ–©°', 'đ–ȘŸ'), ('đ–«€', '𖫉')];
-
-pub const TANGUT: &'static [(char, char)] = &[
-    ('âż°', 'âżż'),
-    ('㇯', '㇯'),
-    ('𖿠', '𖿠'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘫿'),
-    ('𘮀', '𘮈'),
-];
-
-pub const TELUGU: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('\u{c00}', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('\u{c3c}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', '\u{c63}'),
-    ('ొ', 'à±Ż'),
-    ('à±·', 'à±ż'),
-    ('\u{1cda}', '\u{1cda}'),
-    ('áłČ', 'áłČ'),
-];
-
-pub const THAANA: &'static [(char, char)] = &[
-    ('ی', 'ی'),
-    ('ۛ', '\u{61c}'),
-    ('۟', '۟'),
-    ('Ù ', 'Ù©'),
-    ('Ț€', 'Ț±'),
-    ('ï·Č', 'ï·Č'),
-    ('﷜', '﷜'),
-];
-
-pub const THAI: &'static [(char, char)] = &[
-    ('ʌ', 'ʌ'),
-    ('˗', '˗'),
-    ('\u{303}', '\u{303}'),
-    ('\u{331}', '\u{331}'),
-    ('àž', '\u{e3a}'),
-    ('àč€', 'àč›'),
-];
-
-pub const TIBETAN: &'static [(char, char)] = &[
-    ('àŒ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('\u{f71}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('àŸŸ', 'àżŒ'),
-    ('àżŽ', 'àż”'),
-    ('àż™', 'àżš'),
-    ('〈', '》'),
-];
-
-pub const TIFINAGH: &'static [(char, char)] = &[
-    ('\u{302}', '\u{302}'),
-    ('\u{304}', '\u{304}'),
-    ('\u{307}', '\u{307}'),
-    ('\u{309}', '\u{309}'),
-    ('⎰', '┧'),
-    ('┯', '┰'),
-    ('\u{2d7f}', '\u{2d7f}'),
-];
-
-pub const TIRHUTA: &'static [(char, char)] = &[
-    ('\u{951}', '\u{952}'),
-    ('à„€', 'à„„'),
-    ('áłČ', 'áłČ'),
-    ('ê °', 'ê č'),
-    ('𑒀', '𑓇'),
-    ('𑓐', '𑓙'),
-];
-
-pub const TODHRI: &'static [(char, char)] = &[
-    ('\u{301}', '\u{301}'),
-    ('\u{304}', '\u{304}'),
-    ('\u{307}', '\u{307}'),
-    ('\u{311}', '\u{311}'),
-    ('\u{313}', '\u{313}'),
-    ('\u{35e}', '\u{35e}'),
-    ('𐗀', '𐗳'),
-];
-
-pub const TOTO: &'static [(char, char)] = &[('ÊŒ', 'ÊŒ'), ('𞊐', '\u{1e2ae}')];
-
-pub const TULU_TIGALARI: &'static [(char, char)] = &[
-    ('àłŠ', 'àłŻ'),
-    ('áłČ', 'áłČ'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('ê °', 'ê ”'),
-    ('\u{a8f1}', '\u{a8f1}'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '𑏕'),
-    ('𑏗', '𑏘'),
-    ('\u{113e1}', '\u{113e2}'),
-];
-
-pub const UGARITIC: &'static [(char, char)] = &[('𐎀', '𐎝'), ('𐎟', '𐎟')];
-
-pub const VAI: &'static [(char, char)] = &[('ꔀ', 'ꘫ')];
-
-pub const VITHKUQI: &'static [(char, char)] = &[
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-];
-
-pub const WANCHO: &'static [(char, char)] = &[('𞋀', 'đž‹č'), ('𞋿', '𞋿')];
-
-pub const WARANG_CITI: &'static [(char, char)] = &[('𑱠', 'đ‘ŁČ'), ('𑣿', '𑣿')];
-
-pub const YEZIDI: &'static [(char, char)] = &[
-    ('ی', 'ی'),
-    ('ۛ', 'ۛ'),
-    ('۟', '۟'),
-    ('Ù ', 'Ù©'),
-    ('đș€', 'đș©'),
-    ('\u{10eab}', 'đș­'),
-    ('đș°', 'đș±'),
-];
-
-pub const YI: &'static [(char, char)] = &[
-    ('、', '。'),
-    ('〈', '】'),
-    ('〔', '〛'),
-    ('・', '・'),
-    ('ꀀ', 'ꒌ'),
-    ('꒐', '꓆'),
-    ('ïœĄ', ''),
-];
-
-pub const ZANABAZAR_SQUARE: &'static [(char, char)] = &[('𑹀', '\u{11a47}')];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/sentence_break.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/sentence_break.rs
deleted file mode 100644
index af1c5bea..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/sentence_break.rs
+++ /dev/null
@@ -1,2530 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate sentence-break ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("ATerm", ATERM),
-    ("CR", CR),
-    ("Close", CLOSE),
-    ("Extend", EXTEND),
-    ("Format", FORMAT),
-    ("LF", LF),
-    ("Lower", LOWER),
-    ("Numeric", NUMERIC),
-    ("OLetter", OLETTER),
-    ("SContinue", SCONTINUE),
-    ("STerm", STERM),
-    ("Sep", SEP),
-    ("Sp", SP),
-    ("Upper", UPPER),
-];
-
-pub const ATERM: &'static [(char, char)] =
-    &[('.', '.'), (' ', ' '), ('ïč’', 'ïč’'), ('', '')];
-
-pub const CR: &'static [(char, char)] = &[('\r', '\r')];
-
-pub const CLOSE: &'static [(char, char)] = &[
-    ('"', '"'),
-    ('\'', ')'),
-    ('[', '['),
-    (']', ']'),
-    ('{', '{'),
-    ('}', '}'),
-    ('«', '«'),
-    ('»', '»'),
-    ('àŒș', 'àŒœ'),
-    ('᚛', '᚜'),
-    ('‘', '‟'),
-    ('‹', '›'),
-    ('⁅', '⁆'),
-    ('⁜', ' '),
-    ('₍', '₎'),
-    ('⌈', '⌋'),
-    ('⟨', '⟩'),
-    ('❛', '❠'),
-    ('❚', '❔'),
-    ('⟅', '⟆'),
-    ('⟩', '⟯'),
-    ('⊃', '⊘'),
-    ('⧘', '⧛'),
-    ('⧌', '⧜'),
-    ('⾀', '⾍'),
-    ('⾜', '⾝'),
-    ('âž ', 'âž©'),
-    ('âč‚', 'âč‚'),
-    ('âč•', 'âčœ'),
-    ('〈', '】'),
-    ('〔', '〛'),
-    ('〝', '〟'),
-    ('', 'ïŽż'),
-    ('ïž—', ''),
-    ('ïž”', 'ïč„'),
-    ('ïč‡', 'ïčˆ'),
-    ('ïč™', 'ïčž'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-    ('ïœą', 'ïœŁ'),
-    ('đŸ™¶', '🙾'),
-];
-
-pub const EXTEND: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{483}', '\u{489}'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6df}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', '\u{7f3}'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{819}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('\u{897}', '\u{89f}'),
-    ('\u{8ca}', '\u{8e1}'),
-    ('\u{8e3}', 'à€ƒ'),
-    ('\u{93a}', '\u{93c}'),
-    ('à€Ÿ', 'à„'),
-    ('\u{951}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('\u{981}', 'àŠƒ'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9be}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', '\u{9cd}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', 'àšƒ'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('\u{abc}', '\u{abc}'),
-    ('àȘŸ', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3e}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', '\u{c04}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', 'àȃ'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('àČŸ', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('àłł', 'àłł'),
-    ('\u{d00}', 'àŽƒ'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d3e}', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', '\u{d4d}'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', 'ඃ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Č', 'à·ł'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('àŒŸ', 'àŒż'),
-    ('\u{f71}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('ါ', '\u{103e}'),
-    ('ၖ', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('ၹ', '၀'),
-    ('ၧ', 'ၭ'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{108d}'),
-    ('ႏ', 'ႏ'),
-    ('ႚ', '\u{109d}'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1715}'),
-    ('\u{1732}', '\u{1734}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('\u{1a17}', '\u{1a1b}'),
-    ('ᩕ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', 'ᬄ'),
-    ('\u{1b34}', '\u{1b44}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', 'ἂ'),
-    ('៥', '\u{1bad}'),
-    ('\u{1be6}', '\u{1bf3}'),
-    ('á°€', '\u{1c37}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('áł·', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{200c}', '\u{200d}'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{302a}', '\u{302f}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{a66f}', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('ê Ł', 'ê §'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('êą€', 'êą'),
-    ('êąŽ', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a953}'),
-    ('\u{a980}', 'ꊃ'),
-    ('\u{a9b3}', '\u{a9c0}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', 'ꩍ'),
-    ('ꩻ', '꩜'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('ê««', 'ê«Ż'),
-    ('ê«”', '\u{aaf6}'),
-    ('êŻŁ', 'êŻȘ'),
-    ('êŻŹ', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('𑀀', '𑀂'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '𑂂'),
-    ('𑂰', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{11134}'),
-    ('𑅅', '𑅆'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '𑆂'),
-    ('𑆳', '\u{111c0}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('𑇎', '\u{111cf}'),
-    ('𑈬', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112ea}'),
-    ('\u{11300}', '𑌃'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{1133e}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍱', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113b8}', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('𑐔', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b0}', '\u{114c3}'),
-    ('\u{115af}', '\u{115b5}'),
-    ('𑖾', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('𑘰', '\u{11640}'),
-    ('\u{116ab}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1172b}'),
-    ('𑠬', '\u{1183a}'),
-    ('\u{11930}', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{1193e}'),
-    ('đ‘„€', 'đ‘„€'),
-    ('đ‘„‚', '\u{11943}'),
-    ('𑧑', '\u{119d7}'),
-    ('\u{119da}', '\u{119e0}'),
-    ('đ‘§€', 'đ‘§€'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', 'đ‘šč'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a99}'),
-    ('𑰯', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('đ‘¶Š', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', '\u{11d97}'),
-    ('\u{11ef3}', 'đ‘»¶'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('đ‘Œƒ', 'đ‘Œƒ'),
-    ('đ‘ŒŽ', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13440}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('đ–œ‘', 'đ–Ÿ‡'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e4ec}', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e94a}'),
-    ('\u{e0020}', '\u{e007f}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const FORMAT: &'static [(char, char)] = &[
-    ('\u{ad}', '\u{ad}'),
-    ('\u{61c}', '\u{61c}'),
-    ('\u{70f}', '\u{70f}'),
-    ('\u{180e}', '\u{180e}'),
-    ('\u{200b}', '\u{200b}'),
-    ('\u{200e}', '\u{200f}'),
-    ('\u{202a}', '\u{202e}'),
-    ('\u{2060}', '\u{2064}'),
-    ('\u{2066}', '\u{206f}'),
-    ('\u{feff}', '\u{feff}'),
-    ('\u{fff9}', '\u{fffb}'),
-    ('\u{13430}', '\u{1343f}'),
-    ('\u{1bca0}', '\u{1bca3}'),
-    ('\u{1d173}', '\u{1d17a}'),
-    ('\u{e0001}', '\u{e0001}'),
-];
-
-pub const LF: &'static [(char, char)] = &[('\n', '\n')];
-
-pub const LOWER: &'static [(char, char)] = &[
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('ß', 'ö'),
-    ('ø', 'ÿ'),
-    ('ā', 'ā'),
-    ('ă', 'ă'),
-    ('ą', 'ą'),
-    ('ć', 'ć'),
-    ('ĉ', 'ĉ'),
-    ('ċ', 'ċ'),
-    ('č', 'č'),
-    ('ď', 'ď'),
-    ('đ', 'đ'),
-    ('ē', 'ē'),
-    ('ĕ', 'ĕ'),
-    ('ė', 'ė'),
-    ('ę', 'ę'),
-    ('ě', 'ě'),
-    ('ĝ', 'ĝ'),
-    ('ğ', 'ğ'),
-    ('ÄĄ', 'ÄĄ'),
-    ('ÄŁ', 'ÄŁ'),
-    ('Ä„', 'Ä„'),
-    ('ħ', 'ħ'),
-    ('Ä©', 'Ä©'),
-    ('Ä«', 'Ä«'),
-    ('Ä­', 'Ä­'),
-    ('ÄŻ', 'ÄŻ'),
-    ('ı', 'ı'),
-    ('Äł', 'Äł'),
-    ('Ä”', 'Ä”'),
-    ('Ä·', 'Äž'),
-    ('Äș', 'Äș'),
-    ('Č', 'Č'),
-    ('ÄŸ', 'ÄŸ'),
-    ('ƀ', 'ƀ'),
-    ('Ƃ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ɔ'),
-    ('ƈ', 'Ɖ'),
-    ('Ƌ', 'Ƌ'),
-    ('ƍ', 'ƍ'),
-    ('Ə', 'Ə'),
-    ('Ƒ', 'Ƒ'),
-    ('œ', 'œ'),
-    ('ƕ', 'ƕ'),
-    ('Ɨ', 'Ɨ'),
-    ('ƙ', 'ƙ'),
-    ('ƛ', 'ƛ'),
-    ('Ɲ', 'Ɲ'),
-    ('ß', 'ß'),
-    ('š', 'š'),
-    ('ĆŁ', 'ĆŁ'),
-    ('Ć„', 'Ć„'),
-    ('Ƨ', 'Ƨ'),
-    ('Ć©', 'Ć©'),
-    ('Ć«', 'Ć«'),
-    ('Ć­', 'Ć­'),
-    ('ĆŻ', 'ĆŻ'),
-    ('Ʊ', 'Ʊ'),
-    ('Ćł', 'Ćł'),
-    ('Ć”', 'Ć”'),
-    ('Ć·', 'Ć·'),
-    ('Ćș', 'Ćș'),
-    ('ĆŒ', 'ĆŒ'),
-    ('ĆŸ', 'ƀ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('ƈ', 'ƈ'),
-    ('ƌ', 'ƍ'),
-    ('ƒ', 'ƒ'),
-    ('ƕ', 'ƕ'),
-    ('ƙ', 'ƛ'),
-    ('ƞ', 'ƞ'),
-    ('ÆĄ', 'ÆĄ'),
-    ('ÆŁ', 'ÆŁ'),
-    ('Æ„', 'Æ„'),
-    ('Æš', 'Æš'),
-    ('ÆȘ', 'Æ«'),
-    ('Æ­', 'Æ­'),
-    ('ư', 'ư'),
-    ('ÆŽ', 'ÆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Æč', 'Æș'),
-    ('Æœ', 'Æż'),
-    ('dž', 'dž'),
-    ('lj', 'lj'),
-    ('nj', 'nj'),
-    ('ǎ', 'ǎ'),
-    ('ǐ', 'ǐ'),
-    ('ǒ', 'ǒ'),
-    ('ǔ', 'ǔ'),
-    ('ǖ', 'ǖ'),
-    ('ǘ', 'ǘ'),
-    ('ǚ', 'ǚ'),
-    ('ǜ', 'ǝ'),
-    ('ǟ', 'ǟ'),
-    ('ÇĄ', 'ÇĄ'),
-    ('ÇŁ', 'ÇŁ'),
-    ('Ç„', 'Ç„'),
-    ('ǧ', 'ǧ'),
-    ('Ç©', 'Ç©'),
-    ('Ç«', 'Ç«'),
-    ('Ç­', 'Ç­'),
-    ('ǯ', 'ǰ'),
-    ('Çł', 'Çł'),
-    ('Ç”', 'Ç”'),
-    ('Çč', 'Çč'),
-    ('Ç»', 'Ç»'),
-    ('ǜ', 'ǜ'),
-    ('Çż', 'Çż'),
-    ('ȁ', 'ȁ'),
-    ('ȃ', 'ȃ'),
-    ('ȅ', 'ȅ'),
-    ('ȇ', 'ȇ'),
-    ('ȉ', 'ȉ'),
-    ('ȋ', 'ȋ'),
-    ('ȍ', 'ȍ'),
-    ('ȏ', 'ȏ'),
-    ('ȑ', 'ȑ'),
-    ('ȓ', 'ȓ'),
-    ('ȕ', 'ȕ'),
-    ('ȗ', 'ȗ'),
-    ('ș', 'ș'),
-    ('ț', 'ț'),
-    ('ȝ', 'ȝ'),
-    ('ȟ', 'ȟ'),
-    ('ÈĄ', 'ÈĄ'),
-    ('ÈŁ', 'ÈŁ'),
-    ('È„', 'È„'),
-    ('ȧ', 'ȧ'),
-    ('È©', 'È©'),
-    ('È«', 'È«'),
-    ('È­', 'È­'),
-    ('ÈŻ', 'ÈŻ'),
-    ('ȱ', 'ȱ'),
-    ('Èł', 'Èč'),
-    ('Ȍ', 'Ȍ'),
-    ('Èż', 'ɀ'),
-    ('ɂ', 'ɂ'),
-    ('ɇ', 'ɇ'),
-    ('ɉ', 'ɉ'),
-    ('ɋ', 'ɋ'),
-    ('ɍ', 'ɍ'),
-    ('ɏ', 'ʓ'),
-    ('ʕ', 'Êž'),
-    ('ˀ', 'ˁ'),
-    ('Ë ', 'Ë€'),
-    ('ͱ', 'ͱ'),
-    ('Íł', 'Íł'),
-    ('Í·', 'Í·'),
-    ('Íș', 'Íœ'),
-    ('ΐ', 'ΐ'),
-    ('ÎŹ', 'ώ'),
-    ('ϐ', 'ϑ'),
-    ('ϕ', 'ϗ'),
-    ('ϙ', 'ϙ'),
-    ('ϛ', 'ϛ'),
-    ('ϝ', 'ϝ'),
-    ('ϟ', 'ϟ'),
-    ('ÏĄ', 'ÏĄ'),
-    ('ÏŁ', 'ÏŁ'),
-    ('Ï„', 'Ï„'),
-    ('ϧ', 'ϧ'),
-    ('Ï©', 'Ï©'),
-    ('Ï«', 'Ï«'),
-    ('Ï­', 'Ï­'),
-    ('ÏŻ', 'Ïł'),
-    ('Ï”', 'Ï”'),
-    ('Ïž', 'Ïž'),
-    ('ϻ', 'ό'),
-    ('а', 'џ'),
-    ('ŃĄ', 'ŃĄ'),
-    ('ŃŁ', 'ŃŁ'),
-    ('Ń„', 'Ń„'),
-    ('ѧ', 'ѧ'),
-    ('Ń©', 'Ń©'),
-    ('Ń«', 'Ń«'),
-    ('Ń­', 'Ń­'),
-    ('ŃŻ', 'ŃŻ'),
-    ('ѱ', 'ѱ'),
-    ('Ńł', 'Ńł'),
-    ('Ń”', 'Ń”'),
-    ('Ń·', 'Ń·'),
-    ('Ńč', 'Ńč'),
-    ('Ń»', 'Ń»'),
-    ('Ńœ', 'Ńœ'),
-    ('Ńż', 'Ńż'),
-    ('ҁ', 'ҁ'),
-    ('ҋ', 'ҋ'),
-    ('ҍ', 'ҍ'),
-    ('ҏ', 'ҏ'),
-    ('ґ', 'ґ'),
-    ('ғ', 'ғ'),
-    ('ҕ', 'ҕ'),
-    ('җ', 'җ'),
-    ('ҙ', 'ҙ'),
-    ('қ', 'қ'),
-    ('ҝ', 'ҝ'),
-    ('ҟ', 'ҟ'),
-    ('ÒĄ', 'ÒĄ'),
-    ('ÒŁ', 'ÒŁ'),
-    ('Ò„', 'Ò„'),
-    ('Ò§', 'Ò§'),
-    ('Ò©', 'Ò©'),
-    ('Ò«', 'Ò«'),
-    ('Ò­', 'Ò­'),
-    ('ÒŻ', 'ÒŻ'),
-    ('Ò±', 'Ò±'),
-    ('Òł', 'Òł'),
-    ('Ò”', 'Ò”'),
-    ('Ò·', 'Ò·'),
-    ('Òč', 'Òč'),
-    ('Ò»', 'Ò»'),
-    ('Ҝ', 'Ҝ'),
-    ('Òż', 'Òż'),
-    ('ӂ', 'ӂ'),
-    ('ӄ', 'ӄ'),
-    ('ӆ', 'ӆ'),
-    ('ӈ', 'ӈ'),
-    ('ӊ', 'ӊ'),
-    ('ӌ', 'ӌ'),
-    ('ӎ', 'ӏ'),
-    ('ӑ', 'ӑ'),
-    ('ӓ', 'ӓ'),
-    ('ӕ', 'ӕ'),
-    ('ӗ', 'ӗ'),
-    ('ә', 'ә'),
-    ('ӛ', 'ӛ'),
-    ('ӝ', 'ӝ'),
-    ('ӟ', 'ӟ'),
-    ('ÓĄ', 'ÓĄ'),
-    ('ÓŁ', 'ÓŁ'),
-    ('Ó„', 'Ó„'),
-    ('Ó§', 'Ó§'),
-    ('Ó©', 'Ó©'),
-    ('Ó«', 'Ó«'),
-    ('Ó­', 'Ó­'),
-    ('ÓŻ', 'ÓŻ'),
-    ('Ó±', 'Ó±'),
-    ('Ół', 'Ół'),
-    ('Ó”', 'Ó”'),
-    ('Ó·', 'Ó·'),
-    ('Óč', 'Óč'),
-    ('Ó»', 'Ó»'),
-    ('Ӝ', 'Ӝ'),
-    ('Óż', 'Óż'),
-    ('ԁ', 'ԁ'),
-    ('ԃ', 'ԃ'),
-    ('ԅ', 'ԅ'),
-    ('ԇ', 'ԇ'),
-    ('ԉ', 'ԉ'),
-    ('ԋ', 'ԋ'),
-    ('ԍ', 'ԍ'),
-    ('ԏ', 'ԏ'),
-    ('ԑ', 'ԑ'),
-    ('ԓ', 'ԓ'),
-    ('ԕ', 'ԕ'),
-    ('ԗ', 'ԗ'),
-    ('ԙ', 'ԙ'),
-    ('ԛ', 'ԛ'),
-    ('ԝ', 'ԝ'),
-    ('ԟ', 'ԟ'),
-    ('ÔĄ', 'ÔĄ'),
-    ('ÔŁ', 'ÔŁ'),
-    ('Ô„', 'Ô„'),
-    ('Ô§', 'Ô§'),
-    ('Ô©', 'Ô©'),
-    ('Ô«', 'Ô«'),
-    ('Ô­', 'Ô­'),
-    ('ÔŻ', 'ÔŻ'),
-    ('ՠ', 'ֈ'),
-    ('჌', '჌'),
-    ('Ꮮ', 'Ꮬ'),
-    ('áȀ', 'áȈ'),
-    ('áȊ', 'áȊ'),
-    ('ᮀ', 'á¶ż'),
-    ('ខ', 'ខ'),
-    ('ឃ', 'ឃ'),
-    ('ᾅ', 'ᾅ'),
-    ('ᾇ', 'ᾇ'),
-    ('ᾉ', 'ᾉ'),
-    ('ᾋ', 'ᾋ'),
-    ('ឍ', 'ឍ'),
-    ('ត', 'ត'),
-    ('ᾑ', 'ᾑ'),
-    ('ᾓ', 'ᾓ'),
-    ('ᾕ', 'ᾕ'),
-    ('ᾗ', 'ᾗ'),
-    ('ᾙ', 'ᾙ'),
-    ('ᾛ', 'ᾛ'),
-    ('ឝ', 'ឝ'),
-    ('ᾟ', 'ᾟ'),
-    ('ឥ', 'ឥ'),
-    ('ឣ', 'ឣ'),
-    ('áž„', 'áž„'),
-    ('áž§', 'áž§'),
-    ('áž©', 'áž©'),
-    ('áž«', 'áž«'),
-    ('áž­', 'áž­'),
-    ('ឯ', 'ឯ'),
-    ('áž±', 'áž±'),
-    ('ážł', 'ážł'),
-    ('áž”', 'áž”'),
-    ('áž·', 'áž·'),
-    ('ážč', 'ážč'),
-    ('áž»', 'áž»'),
-    ('វ', 'វ'),
-    ('ážż', 'ážż'),
-    ('áč', 'áč'),
-    ('áčƒ', 'áčƒ'),
-    ('áč…', 'áč…'),
-    ('áč‡', 'áč‡'),
-    ('áč‰', 'áč‰'),
-    ('áč‹', 'áč‹'),
-    ('áč', 'áč'),
-    ('áč', 'áč'),
-    ('áč‘', 'áč‘'),
-    ('áč“', 'áč“'),
-    ('áč•', 'áč•'),
-    ('áč—', 'áč—'),
-    ('áč™', 'áč™'),
-    ('áč›', 'áč›'),
-    ('áč', 'áč'),
-    ('áčŸ', 'áčŸ'),
-    ('áčĄ', 'áčĄ'),
-    ('áčŁ', 'áčŁ'),
-    ('áč„', 'áč„'),
-    ('áč§', 'áč§'),
-    ('áč©', 'áč©'),
-    ('áč«', 'áč«'),
-    ('áč­', 'áč­'),
-    ('áčŻ', 'áčŻ'),
-    ('áč±', 'áč±'),
-    ('áčł', 'áčł'),
-    ('áč”', 'áč”'),
-    ('áč·', 'áč·'),
-    ('áčč', 'áčč'),
-    ('áč»', 'áč»'),
-    ('áčœ', 'áčœ'),
-    ('áčż', 'áčż'),
-    ('áș', 'áș'),
-    ('áșƒ', 'áșƒ'),
-    ('áș…', 'áș…'),
-    ('áș‡', 'áș‡'),
-    ('áș‰', 'áș‰'),
-    ('áș‹', 'áș‹'),
-    ('áș', 'áș'),
-    ('áș', 'áș'),
-    ('áș‘', 'áș‘'),
-    ('áș“', 'áș“'),
-    ('áș•', 'áș'),
-    ('áșŸ', 'áșŸ'),
-    ('áșĄ', 'áșĄ'),
-    ('áșŁ', 'áșŁ'),
-    ('áș„', 'áș„'),
-    ('áș§', 'áș§'),
-    ('áș©', 'áș©'),
-    ('áș«', 'áș«'),
-    ('áș­', 'áș­'),
-    ('áșŻ', 'áșŻ'),
-    ('áș±', 'áș±'),
-    ('áșł', 'áșł'),
-    ('áș”', 'áș”'),
-    ('áș·', 'áș·'),
-    ('áșč', 'áșč'),
-    ('áș»', 'áș»'),
-    ('áșœ', 'áșœ'),
-    ('áșż', 'áșż'),
-    ('ề', 'ề'),
-    ('ể', 'ể'),
-    ('ễ', 'ễ'),
-    ('ệ', 'ệ'),
-    ('ỉ', 'ỉ'),
-    ('ị', 'ị'),
-    ('ọ', 'ọ'),
-    ('ỏ', 'ỏ'),
-    ('ố', 'ố'),
-    ('ồ', 'ồ'),
-    ('ổ', 'ổ'),
-    ('ỗ', 'ỗ'),
-    ('ộ', 'ộ'),
-    ('ớ', 'ớ'),
-    ('ờ', 'ờ'),
-    ('ở', 'ở'),
-    ('ụ', 'ụ'),
-    ('ợ', 'ợ'),
-    ('Ễ', 'Ễ'),
-    ('á»§', 'á»§'),
-    ('ứ', 'ứ'),
-    ('ừ', 'ừ'),
-    ('á»­', 'á»­'),
-    ('ữ', 'ữ'),
-    ('á»±', 'á»±'),
-    ('ỳ', 'ỳ'),
-    ('á»”', 'á»”'),
-    ('á»·', 'á»·'),
-    ('á»č', 'á»č'),
-    ('á»»', 'á»»'),
-    ('Ờ', 'Ờ'),
-    ('ỿ', 'ጇ'),
-    ('ጐ', 'ጕ'),
-    ('ጠ', 'ጧ'),
-    ('ጰ', 'ጷ'),
-    ('ᜀ', 'ᜅ'),
-    ('ᜐ', '᜗'),
-    ('ᜠ', 'ᜧ'),
-    ('ᜰ', '᜜'),
-    ('ៀ', 'ះ'),
-    ('័', 'ៗ'),
-    ('០', '៧'),
-    ('៰', '៎'),
-    ('៶', '៷'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῇ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'ῗ'),
-    ('áż ', 'áż§'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áż·'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℊ', 'ℊ'),
-    ('ℎ', 'ℏ'),
-    ('ℓ', 'ℓ'),
-    ('ℯ', 'ℯ'),
-    ('℮', '℮'),
-    ('â„č', 'â„č'),
-    ('ℌ', 'ℜ'),
-    ('ⅆ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('ⅰ', 'ⅿ'),
-    ('ↄ', 'ↄ'),
-    ('ⓐ', 'ⓩ'),
-    ('ⰰ', 'ⱟ'),
-    ('ⱥ', 'ⱥ'),
-    ('ⱄ', 'ⱊ'),
-    ('ⱚ', 'ⱚ'),
-    ('â±Ș', 'â±Ș'),
-    ('ⱏ', 'ⱏ'),
-    ('â±±', 'â±±'),
-    ('ⱳ', 'ⱎ'),
-    ('ⱶ', 'ⱜ'),
-    ('âȁ', 'âȁ'),
-    ('âȃ', 'âȃ'),
-    ('âȅ', 'âȅ'),
-    ('âȇ', 'âȇ'),
-    ('âȉ', 'âȉ'),
-    ('âȋ', 'âȋ'),
-    ('âȍ', 'âȍ'),
-    ('âȏ', 'âȏ'),
-    ('âȑ', 'âȑ'),
-    ('âȓ', 'âȓ'),
-    ('âȕ', 'âȕ'),
-    ('âȗ', 'âȗ'),
-    ('âș', 'âș'),
-    ('âț', 'âț'),
-    ('âȝ', 'âȝ'),
-    ('âȟ', 'âȟ'),
-    ('âČĄ', 'âČĄ'),
-    ('âČŁ', 'âČŁ'),
-    ('âČ„', 'âČ„'),
-    ('âȧ', 'âȧ'),
-    ('âČ©', 'âČ©'),
-    ('âČ«', 'âČ«'),
-    ('âČ­', 'âČ­'),
-    ('âČŻ', 'âČŻ'),
-    ('âȱ', 'âȱ'),
-    ('âČł', 'âČł'),
-    ('âČ”', 'âČ”'),
-    ('âČ·', 'âČ·'),
-    ('âČč', 'âČč'),
-    ('âČ»', 'âČ»'),
-    ('âČœ', 'âČœ'),
-    ('âČż', 'âČż'),
-    ('ⳁ', 'ⳁ'),
-    ('ⳃ', 'ⳃ'),
-    ('ⳅ', 'ⳅ'),
-    ('ⳇ', 'ⳇ'),
-    ('ⳉ', 'ⳉ'),
-    ('ⳋ', 'ⳋ'),
-    ('ⳍ', 'ⳍ'),
-    ('ⳏ', 'ⳏ'),
-    ('ⳑ', 'ⳑ'),
-    ('ⳓ', 'ⳓ'),
-    ('ⳕ', 'ⳕ'),
-    ('ⳗ', 'ⳗ'),
-    ('ⳙ', 'ⳙ'),
-    ('ⳛ', 'ⳛ'),
-    ('ⳝ', 'ⳝ'),
-    ('ⳟ', 'ⳟ'),
-    ('⳥', '⳥'),
-    ('ⳣ', 'Ⳁ'),
-    ('ⳏ', 'ⳏ'),
-    ('âłź', 'âłź'),
-    ('âłł', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('ꙁ', 'ꙁ'),
-    ('ꙃ', 'ꙃ'),
-    ('ꙅ', 'ꙅ'),
-    ('ꙇ', 'ꙇ'),
-    ('ꙉ', 'ꙉ'),
-    ('ꙋ', 'ꙋ'),
-    ('ꙍ', 'ꙍ'),
-    ('ꙏ', 'ꙏ'),
-    ('ꙑ', 'ꙑ'),
-    ('ꙓ', 'ꙓ'),
-    ('ꙕ', 'ꙕ'),
-    ('ꙗ', 'ꙗ'),
-    ('ꙙ', 'ꙙ'),
-    ('ꙛ', 'ꙛ'),
-    ('ꙝ', 'ꙝ'),
-    ('ꙟ', 'ꙟ'),
-    ('ê™Ą', 'ê™Ą'),
-    ('ê™Ł', 'ê™Ł'),
-    ('Ꙅ', 'Ꙅ'),
-    ('ꙧ', 'ꙧ'),
-    ('ꙩ', 'ꙩ'),
-    ('ꙫ', 'ꙫ'),
-    ('ꙭ', 'ꙭ'),
-    ('ꚁ', 'ꚁ'),
-    ('ꚃ', 'ꚃ'),
-    ('ꚅ', 'ꚅ'),
-    ('ꚇ', 'ꚇ'),
-    ('ꚉ', 'ꚉ'),
-    ('ꚋ', 'ꚋ'),
-    ('ꚍ', 'ꚍ'),
-    ('ꚏ', 'ꚏ'),
-    ('ꚑ', 'ꚑ'),
-    ('ꚓ', 'ꚓ'),
-    ('ꚕ', 'ꚕ'),
-    ('ꚗ', 'ꚗ'),
-    ('ꚙ', 'ꚙ'),
-    ('ꚛ', 'ꚝ'),
-    ('êœŁ', 'êœŁ'),
-    ('꜄', '꜄'),
-    ('ꜧ', 'ꜧ'),
-    ('ꜩ', 'ꜩ'),
-    ('ꜫ', 'ꜫ'),
-    ('ꜭ', 'ꜭ'),
-    ('êœŻ', 'ꜱ'),
-    ('êœł', 'êœł'),
-    ('꜔', '꜔'),
-    ('ꜷ', 'ꜷ'),
-    ('êœč', 'êœč'),
-    ('ꜻ', 'ꜻ'),
-    ('ꜜ', 'ꜜ'),
-    ('êœż', 'êœż'),
-    ('ꝁ', 'ꝁ'),
-    ('ꝃ', 'ꝃ'),
-    ('ꝅ', 'ꝅ'),
-    ('ꝇ', 'ꝇ'),
-    ('ꝉ', 'ꝉ'),
-    ('ꝋ', 'ꝋ'),
-    ('ꝍ', 'ꝍ'),
-    ('ꝏ', 'ꝏ'),
-    ('ꝑ', 'ꝑ'),
-    ('ꝓ', 'ꝓ'),
-    ('ꝕ', 'ꝕ'),
-    ('ꝗ', 'ꝗ'),
-    ('ꝙ', 'ꝙ'),
-    ('ꝛ', 'ꝛ'),
-    ('ꝝ', 'ꝝ'),
-    ('ꝟ', 'ꝟ'),
-    ('êĄ', 'êĄ'),
-    ('êŁ', 'êŁ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('ꝧ', 'ꝧ'),
-    ('ꝩ', 'ꝩ'),
-    ('ꝫ', 'ꝫ'),
-    ('ꝭ', 'ꝭ'),
-    ('êŻ', 'Ꝟ'),
-    ('êș', 'êș'),
-    ('Ꝍ', 'Ꝍ'),
-    ('êż', 'êż'),
-    ('ꞁ', 'ꞁ'),
-    ('ꞃ', 'ꞃ'),
-    ('ꞅ', 'ꞅ'),
-    ('ꞇ', 'ꞇ'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞎ', 'ꞎ'),
-    ('ꞑ', 'ꞑ'),
-    ('ꞓ', 'ꞕ'),
-    ('ꞗ', 'ꞗ'),
-    ('ꞙ', 'ꞙ'),
-    ('ꞛ', 'ꞛ'),
-    ('ꞝ', 'ꞝ'),
-    ('ꞟ', 'ꞟ'),
-    ('êžĄ', 'êžĄ'),
-    ('êžŁ', 'êžŁ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('ꞧ', 'ꞧ'),
-    ('ꞩ', 'ꞩ'),
-    ('êžŻ', 'êžŻ'),
-    ('ꞔ', 'ꞔ'),
-    ('ꞷ', 'ꞷ'),
-    ('êžč', 'êžč'),
-    ('ꞻ', 'ꞻ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('êžż', 'êžż'),
-    ('ꟁ', 'ꟁ'),
-    ('ꟃ', 'ꟃ'),
-    ('ꟈ', 'ꟈ'),
-    ('ꟊ', 'ꟊ'),
-    ('ꟍ', 'ꟍ'),
-    ('ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'ꟕ'),
-    ('ꟗ', 'ꟗ'),
-    ('ꟙ', 'ꟙ'),
-    ('ꟛ', 'ꟛ'),
-    ('êŸČ', '꟎'),
-    ('ꟶ', 'ꟶ'),
-    ('꟞', 'êŸș'),
-    ('êŹ°', 'ꭚ'),
-    ('ꭜ', 'ꭩ'),
-    ('ê­°', 'êźż'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('', ''),
-    ('𐐹', '𐑏'),
-    ('𐓘', '𐓻'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐞀', '𐞀'),
-    ('𐞃', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐳀', 'đłČ'),
-    ('𐔰', '𐶅'),
-    ('𑣀', '𑣟'),
-    ('đ–č ', 'đ–čż'),
-    ('𝐚', '𝐳'),
-    ('𝑎', '𝑔'),
-    ('𝑖', '𝑧'),
-    ('𝒂', '𝒛'),
-    ('đ’¶', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝓏'),
-    ('đ“Ș', '𝔃'),
-    ('𝔞', 'đ”·'),
-    ('𝕒', 'đ•«'),
-    ('𝖆', '𝖟'),
-    ('đ–ș', '𝗓'),
-    ('𝗼', '𝘇'),
-    ('𝘱', 'đ˜»'),
-    ('𝙖', '𝙯'),
-    ('𝚊', 'đš„'),
-    ('𝛂', '𝛚'),
-    ('𝛜', '𝛡'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜛'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝕'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞏'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟉'),
-    ('𝟋', '𝟋'),
-    ('đŒ€', 'đŒ‰'),
-    ('đŒ‹', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞀹', 'đž„ƒ'),
-];
-
-pub const NUMERIC: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('\u{600}', '\u{605}'),
-    ('Ù ', 'Ù©'),
-    ('Ù«', 'ÙŹ'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('Û°', 'Ûč'),
-    ('߀', '߉'),
-    ('\u{890}', '\u{891}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('à„Š', 'à„Ż'),
-    ('à§Š', 'à§Ż'),
-    ('੊', 'à©Ż'),
-    ('૊', 'à«Ż'),
-    ('à­Š', 'à­Ż'),
-    ('àŻŠ', 'àŻŻ'),
-    ('ొ', 'à±Ż'),
-    ('àłŠ', 'àłŻ'),
-    ('à”Š', 'à”Ż'),
-    ('à·Š', 'à·Ż'),
-    ('àč', 'àč™'),
-    ('໐', '໙'),
-    ('àŒ ', 'àŒ©'),
-    ('၀', '၉'),
-    ('႐', '႙'),
-    ('០', '៩'),
-    ('᠐', '᠙'),
-    ('ᄆ', 'ᄏ'),
-    ('᧐', '᧚'),
-    ('áȘ€', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('᭐', '᭙'),
-    ('áź°', 'áźč'),
-    ('᱀', '᱉'),
-    ('᱐', '᱙'),
-    ('꘠', '꘩'),
-    ('êŁ', 'êŁ™'),
-    ('ꀀ', 'ꀉ'),
-    ('꧐', '꧙'),
-    ('ê§°', 'ê§č'),
-    ('꩐', '꩙'),
-    ('êŻ°', 'êŻč'),
-    ('', ''),
-    ('𐒠', '𐒩'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔉'),
-    ('𑁩', '𑁯'),
-    ('\u{110bd}', '\u{110bd}'),
-    ('\u{110cd}', '\u{110cd}'),
-    ('𑃰', 'đ‘ƒč'),
-    ('đ‘„¶', '𑄿'),
-    ('𑇐', '𑇙'),
-    ('𑋰', 'đ‘‹č'),
-    ('𑑐', '𑑙'),
-    ('𑓐', '𑓙'),
-    ('𑙐', '𑙙'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜰', 'đ‘œč'),
-    ('𑣠', '𑣩'),
-    ('𑄐', 'đ‘„™'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑱐', '𑱙'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘œ', 'đ‘œ™'),
-    ('𖄰', 'đ–„č'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–«€', '𖫉'),
-    ('𖭐', '𖭙'),
-    ('đ–”°', 'đ–”č'),
-    ('𜳰', 'đœłč'),
-    ('𝟎', '𝟿'),
-    ('𞅀', '𞅉'),
-    ('𞋰', 'đž‹č'),
-    ('𞓰', 'đž“č'),
-    ('đž—±', 'đž—ș'),
-    ('𞄐', 'đž„™'),
-    ('🯰', 'đŸŻč'),
-];
-
-pub const OLETTER: &'static [(char, char)] = &[
-    ('Æ»', 'Æ»'),
-    ('ǀ', 'ǃ'),
-    ('ʔ', 'ʔ'),
-    ('Êč', 'Êż'),
-    ('ˆ', 'ˑ'),
-    ('ËŹ', 'ËŹ'),
-    ('Ëź', 'Ëź'),
-    ('ÍŽ', 'ÍŽ'),
-    ('ՙ', 'ՙ'),
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'Śł'),
-    ('Ű ', 'ي'),
-    ('Ùź', 'ÙŻ'),
-    ('ٱ', 'ۓ'),
-    ('ە', 'ە'),
-    ('Û„', 'ÛŠ'),
-    ('Ûź', 'ÛŻ'),
-    ('Ûș', 'ÛŒ'),
-    ('Ûż', 'Ûż'),
-    ('ܐ', 'ܐ'),
-    ('ܒ', 'ܯ'),
-    ('ʍ', 'Ț„'),
-    ('Ț±', 'Ț±'),
-    ('ߊ', 'ßȘ'),
-    ('ߎ', 'ߔ'),
-    ('ßș', 'ßș'),
-    ('ࠀ', 'ࠕ'),
-    ('ࠚ', 'ࠚ'),
-    ('à €', 'à €'),
-    ('à š', 'à š'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('àą ', 'àŁ‰'),
-    ('à€„', 'à€č'),
-    ('à€œ', 'à€œ'),
-    ('à„', 'à„'),
-    ('à„˜', 'à„Ą'),
-    ('à„±', 'àŠ€'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', 'àŠœ'),
-    ('ৎ', 'ৎ'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'à§Ą'),
-    ('à§°', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('à©Č', '੎'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', 'àȘœ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à«Ą'),
-    ('à«č', 'à«č'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', 'àŹœ'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­±', 'à­±'),
-    ('àźƒ', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('àŻ', 'àŻ'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('జ', 'జ'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', 'à±Ą'),
-    ('àȀ', 'àȀ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àČœ'),
-    ('àł', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àł±', 'àłČ'),
-    ('àŽ„', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', 'àŽœ'),
-    ('à”Ž', 'à”Ž'),
-    ('à””', 'à”–'),
-    ('à”Ÿ', 'à”Ą'),
-    ('à”ș', 'à”ż'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('àž', 'àž°'),
-    ('àžČ', 'àžł'),
-    ('àč€', 'àč†'),
-    ('àș', 'àș‚'),
-    ('àș„', 'àș„'),
-    ('àș†', 'àșŠ'),
-    ('àșŒ', 'àșŁ'),
-    ('àș„', 'àș„'),
-    ('àș§', 'àș°'),
-    ('àșČ', 'àșł'),
-    ('àșœ', 'àșœ'),
-    ('ເ', 'ໄ'),
-    ('ໆ', 'ໆ'),
-    ('ໜ', 'ໟ'),
-    ('àŒ€', 'àŒ€'),
-    ('àœ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('àŸˆ', 'àŸŒ'),
-    ('က', 'á€Ș'),
-    ('ဿ', 'ဿ'),
-    ('ၐ', 'ၕ'),
-    ('ၚ', 'ၝ'),
-    ('ၥ', 'ၥ'),
-    ('၄', '၊'),
-    ('ၟ', 'ၰ'),
-    ('ၔ', 'ႁ'),
-    ('ႎ', 'ႎ'),
-    ('ა', 'áƒș'),
-    ('ნ', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('ᎀ', 'ᎏ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', 'ᜑ'),
-    ('ᜟ', 'ᜱ'),
-    ('ᝀ', 'ᝑ'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('ក', 'ឳ'),
-    ('ៗ', 'ៗ'),
-    ('ៜ', 'ៜ'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', '᱄'),
-    ('᱇', 'ᱹ'),
-    ('áąȘ', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('ᄐ', 'ᄭ'),
-    ('ᄰ', 'ᄎ'),
-    ('ᩀ', 'ካ'),
-    ('ᩰ', 'ᧉ'),
-    ('Ṁ', 'Ṗ'),
-    ('áš ', 'ᩔ'),
-    ('áȘ§', 'áȘ§'),
-    ('ᬅ', 'ᬳ'),
-    ('ᭅ', 'ᭌ'),
-    ('ៃ', '០'),
-    ('៟', '៯'),
-    ('áźș', 'ᯄ'),
-    ('ᰀ', 'ᰣ'),
-    ('ᱍ', 'ᱏ'),
-    ('ᱚ', 'ᱜ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł¶'),
-    ('áłș', 'áłș'),
-    ('ℵ', 'ℾ'),
-    ('ↀ', 'ↂ'),
-    ('ↅ', 'ↈ'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('➯', '➯'),
-    ('々', '〇'),
-    ('〡', '〩'),
-    ('〱', '〔'),
-    ('〾', 'ă€Œ'),
-    ('ぁ', 'ゖ'),
-    ('ゝ', 'ゟ'),
-    ('ァ', 'ăƒș'),
-    ('ăƒŒ', 'ヿ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㐀', 'ä¶ż'),
-    ('侀', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘟ'),
-    ('ê˜Ș', 'ꘫ'),
-    ('ê™ź', 'ê™ź'),
-    ('ê™ż', 'ê™ż'),
-    ('ꚠ', 'ê›Ż'),
-    ('ꜗ', 'ꜟ'),
-    ('ꞈ', 'ꞈ'),
-    ('ꞏ', 'ꞏ'),
-    ('ꟷ', 'ꟷ'),
-    ('ꟻ', 'ꠁ'),
-    ('ꠃ', 'ꠅ'),
-    ('ꠇ', 'ꠊ'),
-    ('ꠌ', 'ê ą'),
-    ('êĄ€', 'êĄł'),
-    ('êą‚', 'êął'),
-    ('êŁČ', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', 'êŁŸ'),
-    ('ꀊ', 'ꀄ'),
-    ('ꀰ', 'ꄆ'),
-    ('ꄠ', 'ꄌ'),
-    ('ꊄ', 'êŠČ'),
-    ('ꧏ', 'ꧏ'),
-    ('ê§ ', 'ê§€'),
-    ('ê§Š', 'ê§Ż'),
-    ('ê§ș', 'ê§Ÿ'),
-    ('Ꚁ', 'êšš'),
-    ('ꩀ', 'ꩂ'),
-    ('ꩄ', 'ꩋ'),
-    ('ê© ', 'ê©¶'),
-    ('ê©ș', 'ê©ș'),
-    ('꩟', 'êȘŻ'),
-    ('êȘ±', 'êȘ±'),
-    ('êȘ”', 'êȘ¶'),
-    ('êȘč', 'êȘœ'),
-    ('ꫀ', 'ꫀ'),
-    ('ꫂ', 'ꫂ'),
-    ('ꫛ', 'ꫝ'),
-    ('ê« ', 'ê«Ș'),
-    ('ê«Č', '꫎'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŻ€', 'êŻą'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('', 'ï©­'),
-    ('並', '龎'),
-    ('ïŹ', 'ïŹ'),
-    ('ïŹŸ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', 'đ”'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐑐', '𐒝'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞁', '𐞂'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '𐹀'),
-    ('𐹐', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '𐫀'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('𐮀', '𐮣'),
-    ('𐔊', 'đ”'),
-    ('𐔯', '𐔯'),
-    ('đș€', 'đș©'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('đŒ€', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ°', 'đŸ'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀃', 'đ‘€·'),
-    ('𑁱', 'đ‘Č'),
-    ('𑁔', '𑁔'),
-    ('𑂃', '𑂯'),
-    ('𑃐', '𑃹'),
-    ('𑄃', '𑄩'),
-    ('𑅄', '𑅄'),
-    ('𑅇', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('𑆃', 'đ‘†Č'),
-    ('𑇁', '𑇄'),
-    ('𑇚', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', 'đ‘ˆ«'),
-    ('𑈿', '𑉀'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '𑋞'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', 'đ‘Œœ'),
-    ('𑍐', '𑍐'),
-    ('𑍝', '𑍡'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '𑎷'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏓'),
-    ('𑐀', '𑐮'),
-    ('𑑇', '𑑊'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑒯'),
-    ('𑓄', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑖀', '𑖼'),
-    ('𑗘', '𑗛'),
-    ('𑘀', '𑘯'),
-    ('𑙄', '𑙄'),
-    ('𑚀', 'đ‘šȘ'),
-    ('𑚾', '𑚾'),
-    ('𑜀', '𑜚'),
-    ('𑝀', '𑝆'),
-    ('𑠀', 'đ‘ «'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', '𑀯'),
-    ('𑀿', '𑀿'),
-    ('𑄁', '𑄁'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '𑧐'),
-    ('𑧡', '𑧡'),
-    ('𑧣', '𑧣'),
-    ('𑹀', '𑹀'),
-    ('𑹋', 'đ‘šČ'),
-    ('đ‘šș', 'đ‘šș'),
-    ('𑩐', '𑩐'),
-    ('đ‘©œ', 'đ‘Ș‰'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '𑰼'),
-    ('𑱀', '𑱀'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '𑮰'),
-    ('𑔆', '𑔆'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', '𑶉'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘» ', 'đ‘»Č'),
-    ('đ‘Œ‚', 'đ‘Œ‚'),
-    ('đ‘Œ„', 'đ‘Œ'),
-    ('đ‘Œ’', 'đ‘Œł'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '𖄝'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('𖫐', 'đ–«­'),
-    ('𖬀', '𖬯'),
-    ('𖭀', '𖭃'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('đ–œ', 'đ–œ'),
-    ('đ–Ÿ“', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('𗀀', 'đ˜Ÿ·'),
-    ('𘠀', '𘳕'),
-    ('𘳿', '𘎈'),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛄱'),
-    ('đ›„Č', 'đ›„Č'),
-    ('𛅐', '𛅒'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-    ('𛅰', '𛋻'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('đŒŠ', 'đŒŠ'),
-    ('𞄀', '𞄬'),
-    ('đž„·', 'đž„œ'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞓐', 'đž“«'),
-    ('𞗐', '𞗭'),
-    ('𞗰', '𞗰'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('đž„‹', 'đž„‹'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('𠀀', 'đȘ›Ÿ'),
-    ('đȘœ€', 'đ«œč'),
-    ('đ«€', 'đ« '),
-    ('đ«  ', 'đŹșĄ'),
-    ('đŹș°', '🯠'),
-    ('🯰', 'đźč'),
-    ('丽', '𯹝'),
-    ('𰀀', 'đ±Š'),
-    ('đ±', 'đȎŻ'),
-];
-
-pub const SCONTINUE: &'static [(char, char)] = &[
-    (',', '-'),
-    (':', ';'),
-    ('ÍŸ', 'ÍŸ'),
-    ('՝', '՝'),
-    ('ی', 'ۍ'),
-    ('ßž', 'ßž'),
-    ('᠂', '᠂'),
-    ('᠈', '᠈'),
-    ('–', '—'),
-    ('、', '、'),
-    ('', 'ïž‘'),
-    ('ïž“', 'ïž”'),
-    ('ïž±', 'ïžČ'),
-    ('ïč', 'ïč‘'),
-    ('ïč”', 'ïč•'),
-    ('ïč˜', 'ïč˜'),
-    ('ïčŁ', 'ïčŁ'),
-    ('', ''),
-    ('', ''),
-    ('', ''),
-];
-
-pub const STERM: &'static [(char, char)] = &[
-    ('!', '!'),
-    ('?', '?'),
-    ('։', '։'),
-    ('۝', '۟'),
-    ('۔', '۔'),
-    ('܀', '܂'),
-    ('ßč', 'ßč'),
-    ('à ·', 'à ·'),
-    ('à č', 'à č'),
-    ('ࠜ', 'ࠟ'),
-    ('à„€', 'à„„'),
-    ('၊', '။'),
-    ('፱', '፱'),
-    ('፧', 'ፚ'),
-    ('ᙼ', 'ᙼ'),
-    ('᜔', '᜶'),
-    ('។', '៕'),
-    ('᠃', '᠃'),
-    ('᠉', '᠉'),
-    ('á„„', 'á„…'),
-    ('áȘš', 'áȘ«'),
-    ('᭎', '᭏'),
-    ('᭚', '᭛'),
-    ('᭞', '᭟'),
-    ('á­œ', 'á­ż'),
-    ('᰻', 'ᰌ'),
-    ('ᱟ', '᱿'),
-    ('‌', '“'),
-    ('⁇', '⁉'),
-    ('âłč', 'âł»'),
-    ('âžź', 'âžź'),
-    ('➌', '➌'),
-    ('âč“', 'âč”'),
-    ('。', '。'),
-    ('ê“ż', 'ê“ż'),
-    ('꘎', '꘏'),
-    ('ê›ł', 'ê›ł'),
-    ('꛷', '꛷'),
-    ('êĄ¶', 'êĄ·'),
-    ('êŁŽ', 'êŁ'),
-    ('ê€Ż', 'ê€Ż'),
-    ('꧈', '꧉'),
-    ('꩝', '꩟'),
-    ('꫰', '꫱'),
-    ('êŻ«', 'êŻ«'),
-    ('ïž’', 'ïž’'),
-    ('ïž•', 'ïž–'),
-    ('ïč–', 'ïč—'),
-    ('', ''),
-    ('', ''),
-    ('ïœĄ', 'ïœĄ'),
-    ('𐩖', '𐩗'),
-    ('đœ•', 'đœ™'),
-    ('đŸ†', 'đŸ‰'),
-    ('𑁇', '𑁈'),
-    ('đ‘‚Ÿ', '𑃁'),
-    ('𑅁', '𑅃'),
-    ('𑇅', '𑇆'),
-    ('𑇍', '𑇍'),
-    ('𑇞', '𑇟'),
-    ('𑈾', 'đ‘ˆč'),
-    ('đ‘ˆ»', 'đ‘ˆŒ'),
-    ('𑊩', '𑊩'),
-    ('𑏔', '𑏕'),
-    ('𑑋', '𑑌'),
-    ('𑗂', '𑗃'),
-    ('𑗉', '𑗗'),
-    ('𑙁', '𑙂'),
-    ('đ‘œŒ', 'đ‘œŸ'),
-    ('đ‘„„', 'đ‘„„'),
-    ('𑄆', '𑄆'),
-    ('đ‘©‚', 'đ‘©ƒ'),
-    ('đ‘Ș›', 'đ‘Șœ'),
-    ('𑱁', '𑱂'),
-    ('đ‘»·', '𑻞'),
-    ('đ‘œƒ', 'đ‘œ„'),
-    ('đ–©ź', 'đ–©Ż'),
-    ('đ–«”', 'đ–«”'),
-    ('đ–Ź·', '𖬾'),
-    ('𖭄', '𖭄'),
-    ('𖔟', '𖔯'),
-    ('đ–ș˜', 'đ–ș˜'),
-    ('đ›ČŸ', 'đ›ČŸ'),
-    ('đȘˆ', 'đȘˆ'),
-];
-
-pub const SEP: &'static [(char, char)] =
-    &[('\u{85}', '\u{85}'), ('\u{2028}', '\u{2029}')];
-
-pub const SP: &'static [(char, char)] = &[
-    ('\t', '\t'),
-    ('\u{b}', '\u{c}'),
-    (' ', ' '),
-    ('\u{a0}', '\u{a0}'),
-    ('\u{1680}', '\u{1680}'),
-    ('\u{2000}', '\u{200a}'),
-    ('\u{202f}', '\u{202f}'),
-    ('\u{205f}', '\u{205f}'),
-    ('\u{3000}', '\u{3000}'),
-];
-
-pub const UPPER: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('À', 'Ö'),
-    ('Ø', 'Þ'),
-    ('Ā', 'Ā'),
-    ('Ă', 'Ă'),
-    ('Ą', 'Ą'),
-    ('Ć', 'Ć'),
-    ('Ĉ', 'Ĉ'),
-    ('Ċ', 'Ċ'),
-    ('Č', 'Č'),
-    ('Ď', 'Ď'),
-    ('Đ', 'Đ'),
-    ('Ē', 'Ē'),
-    ('Ĕ', 'Ĕ'),
-    ('Ė', 'Ė'),
-    ('Ę', 'Ę'),
-    ('Ě', 'Ě'),
-    ('Ĝ', 'Ĝ'),
-    ('Ğ', 'Ğ'),
-    ('Ä ', 'Ä '),
-    ('Äą', 'Äą'),
-    ('Ä€', 'Ä€'),
-    ('ÄŠ', 'ÄŠ'),
-    ('Äš', 'Äš'),
-    ('ÄȘ', 'ÄȘ'),
-    ('ÄŹ', 'ÄŹ'),
-    ('Äź', 'Äź'),
-    ('İ', 'İ'),
-    ('ÄČ', 'ÄČ'),
-    ('ÄŽ', 'ÄŽ'),
-    ('Ķ', 'Ķ'),
-    ('Äč', 'Äč'),
-    ('Ä»', 'Ä»'),
-    ('Ĝ', 'Ĝ'),
-    ('Äż', 'Äż'),
-    ('Ɓ', 'Ɓ'),
-    ('ƃ', 'ƃ'),
-    ('ƅ', 'ƅ'),
-    ('Ƈ', 'Ƈ'),
-    ('Ê', 'Ê'),
-    ('Ì', 'Ì'),
-    ('Ǝ', 'Ǝ'),
-    ('Ɛ', 'Ɛ'),
-    ('Œ', 'Œ'),
-    ('Ɣ', 'Ɣ'),
-    ('Ɩ', 'Ɩ'),
-    ('Ƙ', 'Ƙ'),
-    ('Ú', 'Ú'),
-    ('Ü', 'Ü'),
-    ('ƞ', 'ƞ'),
-    ('Š', 'Š'),
-    ('Ćą', 'Ćą'),
-    ('Ć€', 'Ć€'),
-    ('ĆŠ', 'ĆŠ'),
-    ('Ćš', 'Ćš'),
-    ('ĆȘ', 'ĆȘ'),
-    ('ĆŹ', 'ĆŹ'),
-    ('Ćź', 'Ćź'),
-    ('ư', 'ư'),
-    ('ĆČ', 'ĆČ'),
-    ('ĆŽ', 'ĆŽ'),
-    ('ƶ', 'ƶ'),
-    ('Ÿ', 'Ćč'),
-    ('Ć»', 'Ć»'),
-    ('Ćœ', 'Ćœ'),
-    ('Ɓ', 'Ƃ'),
-    ('Ƅ', 'Ƅ'),
-    ('Ɔ', 'Ƈ'),
-    ('Ɖ', 'Ƌ'),
-    ('Ǝ', 'Ƒ'),
-    ('Ɠ', 'Ɣ'),
-    ('Ɩ', 'Ƙ'),
-    ('Ɯ', 'Ɲ'),
-    ('Ɵ', 'Ơ'),
-    ('Æą', 'Æą'),
-    ('Æ€', 'Æ€'),
-    ('Ɗ', 'Ƨ'),
-    ('Æ©', 'Æ©'),
-    ('ÆŹ', 'ÆŹ'),
-    ('Æź', 'ÆŻ'),
-    ('Ʊ', 'Æł'),
-    ('Æ”', 'Æ”'),
-    ('Æ·', 'Æž'),
-    ('ƌ', 'ƌ'),
-    ('DŽ', 'Dž'),
-    ('LJ', 'Lj'),
-    ('NJ', 'Nj'),
-    ('Ǎ', 'Ǎ'),
-    ('Ǐ', 'Ǐ'),
-    ('Ǒ', 'Ǒ'),
-    ('Ǔ', 'Ǔ'),
-    ('Ǖ', 'Ǖ'),
-    ('Ǘ', 'Ǘ'),
-    ('Ǚ', 'Ǚ'),
-    ('Ǜ', 'Ǜ'),
-    ('Ǟ', 'Ǟ'),
-    ('Ç ', 'Ç '),
-    ('Çą', 'Çą'),
-    ('Ç€', 'Ç€'),
-    ('ÇŠ', 'ÇŠ'),
-    ('Çš', 'Çš'),
-    ('ÇȘ', 'ÇȘ'),
-    ('ÇŹ', 'ÇŹ'),
-    ('Çź', 'Çź'),
-    ('DZ', 'ÇČ'),
-    ('ÇŽ', 'ÇŽ'),
-    ('Ƕ', 'Ǟ'),
-    ('Çș', 'Çș'),
-    ('nj', 'nj'),
-    ('ÇŸ', 'ÇŸ'),
-    ('Ȁ', 'Ȁ'),
-    ('Ȃ', 'Ȃ'),
-    ('Ȅ', 'Ȅ'),
-    ('Ȇ', 'Ȇ'),
-    ('Ȉ', 'Ȉ'),
-    ('Ȋ', 'Ȋ'),
-    ('Ȍ', 'Ȍ'),
-    ('Ȏ', 'Ȏ'),
-    ('Ȑ', 'Ȑ'),
-    ('Ȓ', 'Ȓ'),
-    ('Ȕ', 'Ȕ'),
-    ('Ȗ', 'Ȗ'),
-    ('Ș', 'Ș'),
-    ('Ț', 'Ț'),
-    ('Ȝ', 'Ȝ'),
-    ('Ȟ', 'Ȟ'),
-    ('È ', 'È '),
-    ('Èą', 'Èą'),
-    ('È€', 'È€'),
-    ('ÈŠ', 'ÈŠ'),
-    ('Èš', 'Èš'),
-    ('ÈȘ', 'ÈȘ'),
-    ('ÈŹ', 'ÈŹ'),
-    ('Èź', 'Èź'),
-    ('Ȱ', 'Ȱ'),
-    ('ÈČ', 'ÈČ'),
-    ('Èș', 'È»'),
-    ('Ȝ', 'ȟ'),
-    ('Ɂ', 'Ɂ'),
-    ('Ƀ', 'Ɇ'),
-    ('Ɉ', 'Ɉ'),
-    ('Ɋ', 'Ɋ'),
-    ('Ɍ', 'Ɍ'),
-    ('Ɏ', 'Ɏ'),
-    ('Ͱ', 'Ͱ'),
-    ('ÍČ', 'ÍČ'),
-    ('Ͷ', 'Ͷ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ώ'),
-    ('Α', 'Ρ'),
-    ('Σ', 'Ϋ'),
-    ('Ϗ', 'Ϗ'),
-    ('ϒ', 'ϔ'),
-    ('Ϙ', 'Ϙ'),
-    ('Ϛ', 'Ϛ'),
-    ('Ϝ', 'Ϝ'),
-    ('Ϟ', 'Ϟ'),
-    ('Ï ', 'Ï '),
-    ('Ïą', 'Ïą'),
-    ('Ï€', 'Ï€'),
-    ('ÏŠ', 'ÏŠ'),
-    ('Ïš', 'Ïš'),
-    ('ÏȘ', 'ÏȘ'),
-    ('ÏŹ', 'ÏŹ'),
-    ('Ïź', 'Ïź'),
-    ('ÏŽ', 'ÏŽ'),
-    ('Ï·', 'Ï·'),
-    ('Ïč', 'Ïș'),
-    ('Ïœ', 'ĐŻ'),
-    ('Ń ', 'Ń '),
-    ('Ńą', 'Ńą'),
-    ('Ń€', 'Ń€'),
-    ('ŃŠ', 'ŃŠ'),
-    ('Ńš', 'Ńš'),
-    ('ŃȘ', 'ŃȘ'),
-    ('ŃŹ', 'ŃŹ'),
-    ('Ńź', 'Ńź'),
-    ('Ѱ', 'Ѱ'),
-    ('ŃČ', 'ŃČ'),
-    ('ŃŽ', 'ŃŽ'),
-    ('Ѷ', 'Ѷ'),
-    ('Ńž', 'Ńž'),
-    ('Ńș', 'Ńș'),
-    ('ŃŒ', 'ŃŒ'),
-    ('ŃŸ', 'ŃŸ'),
-    ('Ҁ', 'Ҁ'),
-    ('Ҋ', 'Ҋ'),
-    ('Ҍ', 'Ҍ'),
-    ('Ҏ', 'Ҏ'),
-    ('Ґ', 'Ґ'),
-    ('Ғ', 'Ғ'),
-    ('Ҕ', 'Ҕ'),
-    ('Җ', 'Җ'),
-    ('Ҙ', 'Ҙ'),
-    ('Қ', 'Қ'),
-    ('Ҝ', 'Ҝ'),
-    ('Ҟ', 'Ҟ'),
-    ('Ò ', 'Ò '),
-    ('Òą', 'Òą'),
-    ('Ò€', 'Ò€'),
-    ('ÒŠ', 'ÒŠ'),
-    ('Òš', 'Òš'),
-    ('ÒȘ', 'ÒȘ'),
-    ('ÒŹ', 'ÒŹ'),
-    ('Òź', 'Òź'),
-    ('Ò°', 'Ò°'),
-    ('ÒČ', 'ÒČ'),
-    ('ÒŽ', 'ÒŽ'),
-    ('Ò¶', 'Ò¶'),
-    ('Òž', 'Òž'),
-    ('Òș', 'Òș'),
-    ('Ҍ', 'Ҍ'),
-    ('ÒŸ', 'ÒŸ'),
-    ('Ӏ', 'Ӂ'),
-    ('Ӄ', 'Ӄ'),
-    ('Ӆ', 'Ӆ'),
-    ('Ӈ', 'Ӈ'),
-    ('Ӊ', 'Ӊ'),
-    ('Ӌ', 'Ӌ'),
-    ('Ӎ', 'Ӎ'),
-    ('Ӑ', 'Ӑ'),
-    ('Ӓ', 'Ӓ'),
-    ('Ӕ', 'Ӕ'),
-    ('Ӗ', 'Ӗ'),
-    ('Ә', 'Ә'),
-    ('Ӛ', 'Ӛ'),
-    ('Ӝ', 'Ӝ'),
-    ('Ӟ', 'Ӟ'),
-    ('Ó ', 'Ó '),
-    ('Óą', 'Óą'),
-    ('Ó€', 'Ó€'),
-    ('ÓŠ', 'ÓŠ'),
-    ('Óš', 'Óš'),
-    ('ÓȘ', 'ÓȘ'),
-    ('ÓŹ', 'ÓŹ'),
-    ('Óź', 'Óź'),
-    ('Ó°', 'Ó°'),
-    ('ÓČ', 'ÓČ'),
-    ('ÓŽ', 'ÓŽ'),
-    ('Ó¶', 'Ó¶'),
-    ('Óž', 'Óž'),
-    ('Óș', 'Óș'),
-    ('ӌ', 'ӌ'),
-    ('ÓŸ', 'ÓŸ'),
-    ('Ԁ', 'Ԁ'),
-    ('Ԃ', 'Ԃ'),
-    ('Ԅ', 'Ԅ'),
-    ('Ԇ', 'Ԇ'),
-    ('Ԉ', 'Ԉ'),
-    ('Ԋ', 'Ԋ'),
-    ('Ԍ', 'Ԍ'),
-    ('Ԏ', 'Ԏ'),
-    ('Ԑ', 'Ԑ'),
-    ('Ԓ', 'Ԓ'),
-    ('Ԕ', 'Ԕ'),
-    ('Ԗ', 'Ԗ'),
-    ('Ԙ', 'Ԙ'),
-    ('Ԛ', 'Ԛ'),
-    ('Ԝ', 'Ԝ'),
-    ('Ԟ', 'Ԟ'),
-    ('Ô ', 'Ô '),
-    ('Ôą', 'Ôą'),
-    ('Ô€', 'Ô€'),
-    ('ÔŠ', 'ÔŠ'),
-    ('Ôš', 'Ôš'),
-    ('ÔȘ', 'ÔȘ'),
-    ('ÔŹ', 'ÔŹ'),
-    ('Ôź', 'Ôź'),
-    ('Ô±', 'Ֆ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('áȉ', 'áȉ'),
-    ('ᾀ', 'ᾀ'),
-    ('ᾂ', 'ᾂ'),
-    ('ᾄ', 'ᾄ'),
-    ('ᾆ', 'ᾆ'),
-    ('ឈ', 'ឈ'),
-    ('ᾊ', 'ᾊ'),
-    ('ᾌ', 'ᾌ'),
-    ('ᾎ', 'ᾎ'),
-    ('ថ', 'ថ'),
-    ('ᾒ', 'ᾒ'),
-    ('ᾔ', 'ᾔ'),
-    ('ᾖ', 'ᾖ'),
-    ('ម', 'ម'),
-    ('ᾚ', 'ᾚ'),
-    ('ᾜ', 'ᾜ'),
-    ('ᾞ', 'ᾞ'),
-    ('áž ', 'áž '),
-    ('ážą', 'ážą'),
-    ('ក', 'ក'),
-    ('ដ', 'ដ'),
-    ('ážš', 'ážš'),
-    ('ážȘ', 'ážȘ'),
-    ('ត', 'ត'),
-    ('ážź', 'ážź'),
-    ('áž°', 'áž°'),
-    ('ážČ', 'ážČ'),
-    ('ណ', 'ណ'),
-    ('áž¶', 'áž¶'),
-    ('ážž', 'ážž'),
-    ('ážș', 'ážș'),
-    ('ឌ', 'ឌ'),
-    ('ស', 'ស'),
-    ('áč€', 'áč€'),
-    ('áč‚', 'áč‚'),
-    ('áč„', 'áč„'),
-    ('áč†', 'áč†'),
-    ('áčˆ', 'áčˆ'),
-    ('áčŠ', 'áčŠ'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč', 'áč'),
-    ('áč’', 'áč’'),
-    ('áč”', 'áč”'),
-    ('áč–', 'áč–'),
-    ('áč˜', 'áč˜'),
-    ('áčš', 'áčš'),
-    ('áčœ', 'áčœ'),
-    ('áčž', 'áčž'),
-    ('áč ', 'áč '),
-    ('áčą', 'áčą'),
-    ('áč€', 'áč€'),
-    ('áčŠ', 'áčŠ'),
-    ('áčš', 'áčš'),
-    ('áčȘ', 'áčȘ'),
-    ('áčŹ', 'áčŹ'),
-    ('áčź', 'áčź'),
-    ('áč°', 'áč°'),
-    ('áčČ', 'áčČ'),
-    ('áčŽ', 'áčŽ'),
-    ('áč¶', 'áč¶'),
-    ('áčž', 'áčž'),
-    ('áčș', 'áčș'),
-    ('áčŒ', 'áčŒ'),
-    ('áčŸ', 'áčŸ'),
-    ('áș€', 'áș€'),
-    ('áș‚', 'áș‚'),
-    ('áș„', 'áș„'),
-    ('áș†', 'áș†'),
-    ('áșˆ', 'áșˆ'),
-    ('áșŠ', 'áșŠ'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș', 'áș'),
-    ('áș’', 'áș’'),
-    ('áș”', 'áș”'),
-    ('áșž', 'áșž'),
-    ('áș ', 'áș '),
-    ('áșą', 'áșą'),
-    ('áș€', 'áș€'),
-    ('áșŠ', 'áșŠ'),
-    ('áșš', 'áșš'),
-    ('áșȘ', 'áșȘ'),
-    ('áșŹ', 'áșŹ'),
-    ('áșź', 'áșź'),
-    ('áș°', 'áș°'),
-    ('áșČ', 'áșČ'),
-    ('áșŽ', 'áșŽ'),
-    ('áș¶', 'áș¶'),
-    ('áșž', 'áșž'),
-    ('áșș', 'áșș'),
-    ('áșŒ', 'áșŒ'),
-    ('áșŸ', 'áșŸ'),
-    ('Ề', 'Ề'),
-    ('Ể', 'Ể'),
-    ('Ễ', 'Ễ'),
-    ('Ệ', 'Ệ'),
-    ('Ỉ', 'Ỉ'),
-    ('Ị', 'Ị'),
-    ('Ọ', 'Ọ'),
-    ('Ỏ', 'Ỏ'),
-    ('Ố', 'Ố'),
-    ('Ồ', 'Ồ'),
-    ('Ổ', 'Ổ'),
-    ('Ỗ', 'Ỗ'),
-    ('Ộ', 'Ộ'),
-    ('Ớ', 'Ớ'),
-    ('Ờ', 'Ờ'),
-    ('Ở', 'Ở'),
-    ('á» ', 'á» '),
-    ('ỹ', 'ỹ'),
-    ('Ề', 'Ề'),
-    ('Ị', 'Ị'),
-    ('Ớ', 'Ớ'),
-    ('á»Ș', 'á»Ș'),
-    ('ỏ', 'ỏ'),
-    ('ở', 'ở'),
-    ('á»°', 'á»°'),
-    ('á»Č', 'á»Č'),
-    ('Ỏ', 'Ỏ'),
-    ('á»¶', 'á»¶'),
-    ('Ở', 'Ở'),
-    ('á»ș', 'á»ș'),
-    ('Ọ', 'Ọ'),
-    ('ở', 'ở'),
-    ('ገ', 'ጏ'),
-    ('ጘ', 'ጝ'),
-    ('ጚ', 'áŒŻ'),
-    ('ጞ', 'áŒż'),
-    ('ᜈ', 'ᜍ'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', 'ᜟ'),
-    ('᜚', 'áœŻ'),
-    ('ៈ', '៏'),
-    ('៘', '៟'),
-    ('៚', 'áŸŻ'),
-    ('៞', '៌'),
-    ('Ὲ', 'ῌ'),
-    ('Ῐ', 'Ί'),
-    ('Ὶ', '῏'),
-    ('áżž', 'áżŒ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℋ', 'ℍ'),
-    ('ℐ', 'ℒ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℰ', 'ℳ'),
-    ('℟', 'ℿ'),
-    ('ⅅ', 'ⅅ'),
-    ('Ⅰ', 'Ⅿ'),
-    ('Ↄ', 'Ↄ'),
-    ('Ⓐ', 'Ⓩ'),
-    ('Ⰰ', 'Ⱟ'),
-    ('â± ', 'â± '),
-    ('ⱹ', 'ⱀ'),
-    ('â±§', 'â±§'),
-    ('Ⱪ', 'Ⱪ'),
-    ('Ⱬ', 'Ⱬ'),
-    ('â±­', 'â±°'),
-    ('â±Č', 'â±Č'),
-    ('â±”', 'â±”'),
-    ('ⱟ', 'âȀ'),
-    ('âȂ', 'âȂ'),
-    ('âȄ', 'âȄ'),
-    ('âȆ', 'âȆ'),
-    ('âȈ', 'âȈ'),
-    ('âȊ', 'âȊ'),
-    ('âȌ', 'âȌ'),
-    ('âȎ', 'âȎ'),
-    ('âȐ', 'âȐ'),
-    ('âȒ', 'âȒ'),
-    ('âȔ', 'âȔ'),
-    ('âȖ', 'âȖ'),
-    ('âȘ', 'âȘ'),
-    ('âȚ', 'âȚ'),
-    ('âȜ', 'âȜ'),
-    ('âȞ', 'âȞ'),
-    ('âČ ', 'âČ '),
-    ('âČą', 'âČą'),
-    ('âČ€', 'âČ€'),
-    ('âČŠ', 'âČŠ'),
-    ('âČš', 'âČš'),
-    ('âČȘ', 'âČȘ'),
-    ('âČŹ', 'âČŹ'),
-    ('âČź', 'âČź'),
-    ('âȰ', 'âȰ'),
-    ('âČČ', 'âČČ'),
-    ('âČŽ', 'âČŽ'),
-    ('âȶ', 'âȶ'),
-    ('âČž', 'âČž'),
-    ('âČș', 'âČș'),
-    ('âČŒ', 'âČŒ'),
-    ('âČŸ', 'âČŸ'),
-    ('Ⳁ', 'Ⳁ'),
-    ('Ⳃ', 'Ⳃ'),
-    ('Ⳅ', 'Ⳅ'),
-    ('Ⳇ', 'Ⳇ'),
-    ('Ⳉ', 'Ⳉ'),
-    ('Ⳋ', 'Ⳋ'),
-    ('Ⳍ', 'Ⳍ'),
-    ('Ⳏ', 'Ⳏ'),
-    ('Ⳑ', 'Ⳑ'),
-    ('Ⳓ', 'Ⳓ'),
-    ('Ⳕ', 'Ⳕ'),
-    ('Ⳗ', 'Ⳗ'),
-    ('Ⳙ', 'Ⳙ'),
-    ('Ⳛ', 'Ⳛ'),
-    ('Ⳝ', 'Ⳝ'),
-    ('Ⳟ', 'Ⳟ'),
-    ('âł ', 'âł '),
-    ('âłą', 'âłą'),
-    ('âł«', 'âł«'),
-    ('âł­', 'âł­'),
-    ('âłČ', 'âłČ'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙃ', 'Ꙃ'),
-    ('Ꙅ', 'Ꙅ'),
-    ('Ꙇ', 'Ꙇ'),
-    ('Ꙉ', 'Ꙉ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙍ', 'Ꙍ'),
-    ('Ꙏ', 'Ꙏ'),
-    ('Ꙑ', 'Ꙑ'),
-    ('Ꙓ', 'Ꙓ'),
-    ('Ꙕ', 'Ꙕ'),
-    ('Ꙗ', 'Ꙗ'),
-    ('Ꙙ', 'Ꙙ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('Ꙝ', 'Ꙝ'),
-    ('Ꙟ', 'Ꙟ'),
-    ('Ꙡ', 'Ꙡ'),
-    ('ê™ą', 'ê™ą'),
-    ('Ꙁ', 'Ꙁ'),
-    ('Ꙋ', 'Ꙋ'),
-    ('Ꙛ', 'Ꙛ'),
-    ('ê™Ș', 'ê™Ș'),
-    ('ê™Ź', 'ê™Ź'),
-    ('Ꚁ', 'Ꚁ'),
-    ('Ꚃ', 'Ꚃ'),
-    ('Ꚅ', 'Ꚅ'),
-    ('Ꚇ', 'Ꚇ'),
-    ('Ꚉ', 'Ꚉ'),
-    ('Ꚋ', 'Ꚋ'),
-    ('Ꚍ', 'Ꚍ'),
-    ('Ꚏ', 'Ꚏ'),
-    ('Ꚑ', 'Ꚑ'),
-    ('Ꚓ', 'Ꚓ'),
-    ('Ꚕ', 'Ꚕ'),
-    ('Ꚗ', 'Ꚗ'),
-    ('Ꚙ', 'Ꚙ'),
-    ('Ꚛ', 'Ꚛ'),
-    ('êœą', 'êœą'),
-    ('꜀', '꜀'),
-    ('꜊', '꜊'),
-    ('ꜚ', 'ꜚ'),
-    ('êœȘ', 'êœȘ'),
-    ('êœŹ', 'êœŹ'),
-    ('êœź', 'êœź'),
-    ('êœČ', 'êœČ'),
-    ('꜎', '꜎'),
-    ('Ꜷ', 'Ꜷ'),
-    ('ꜞ', 'ꜞ'),
-    ('êœș', 'êœș'),
-    ('꜌', '꜌'),
-    ('ꜟ', 'ꜟ'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝃ', 'Ꝃ'),
-    ('Ꝅ', 'Ꝅ'),
-    ('Ꝇ', 'Ꝇ'),
-    ('Ꝉ', 'Ꝉ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝍ', 'Ꝍ'),
-    ('Ꝏ', 'Ꝏ'),
-    ('Ꝑ', 'Ꝑ'),
-    ('Ꝓ', 'Ꝓ'),
-    ('Ꝕ', 'Ꝕ'),
-    ('Ꝗ', 'Ꝗ'),
-    ('Ꝙ', 'Ꝙ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('Ꝝ', 'Ꝝ'),
-    ('Ꝟ', 'Ꝟ'),
-    ('Ꝡ', 'Ꝡ'),
-    ('êą', 'êą'),
-    ('Ꝁ', 'Ꝁ'),
-    ('Ꝋ', 'Ꝋ'),
-    ('Ꝛ', 'Ꝛ'),
-    ('êȘ', 'êȘ'),
-    ('êŹ', 'êŹ'),
-    ('êź', 'êź'),
-    ('êč', 'êč'),
-    ('Ꝼ', 'Ꝼ'),
-    ('Ꝝ', 'ꝟ'),
-    ('Ꞁ', 'Ꞁ'),
-    ('Ꞃ', 'Ꞃ'),
-    ('Ꞅ', 'Ꞅ'),
-    ('Ꞇ', 'Ꞇ'),
-    ('Ꞌ', 'Ꞌ'),
-    ('Ɥ', 'Ɥ'),
-    ('Ꞑ', 'Ꞑ'),
-    ('Ꞓ', 'Ꞓ'),
-    ('Ꞗ', 'Ꞗ'),
-    ('Ꞙ', 'Ꞙ'),
-    ('Ꞛ', 'Ꞛ'),
-    ('Ꞝ', 'Ꞝ'),
-    ('Ꞟ', 'Ꞟ'),
-    ('Ꞡ', 'Ꞡ'),
-    ('êžą', 'êžą'),
-    ('Ꞁ', 'Ꞁ'),
-    ('꞊', '꞊'),
-    ('Ꞛ', 'Ꞛ'),
-    ('êžȘ', 'êžź'),
-    ('Ʞ', 'ꞎ'),
-    ('Ꞷ', 'Ꞷ'),
-    ('êžž', 'êžž'),
-    ('êžș', 'êžș'),
-    ('ꞌ', 'ꞌ'),
-    ('ꞟ', 'ꞟ'),
-    ('Ꟁ', 'Ꟁ'),
-    ('Ꟃ', 'Ꟃ'),
-    ('Ꞔ', 'Ꟈ'),
-    ('Ꟊ', 'Ꟊ'),
-    ('Ɤ', 'Ꟍ'),
-    ('Ꟑ', 'Ꟑ'),
-    ('Ꟗ', 'Ꟗ'),
-    ('Ꟙ', 'Ꟙ'),
-    ('Ꟛ', 'Ꟛ'),
-    ('Ƛ', 'Ƛ'),
-    ('꟔', '꟔'),
-    ('ïŒĄ', 'ïŒș'),
-    ('𐐀', '𐐧'),
-    ('𐒰', '𐓓'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('đČ€', 'đČČ'),
-    ('𐔐', '𐔄'),
-    ('𑱠', '𑱿'),
-    ('đ–č€', 'đ–čŸ'),
-    ('𝐀', '𝐙'),
-    ('𝐮', '𝑍'),
-    ('𝑹', '𝒁'),
-    ('𝒜', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’”'),
-    ('𝓐', 'đ“©'),
-    ('𝔄', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔾', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕬', '𝖅'),
-    ('𝖠', 'đ–č'),
-    ('𝗔', '𝗭'),
-    ('𝘈', '𝘡'),
-    ('đ˜Œ', '𝙕'),
-    ('𝙰', '𝚉'),
-    ('𝚹', '𝛀'),
-    ('𝛱', 'đ›ș'),
-    ('𝜜', '𝜮'),
-    ('𝝖', '𝝼'),
-    ('𝞐', '𝞹'),
-    ('𝟊', '𝟊'),
-    ('𞀀', '𞀥'),
-    ('🄰', '🅉'),
-    ('🅐', 'đŸ…©'),
-    ('🅰', '🆉'),
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/word_break.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/word_break.rs
deleted file mode 100644
index b764d34..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/word_break.rs
+++ /dev/null
@@ -1,1152 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-//   ucd-generate word-break ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
-
-pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[
-    ("ALetter", ALETTER),
-    ("CR", CR),
-    ("Double_Quote", DOUBLE_QUOTE),
-    ("Extend", EXTEND),
-    ("ExtendNumLet", EXTENDNUMLET),
-    ("Format", FORMAT),
-    ("Hebrew_Letter", HEBREW_LETTER),
-    ("Katakana", KATAKANA),
-    ("LF", LF),
-    ("MidLetter", MIDLETTER),
-    ("MidNum", MIDNUM),
-    ("MidNumLet", MIDNUMLET),
-    ("Newline", NEWLINE),
-    ("Numeric", NUMERIC),
-    ("Regional_Indicator", REGIONAL_INDICATOR),
-    ("Single_Quote", SINGLE_QUOTE),
-    ("WSegSpace", WSEGSPACE),
-    ("ZWJ", ZWJ),
-];
-
-pub const ALETTER: &'static [(char, char)] = &[
-    ('A', 'Z'),
-    ('a', 'z'),
-    ('ª', 'ª'),
-    ('µ', 'µ'),
-    ('º', 'º'),
-    ('À', 'Ö'),
-    ('Ø', 'ö'),
-    ('ø', '˗'),
-    ('˞', '˿'),
-    ('Ͱ', '͎'),
-    ('Ͷ', 'ͷ'),
-    ('Íș', 'Íœ'),
-    ('Íż', 'Íż'),
-    ('Ά', 'Ά'),
-    ('Έ', 'Ί'),
-    ('Ό', 'Ό'),
-    ('Ύ', 'Ρ'),
-    ('Σ', 'Ï”'),
-    ('Ϸ', 'ҁ'),
-    ('Ҋ', 'ÔŻ'),
-    ('Ô±', 'Ֆ'),
-    ('ՙ', '՜'),
-    ('՞', '՞'),
-    ('ՠ', 'ֈ'),
-    ('֊', '֊'),
-    ('Śł', 'Śł'),
-    ('Ű ', 'ي'),
-    ('Ùź', 'ÙŻ'),
-    ('ٱ', 'ۓ'),
-    ('ە', 'ە'),
-    ('Û„', 'ÛŠ'),
-    ('Ûź', 'ÛŻ'),
-    ('Ûș', 'ÛŒ'),
-    ('Ûż', 'Ûż'),
-    ('\u{70f}', 'ܐ'),
-    ('ܒ', 'ܯ'),
-    ('ʍ', 'Ț„'),
-    ('Ț±', 'Ț±'),
-    ('ߊ', 'ßȘ'),
-    ('ߎ', 'ߔ'),
-    ('ßș', 'ßș'),
-    ('ࠀ', 'ࠕ'),
-    ('ࠚ', 'ࠚ'),
-    ('à €', 'à €'),
-    ('à š', 'à š'),
-    ('àĄ€', 'àĄ˜'),
-    ('àĄ ', 'àĄȘ'),
-    ('àĄ°', 'àą‡'),
-    ('àą‰', 'àąŽ'),
-    ('àą ', 'àŁ‰'),
-    ('à€„', 'à€č'),
-    ('à€œ', 'à€œ'),
-    ('à„', 'à„'),
-    ('à„˜', 'à„Ą'),
-    ('à„±', 'àŠ€'),
-    ('àŠ…', 'àŠŒ'),
-    ('àŠ', 'àŠ'),
-    ('àŠ“', 'àŠš'),
-    ('àŠȘ', 'àŠ°'),
-    ('àŠČ', 'àŠČ'),
-    ('àŠ¶', 'àŠč'),
-    ('àŠœ', 'àŠœ'),
-    ('ৎ', 'ৎ'),
-    ('ড়', 'ঢ়'),
-    ('য়', 'à§Ą'),
-    ('à§°', 'à§±'),
-    ('ৌ', 'ৌ'),
-    ('àš…', 'àšŠ'),
-    ('àš', 'àš'),
-    ('àš“', 'àšš'),
-    ('àšȘ', 'àš°'),
-    ('àšČ', 'àšł'),
-    ('àš”', 'àš¶'),
-    ('àšž', 'àšč'),
-    ('ਖ਼', 'ੜ'),
-    ('ਫ਼', 'ਫ਼'),
-    ('à©Č', '੎'),
-    ('àȘ…', 'àȘ'),
-    ('àȘ', 'àȘ‘'),
-    ('àȘ“', 'àȘš'),
-    ('àȘȘ', 'àȘ°'),
-    ('àȘČ', 'àȘł'),
-    ('àȘ”', 'àȘč'),
-    ('àȘœ', 'àȘœ'),
-    ('ૐ', 'ૐ'),
-    ('à« ', 'à«Ą'),
-    ('à«č', 'à«č'),
-    ('àŹ…', 'àŹŒ'),
-    ('àŹ', 'àŹ'),
-    ('àŹ“', 'àŹš'),
-    ('àŹȘ', 'àŹ°'),
-    ('àŹČ', 'àŹł'),
-    ('àŹ”', 'àŹč'),
-    ('àŹœ', 'àŹœ'),
-    ('ଡ଼', 'ଢ଼'),
-    ('ୟ', 'à­Ą'),
-    ('à­±', 'à­±'),
-    ('àźƒ', 'àźƒ'),
-    ('àź…', 'àźŠ'),
-    ('àźŽ', 'àź'),
-    ('àź’', 'àź•'),
-    ('àź™', 'àźš'),
-    ('àźœ', 'àźœ'),
-    ('àźž', 'àźŸ'),
-    ('àźŁ', 'àź€'),
-    ('àźš', 'àźȘ'),
-    ('àźź', 'àźč'),
-    ('àŻ', 'àŻ'),
-    ('అ', 'ఌ'),
-    ('ఎ', 'ఐ'),
-    ('ఒ', 'à°š'),
-    ('à°Ș', 'à°č'),
-    ('జ', 'జ'),
-    ('ౘ', 'ౚ'),
-    ('ౝ', 'ౝ'),
-    ('à± ', 'à±Ą'),
-    ('àȀ', 'àȀ'),
-    ('àȅ', 'àȌ'),
-    ('àȎ', 'àȐ'),
-    ('àȒ', 'àČš'),
-    ('àČȘ', 'àČł'),
-    ('àČ”', 'àČč'),
-    ('àČœ', 'àČœ'),
-    ('àł', 'àłž'),
-    ('àł ', 'àłĄ'),
-    ('àł±', 'àłČ'),
-    ('àŽ„', 'àŽŒ'),
-    ('àŽŽ', 'àŽ'),
-    ('àŽ’', 'àŽș'),
-    ('àŽœ', 'àŽœ'),
-    ('à”Ž', 'à”Ž'),
-    ('à””', 'à”–'),
-    ('à”Ÿ', 'à”Ą'),
-    ('à”ș', 'à”ż'),
-    ('අ', 'ඖ'),
-    ('ක', 'න'),
-    ('à¶ł', 'à¶»'),
-    ('ග', 'ග'),
-    ('ව', 'ෆ'),
-    ('àŒ€', 'àŒ€'),
-    ('àœ€', 'àœ‡'),
-    ('àœ‰', 'àœŹ'),
-    ('àŸˆ', 'àŸŒ'),
-    ('Ⴀ', 'Ⴥ'),
-    ('Ⴧ', 'Ⴧ'),
-    ('Ⴭ', 'Ⴭ'),
-    ('ა', 'áƒș'),
-    ('჌', 'ቈ'),
-    ('ቊ', 'ቍ'),
-    ('ቐ', 'ቖ'),
-    ('ቘ', 'ቘ'),
-    ('ቚ', 'ቝ'),
-    ('በ', 'ኈ'),
-    ('ኊ', 'ኍ'),
-    ('ነ', 'ኰ'),
-    ('áŠČ', 'ኔ'),
-    ('ኾ', 'ኟ'),
-    ('ዀ', 'ዀ'),
-    ('ዂ', 'ዅ'),
-    ('ወ', 'ዖ'),
-    ('ዘ', 'ጐ'),
-    ('ጒ', 'ጕ'),
-    ('ጘ', 'ፚ'),
-    ('ᎀ', 'ᎏ'),
-    ('Ꭰ', 'Ꮤ'),
-    ('Ꮮ', 'Ꮬ'),
-    ('ᐁ', 'ᙬ'),
-    ('ᙯ', 'ᙿ'),
-    ('ᚁ', 'ᚚ'),
-    ('ᚠ', 'á›Ș'),
-    ('᛼', '᛾'),
-    ('ᜀ', 'ᜑ'),
-    ('ᜟ', 'ᜱ'),
-    ('ᝀ', 'ᝑ'),
-    ('ᝠ', 'ᝬ'),
-    ('᝼', 'ᝰ'),
-    ('ᠠ', 'ᥞ'),
-    ('᱀', '᱄'),
-    ('᱇', 'ᱹ'),
-    ('áąȘ', 'áąȘ'),
-    ('Ṱ', 'ᣔ'),
-    ('က', 'သ'),
-    ('Ṁ', 'Ṗ'),
-    ('ᬅ', 'ᬳ'),
-    ('ᭅ', 'ᭌ'),
-    ('ៃ', '០'),
-    ('៟', '៯'),
-    ('áźș', 'ᯄ'),
-    ('ᰀ', 'ᰣ'),
-    ('ᱍ', 'ᱏ'),
-    ('ᱚ', 'ᱜ'),
-    ('áȀ', 'áȊ'),
-    ('áȐ', 'áČș'),
-    ('áČœ', 'áČż'),
-    ('ᳩ', '᳏'),
-    ('áłź', 'áłł'),
-    ('áł”', 'áł¶'),
-    ('áłș', 'áłș'),
-    ('ᮀ', 'á¶ż'),
-    ('ᾀ', 'ጕ'),
-    ('ጘ', 'ጝ'),
-    ('ጠ', 'ᜅ'),
-    ('ᜈ', 'ᜍ'),
-    ('ᜐ', '᜗'),
-    ('᜙', '᜙'),
-    ('᜛', '᜛'),
-    ('᜝', '᜝'),
-    ('ᜟ', '᜜'),
-    ('ៀ', '៎'),
-    ('៶', '៌'),
-    ('៟', '៟'),
-    ('ῂ', 'ῄ'),
-    ('ῆ', 'ῌ'),
-    ('ῐ', 'ΐ'),
-    ('ῖ', 'Ί'),
-    ('ῠ', '῏'),
-    ('áżČ', '῎'),
-    ('áż¶', 'áżŒ'),
-    ('ⁱ', 'ⁱ'),
-    ('ⁿ', 'ⁿ'),
-    ('ₐ', 'ₜ'),
-    ('ℂ', 'ℂ'),
-    ('ℇ', 'ℇ'),
-    ('ℊ', 'ℓ'),
-    ('ℕ', 'ℕ'),
-    ('ℙ', 'ℝ'),
-    ('â„€', 'â„€'),
-    ('℩', '℩'),
-    ('ℹ', 'ℹ'),
-    ('â„Ș', 'ℭ'),
-    ('ℯ', 'â„č'),
-    ('ℌ', 'ℿ'),
-    ('ⅅ', 'ⅉ'),
-    ('ⅎ', 'ⅎ'),
-    ('Ⅰ', 'ↈ'),
-    ('Ⓐ', 'ⓩ'),
-    ('Ⰰ', 'Ⳁ'),
-    ('âł«', 'âłź'),
-    ('âłČ', 'âłł'),
-    ('⮀', '⎄'),
-    ('⎧', '⎧'),
-    ('⎭', '⎭'),
-    ('⎰', '┧'),
-    ('┯', '┯'),
-    ('ⶀ', 'ⶖ'),
-    ('â¶ ', 'â¶Š'),
-    ('â¶š', 'â¶ź'),
-    ('â¶°', 'â¶¶'),
-    ('â¶ž', 'â¶Ÿ'),
-    ('ⷀ', 'ⷆ'),
-    ('ⷈ', 'ⷎ'),
-    ('ⷐ', 'ⷖ'),
-    ('ⷘ', 'ⷞ'),
-    ('➯', '➯'),
-    ('々', '々'),
-    ('〻', 'ă€Œ'),
-    ('ㄅ', 'ㄯ'),
-    ('ㄱ', 'ㆎ'),
-    ('ㆠ', 'ㆿ'),
-    ('ꀀ', 'ꒌ'),
-    ('ꓐ', 'ꓜ'),
-    ('ꔀ', 'ꘌ'),
-    ('ꘐ', 'ꘟ'),
-    ('ê˜Ș', 'ꘫ'),
-    ('Ꙁ', 'ê™ź'),
-    ('ê™ż', 'ꚝ'),
-    ('ꚠ', 'ê›Ż'),
-    ('꜈', 'ꟍ'),
-    ('Ꟑ', 'ꟑ'),
-    ('ꟓ', 'ꟓ'),
-    ('ꟕ', 'Ƛ'),
-    ('êŸČ', 'ꠁ'),
-    ('ꠃ', 'ꠅ'),
-    ('ꠇ', 'ꠊ'),
-    ('ꠌ', 'ê ą'),
-    ('êĄ€', 'êĄł'),
-    ('êą‚', 'êął'),
-    ('êŁČ', 'êŁ·'),
-    ('êŁ»', 'êŁ»'),
-    ('êŁœ', 'êŁŸ'),
-    ('ꀊ', 'ꀄ'),
-    ('ꀰ', 'ꄆ'),
-    ('ꄠ', 'ꄌ'),
-    ('ꊄ', 'êŠČ'),
-    ('ꧏ', 'ꧏ'),
-    ('Ꚁ', 'êšš'),
-    ('ꩀ', 'ꩂ'),
-    ('ꩄ', 'ꩋ'),
-    ('ê« ', 'ê«Ș'),
-    ('ê«Č', '꫎'),
-    ('êŹ', 'êŹ†'),
-    ('êŹ‰', 'êŹŽ'),
-    ('êŹ‘', 'êŹ–'),
-    ('êŹ ', 'êŹŠ'),
-    ('êŹš', 'êŹź'),
-    ('êŹ°', 'ê­©'),
-    ('ê­°', 'êŻą'),
-    ('가', '힣'),
-    ('ힰ', 'ퟆ'),
-    ('ퟋ', 'ퟻ'),
-    ('ïŹ€', 'ïŹ†'),
-    ('ïŹ“', 'ïŹ—'),
-    ('ﭐ', 'ïź±'),
-    ('ïŻ“', ''),
-    ('', 'ﶏ'),
-    ('ﶒ', 'ﷇ'),
-    ('ï·°', 'ï·»'),
-    ('ïč°', 'ïčŽ'),
-    ('ïč¶', 'ﻌ'),
-    ('ïŒĄ', 'ïŒș'),
-    ('', ''),
-    ('', ''),
-    ('ïż‚', 'ïż‡'),
-    ('ïżŠ', 'ïż'),
-    ('ïż’', 'ïż—'),
-    ('ïżš', 'ïżœ'),
-    ('𐀀', '𐀋'),
-    ('𐀍', '𐀩'),
-    ('𐀹', 'đ€ș'),
-    ('đ€Œ', 'đ€œ'),
-    ('𐀿', '𐁍'),
-    ('𐁐', '𐁝'),
-    ('𐂀', 'đƒș'),
-    ('𐅀', '𐅮'),
-    ('𐊀', '𐊜'),
-    ('𐊠', '𐋐'),
-    ('𐌀', '𐌟'),
-    ('𐌭', '𐍊'),
-    ('𐍐', 'đ”'),
-    ('𐎀', '𐎝'),
-    ('𐎠', '𐏃'),
-    ('𐏈', '𐏏'),
-    ('𐏑', '𐏕'),
-    ('𐐀', '𐒝'),
-    ('𐒰', '𐓓'),
-    ('𐓘', '𐓻'),
-    ('𐔀', '𐔧'),
-    ('𐔰', '𐕣'),
-    ('𐕰', 'đ•ș'),
-    ('đ•Œ', '𐖊'),
-    ('𐖌', '𐖒'),
-    ('𐖔', '𐖕'),
-    ('𐖗', '𐖡'),
-    ('𐖣', '𐖱'),
-    ('𐖳', 'đ–č'),
-    ('𐖻', 'đ–Œ'),
-    ('𐗀', '𐗳'),
-    ('𐘀', 'đœ¶'),
-    ('𐝀', '𐝕'),
-    ('𐝠', '𐝧'),
-    ('𐞀', '𐞅'),
-    ('𐞇', '𐞰'),
-    ('đžČ', 'đžș'),
-    ('𐠀', '𐠅'),
-    ('𐠈', '𐠈'),
-    ('𐠊', '𐠔'),
-    ('𐠷', '𐠞'),
-    ('đ Œ', 'đ Œ'),
-    ('𐠿', '𐡕'),
-    ('𐥠', '𐥶'),
-    ('𐱀', '𐱞'),
-    ('𐣠', 'đŁČ'),
-    ('𐣎', '𐣔'),
-    ('𐀀', '𐀕'),
-    ('𐀠', 'đ€č'),
-    ('𐩀', '𐊷'),
-    ('đŠŸ', '𐊿'),
-    ('𐹀', '𐹀'),
-    ('𐹐', '𐹓'),
-    ('𐹕', '𐹗'),
-    ('𐹙', '𐚔'),
-    ('𐩠', 'đ©Œ'),
-    ('đȘ€', 'đȘœ'),
-    ('𐫀', '𐫇'),
-    ('𐫉', '𐫀'),
-    ('𐬀', '𐏔'),
-    ('𐭀', '𐭕'),
-    ('𐭠', 'đ­Č'),
-    ('𐼀', '𐼑'),
-    ('𐰀', '𐱈'),
-    ('đČ€', 'đČČ'),
-    ('𐳀', 'đłČ'),
-    ('𐮀', '𐮣'),
-    ('𐔊', '𐔄'),
-    ('𐔯', '𐶅'),
-    ('đș€', 'đș©'),
-    ('đș°', 'đș±'),
-    ('𐻂', '𐻄'),
-    ('đŒ€', 'đŒœ'),
-    ('đŒ§', 'đŒ§'),
-    ('đŒ°', 'đœ…'),
-    ('đœ°', 'đŸ'),
-    ('đŸ°', '𐿄'),
-    ('𐿠', '𐿶'),
-    ('𑀃', 'đ‘€·'),
-    ('𑁱', 'đ‘Č'),
-    ('𑁔', '𑁔'),
-    ('𑂃', '𑂯'),
-    ('𑃐', '𑃹'),
-    ('𑄃', '𑄩'),
-    ('𑅄', '𑅄'),
-    ('𑅇', '𑅇'),
-    ('𑅐', 'đ‘…Č'),
-    ('đ‘…¶', 'đ‘…¶'),
-    ('𑆃', 'đ‘†Č'),
-    ('𑇁', '𑇄'),
-    ('𑇚', '𑇚'),
-    ('𑇜', '𑇜'),
-    ('𑈀', '𑈑'),
-    ('𑈓', 'đ‘ˆ«'),
-    ('𑈿', '𑉀'),
-    ('𑊀', '𑊆'),
-    ('𑊈', '𑊈'),
-    ('𑊊', '𑊍'),
-    ('𑊏', '𑊝'),
-    ('𑊟', '𑊹'),
-    ('𑊰', '𑋞'),
-    ('𑌅', '𑌌'),
-    ('𑌏', '𑌐'),
-    ('𑌓', '𑌹'),
-    ('đ‘ŒȘ', '𑌰'),
-    ('đ‘ŒČ', '𑌳'),
-    ('đ‘Œ”', 'đ‘Œč'),
-    ('đ‘Œœ', 'đ‘Œœ'),
-    ('𑍐', '𑍐'),
-    ('𑍝', '𑍡'),
-    ('𑎀', '𑎉'),
-    ('𑎋', '𑎋'),
-    ('𑎎', '𑎎'),
-    ('𑎐', '𑎔'),
-    ('𑎷', '𑎷'),
-    ('𑏑', '𑏑'),
-    ('𑏓', '𑏓'),
-    ('𑐀', '𑐮'),
-    ('𑑇', '𑑊'),
-    ('𑑟', '𑑡'),
-    ('𑒀', '𑒯'),
-    ('𑓄', '𑓅'),
-    ('𑓇', '𑓇'),
-    ('𑖀', '𑖼'),
-    ('𑗘', '𑗛'),
-    ('𑘀', '𑘯'),
-    ('𑙄', '𑙄'),
-    ('𑚀', 'đ‘šȘ'),
-    ('𑚾', '𑚾'),
-    ('𑠀', 'đ‘ «'),
-    ('𑱠', '𑣟'),
-    ('𑣿', '𑀆'),
-    ('𑀉', '𑀉'),
-    ('đ‘€Œ', '𑀓'),
-    ('𑀕', 'đ‘€–'),
-    ('đ‘€˜', '𑀯'),
-    ('𑀿', '𑀿'),
-    ('𑄁', '𑄁'),
-    ('𑩠', '𑩧'),
-    ('đ‘ŠȘ', '𑧐'),
-    ('𑧡', '𑧡'),
-    ('𑧣', '𑧣'),
-    ('𑹀', '𑹀'),
-    ('𑹋', 'đ‘šČ'),
-    ('đ‘šș', 'đ‘šș'),
-    ('𑩐', '𑩐'),
-    ('đ‘©œ', 'đ‘Ș‰'),
-    ('đ‘Ș', 'đ‘Ș'),
-    ('đ‘Ș°', 'đ‘«ž'),
-    ('𑯀', '𑯠'),
-    ('𑰀', '𑰈'),
-    ('𑰊', '𑰼'),
-    ('𑱀', '𑱀'),
-    ('đ‘±Č', 'đ‘ȏ'),
-    ('𑮀', '𑮆'),
-    ('𑮈', '𑮉'),
-    ('𑮋', '𑮰'),
-    ('𑔆', '𑔆'),
-    ('đ‘” ', 'đ‘”„'),
-    ('đ‘”§', '𑔚'),
-    ('đ‘”Ș', '𑶉'),
-    ('đ‘¶˜', 'đ‘¶˜'),
-    ('đ‘» ', 'đ‘»Č'),
-    ('đ‘Œ‚', 'đ‘Œ‚'),
-    ('đ‘Œ„', 'đ‘Œ'),
-    ('đ‘Œ’', 'đ‘Œł'),
-    ('đ‘Ÿ°', 'đ‘Ÿ°'),
-    ('𒀀', '𒎙'),
-    ('𒐀', '𒑼'),
-    ('𒒀', '𒕃'),
-    ('đ’Ÿ', '𒿰'),
-    ('𓀀', '𓐯'),
-    ('𓑁', '𓑆'),
-    ('𓑠', 'đ”ș'),
-    ('𔐀', '𔙆'),
-    ('𖄀', '𖄝'),
-    ('𖠀', '𖹾'),
-    ('đ–©€', 'đ–©ž'),
-    ('đ–©°', 'đ–ȘŸ'),
-    ('𖫐', 'đ–«­'),
-    ('𖬀', '𖬯'),
-    ('𖭀', '𖭃'),
-    ('𖭣', 'đ–­·'),
-    ('đ–­œ', '𖼏'),
-    ('𖔀', '𖔏'),
-    ('đ–č€', 'đ–čż'),
-    ('đ–Œ€', 'đ–œŠ'),
-    ('đ–œ', 'đ–œ'),
-    ('đ–Ÿ“', 'đ–ŸŸ'),
-    ('𖿠', '𖿡'),
-    ('𖿣', '𖿣'),
-    ('𛰀', 'đ›±Ș'),
-    ('đ›±°', 'đ›±Œ'),
-    ('đ›Č€', 'đ›Čˆ'),
-    ('đ›Č', 'đ›Č™'),
-    ('𝐀', '𝑔'),
-    ('𝑖', '𝒜'),
-    ('𝒞', '𝒟'),
-    ('𝒱', '𝒱'),
-    ('đ’„', '𝒩'),
-    ('đ’©', '𝒬'),
-    ('𝒼', 'đ’č'),
-    ('đ’»', 'đ’»'),
-    ('đ’œ', '𝓃'),
-    ('𝓅', '𝔅'),
-    ('𝔇', '𝔊'),
-    ('𝔍', '𝔔'),
-    ('𝔖', '𝔜'),
-    ('𝔞', 'đ”č'),
-    ('đ”»', 'đ”Ÿ'),
-    ('𝕀', '𝕄'),
-    ('𝕆', '𝕆'),
-    ('𝕊', '𝕐'),
-    ('𝕒', 'đš„'),
-    ('𝚹', '𝛀'),
-    ('𝛂', '𝛚'),
-    ('𝛜', 'đ›ș'),
-    ('đ›Œ', '𝜔'),
-    ('𝜖', '𝜮'),
-    ('đœ¶', '𝝎'),
-    ('𝝐', '𝝼'),
-    ('𝝰', '𝞈'),
-    ('𝞊', '𝞹'),
-    ('đžȘ', '𝟂'),
-    ('𝟄', '𝟋'),
-    ('đŒ€', 'đŒž'),
-    ('đŒ„', 'đŒȘ'),
-    ('𞀰', '𞁭'),
-    ('𞄀', '𞄬'),
-    ('đž„·', 'đž„œ'),
-    ('𞅎', '𞅎'),
-    ('𞊐', '𞊭'),
-    ('𞋀', 'đž‹«'),
-    ('𞓐', 'đž“«'),
-    ('𞗐', '𞗭'),
-    ('𞗰', '𞗰'),
-    ('𞟠', '𞟩'),
-    ('𞟹', 'đžŸ«'),
-    ('𞟭', '𞟼'),
-    ('𞟰', 'đžŸŸ'),
-    ('𞠀', '𞣄'),
-    ('𞀀', 'đž„ƒ'),
-    ('đž„‹', 'đž„‹'),
-    ('𞾀', '𞾃'),
-    ('𞾅', '𞾟'),
-    ('𞾡', '𞾱'),
-    ('𞞀', '𞞀'),
-    ('𞾧', '𞾧'),
-    ('đžž©', 'đžžČ'),
-    ('𞾮', 'đžž·'),
-    ('đžžč', 'đžžč'),
-    ('đžž»', 'đžž»'),
-    ('đžč‚', 'đžč‚'),
-    ('đžč‡', 'đžč‡'),
-    ('đžč‰', 'đžč‰'),
-    ('đžč‹', 'đžč‹'),
-    ('đžč', 'đžč'),
-    ('đžč‘', 'đžč’'),
-    ('đžč”', 'đžč”'),
-    ('đžč—', 'đžč—'),
-    ('đžč™', 'đžč™'),
-    ('đžč›', 'đžč›'),
-    ('đžč', 'đžč'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžčĄ', 'đžčą'),
-    ('đžč€', 'đžč€'),
-    ('đžč§', 'đžčȘ'),
-    ('đžčŹ', 'đžčČ'),
-    ('đžčŽ', 'đžč·'),
-    ('đžčč', 'đžčŒ'),
-    ('đžčŸ', 'đžčŸ'),
-    ('đžș€', 'đžș‰'),
-    ('đžș‹', 'đžș›'),
-    ('đžșĄ', 'đžșŁ'),
-    ('đžș„', 'đžș©'),
-    ('đžș«', 'đžș»'),
-    ('🄰', '🅉'),
-    ('🅐', 'đŸ…©'),
-    ('🅰', '🆉'),
-];
-
-pub const CR: &'static [(char, char)] = &[('\r', '\r')];
-
-pub const DOUBLE_QUOTE: &'static [(char, char)] = &[('"', '"')];
-
-pub const EXTEND: &'static [(char, char)] = &[
-    ('\u{300}', '\u{36f}'),
-    ('\u{483}', '\u{489}'),
-    ('\u{591}', '\u{5bd}'),
-    ('\u{5bf}', '\u{5bf}'),
-    ('\u{5c1}', '\u{5c2}'),
-    ('\u{5c4}', '\u{5c5}'),
-    ('\u{5c7}', '\u{5c7}'),
-    ('\u{610}', '\u{61a}'),
-    ('\u{64b}', '\u{65f}'),
-    ('\u{670}', '\u{670}'),
-    ('\u{6d6}', '\u{6dc}'),
-    ('\u{6df}', '\u{6e4}'),
-    ('\u{6e7}', '\u{6e8}'),
-    ('\u{6ea}', '\u{6ed}'),
-    ('\u{711}', '\u{711}'),
-    ('\u{730}', '\u{74a}'),
-    ('\u{7a6}', '\u{7b0}'),
-    ('\u{7eb}', '\u{7f3}'),
-    ('\u{7fd}', '\u{7fd}'),
-    ('\u{816}', '\u{819}'),
-    ('\u{81b}', '\u{823}'),
-    ('\u{825}', '\u{827}'),
-    ('\u{829}', '\u{82d}'),
-    ('\u{859}', '\u{85b}'),
-    ('\u{897}', '\u{89f}'),
-    ('\u{8ca}', '\u{8e1}'),
-    ('\u{8e3}', 'à€ƒ'),
-    ('\u{93a}', '\u{93c}'),
-    ('à€Ÿ', 'à„'),
-    ('\u{951}', '\u{957}'),
-    ('\u{962}', '\u{963}'),
-    ('\u{981}', 'àŠƒ'),
-    ('\u{9bc}', '\u{9bc}'),
-    ('\u{9be}', '\u{9c4}'),
-    ('ে', 'ৈ'),
-    ('ো', '\u{9cd}'),
-    ('\u{9d7}', '\u{9d7}'),
-    ('\u{9e2}', '\u{9e3}'),
-    ('\u{9fe}', '\u{9fe}'),
-    ('\u{a01}', 'àšƒ'),
-    ('\u{a3c}', '\u{a3c}'),
-    ('àšŸ', '\u{a42}'),
-    ('\u{a47}', '\u{a48}'),
-    ('\u{a4b}', '\u{a4d}'),
-    ('\u{a51}', '\u{a51}'),
-    ('\u{a70}', '\u{a71}'),
-    ('\u{a75}', '\u{a75}'),
-    ('\u{a81}', 'àȘƒ'),
-    ('\u{abc}', '\u{abc}'),
-    ('àȘŸ', '\u{ac5}'),
-    ('\u{ac7}', 'ૉ'),
-    ('ો', '\u{acd}'),
-    ('\u{ae2}', '\u{ae3}'),
-    ('\u{afa}', '\u{aff}'),
-    ('\u{b01}', 'àŹƒ'),
-    ('\u{b3c}', '\u{b3c}'),
-    ('\u{b3e}', '\u{b44}'),
-    ('େ', 'ୈ'),
-    ('ୋ', '\u{b4d}'),
-    ('\u{b55}', '\u{b57}'),
-    ('\u{b62}', '\u{b63}'),
-    ('\u{b82}', '\u{b82}'),
-    ('\u{bbe}', 'àŻ‚'),
-    ('àŻ†', 'àŻˆ'),
-    ('àŻŠ', '\u{bcd}'),
-    ('\u{bd7}', '\u{bd7}'),
-    ('\u{c00}', '\u{c04}'),
-    ('\u{c3c}', '\u{c3c}'),
-    ('\u{c3e}', 'ౄ'),
-    ('\u{c46}', '\u{c48}'),
-    ('\u{c4a}', '\u{c4d}'),
-    ('\u{c55}', '\u{c56}'),
-    ('\u{c62}', '\u{c63}'),
-    ('\u{c81}', 'àȃ'),
-    ('\u{cbc}', '\u{cbc}'),
-    ('àČŸ', 'àł„'),
-    ('\u{cc6}', '\u{cc8}'),
-    ('\u{cca}', '\u{ccd}'),
-    ('\u{cd5}', '\u{cd6}'),
-    ('\u{ce2}', '\u{ce3}'),
-    ('àłł', 'àłł'),
-    ('\u{d00}', 'àŽƒ'),
-    ('\u{d3b}', '\u{d3c}'),
-    ('\u{d3e}', '\u{d44}'),
-    ('à”†', 'à”ˆ'),
-    ('à”Š', '\u{d4d}'),
-    ('\u{d57}', '\u{d57}'),
-    ('\u{d62}', '\u{d63}'),
-    ('\u{d81}', 'ඃ'),
-    ('\u{dca}', '\u{dca}'),
-    ('\u{dcf}', '\u{dd4}'),
-    ('\u{dd6}', '\u{dd6}'),
-    ('ෘ', '\u{ddf}'),
-    ('à·Č', 'à·ł'),
-    ('\u{e31}', '\u{e31}'),
-    ('\u{e34}', '\u{e3a}'),
-    ('\u{e47}', '\u{e4e}'),
-    ('\u{eb1}', '\u{eb1}'),
-    ('\u{eb4}', '\u{ebc}'),
-    ('\u{ec8}', '\u{ece}'),
-    ('\u{f18}', '\u{f19}'),
-    ('\u{f35}', '\u{f35}'),
-    ('\u{f37}', '\u{f37}'),
-    ('\u{f39}', '\u{f39}'),
-    ('àŒŸ', 'àŒż'),
-    ('\u{f71}', '\u{f84}'),
-    ('\u{f86}', '\u{f87}'),
-    ('\u{f8d}', '\u{f97}'),
-    ('\u{f99}', '\u{fbc}'),
-    ('\u{fc6}', '\u{fc6}'),
-    ('ါ', '\u{103e}'),
-    ('ၖ', '\u{1059}'),
-    ('\u{105e}', '\u{1060}'),
-    ('ၹ', '၀'),
-    ('ၧ', 'ၭ'),
-    ('\u{1071}', '\u{1074}'),
-    ('\u{1082}', '\u{108d}'),
-    ('ႏ', 'ႏ'),
-    ('ႚ', '\u{109d}'),
-    ('\u{135d}', '\u{135f}'),
-    ('\u{1712}', '\u{1715}'),
-    ('\u{1732}', '\u{1734}'),
-    ('\u{1752}', '\u{1753}'),
-    ('\u{1772}', '\u{1773}'),
-    ('\u{17b4}', '\u{17d3}'),
-    ('\u{17dd}', '\u{17dd}'),
-    ('\u{180b}', '\u{180d}'),
-    ('\u{180f}', '\u{180f}'),
-    ('\u{1885}', '\u{1886}'),
-    ('\u{18a9}', '\u{18a9}'),
-    ('\u{1920}', 'ါ'),
-    ('ူ', '\u{193b}'),
-    ('\u{1a17}', '\u{1a1b}'),
-    ('ᩕ', '\u{1a5e}'),
-    ('\u{1a60}', '\u{1a7c}'),
-    ('\u{1a7f}', '\u{1a7f}'),
-    ('\u{1ab0}', '\u{1ace}'),
-    ('\u{1b00}', 'ᬄ'),
-    ('\u{1b34}', '\u{1b44}'),
-    ('\u{1b6b}', '\u{1b73}'),
-    ('\u{1b80}', 'ἂ'),
-    ('៥', '\u{1bad}'),
-    ('\u{1be6}', '\u{1bf3}'),
-    ('á°€', '\u{1c37}'),
-    ('\u{1cd0}', '\u{1cd2}'),
-    ('\u{1cd4}', '\u{1ce8}'),
-    ('\u{1ced}', '\u{1ced}'),
-    ('\u{1cf4}', '\u{1cf4}'),
-    ('áł·', '\u{1cf9}'),
-    ('\u{1dc0}', '\u{1dff}'),
-    ('\u{200c}', '\u{200c}'),
-    ('\u{20d0}', '\u{20f0}'),
-    ('\u{2cef}', '\u{2cf1}'),
-    ('\u{2d7f}', '\u{2d7f}'),
-    ('\u{2de0}', '\u{2dff}'),
-    ('\u{302a}', '\u{302f}'),
-    ('\u{3099}', '\u{309a}'),
-    ('\u{a66f}', '\u{a672}'),
-    ('\u{a674}', '\u{a67d}'),
-    ('\u{a69e}', '\u{a69f}'),
-    ('\u{a6f0}', '\u{a6f1}'),
-    ('\u{a802}', '\u{a802}'),
-    ('\u{a806}', '\u{a806}'),
-    ('\u{a80b}', '\u{a80b}'),
-    ('ê Ł', 'ê §'),
-    ('\u{a82c}', '\u{a82c}'),
-    ('êą€', 'êą'),
-    ('êąŽ', '\u{a8c5}'),
-    ('\u{a8e0}', '\u{a8f1}'),
-    ('\u{a8ff}', '\u{a8ff}'),
-    ('\u{a926}', '\u{a92d}'),
-    ('\u{a947}', '\u{a953}'),
-    ('\u{a980}', 'ꊃ'),
-    ('\u{a9b3}', '\u{a9c0}'),
-    ('\u{a9e5}', '\u{a9e5}'),
-    ('\u{aa29}', '\u{aa36}'),
-    ('\u{aa43}', '\u{aa43}'),
-    ('\u{aa4c}', 'ꩍ'),
-    ('ꩻ', '꩜'),
-    ('\u{aab0}', '\u{aab0}'),
-    ('\u{aab2}', '\u{aab4}'),
-    ('\u{aab7}', '\u{aab8}'),
-    ('\u{aabe}', '\u{aabf}'),
-    ('\u{aac1}', '\u{aac1}'),
-    ('ê««', 'ê«Ż'),
-    ('ê«”', '\u{aaf6}'),
-    ('êŻŁ', 'êŻȘ'),
-    ('êŻŹ', '\u{abed}'),
-    ('\u{fb1e}', '\u{fb1e}'),
-    ('\u{fe00}', '\u{fe0f}'),
-    ('\u{fe20}', '\u{fe2f}'),
-    ('\u{ff9e}', '\u{ff9f}'),
-    ('\u{101fd}', '\u{101fd}'),
-    ('\u{102e0}', '\u{102e0}'),
-    ('\u{10376}', '\u{1037a}'),
-    ('\u{10a01}', '\u{10a03}'),
-    ('\u{10a05}', '\u{10a06}'),
-    ('\u{10a0c}', '\u{10a0f}'),
-    ('\u{10a38}', '\u{10a3a}'),
-    ('\u{10a3f}', '\u{10a3f}'),
-    ('\u{10ae5}', '\u{10ae6}'),
-    ('\u{10d24}', '\u{10d27}'),
-    ('\u{10d69}', '\u{10d6d}'),
-    ('\u{10eab}', '\u{10eac}'),
-    ('\u{10efc}', '\u{10eff}'),
-    ('\u{10f46}', '\u{10f50}'),
-    ('\u{10f82}', '\u{10f85}'),
-    ('𑀀', '𑀂'),
-    ('\u{11038}', '\u{11046}'),
-    ('\u{11070}', '\u{11070}'),
-    ('\u{11073}', '\u{11074}'),
-    ('\u{1107f}', '𑂂'),
-    ('𑂰', '\u{110ba}'),
-    ('\u{110c2}', '\u{110c2}'),
-    ('\u{11100}', '\u{11102}'),
-    ('\u{11127}', '\u{11134}'),
-    ('𑅅', '𑅆'),
-    ('\u{11173}', '\u{11173}'),
-    ('\u{11180}', '𑆂'),
-    ('𑆳', '\u{111c0}'),
-    ('\u{111c9}', '\u{111cc}'),
-    ('𑇎', '\u{111cf}'),
-    ('𑈬', '\u{11237}'),
-    ('\u{1123e}', '\u{1123e}'),
-    ('\u{11241}', '\u{11241}'),
-    ('\u{112df}', '\u{112ea}'),
-    ('\u{11300}', '𑌃'),
-    ('\u{1133b}', '\u{1133c}'),
-    ('\u{1133e}', '𑍄'),
-    ('𑍇', '𑍈'),
-    ('𑍋', '\u{1134d}'),
-    ('\u{11357}', '\u{11357}'),
-    ('𑍱', '𑍣'),
-    ('\u{11366}', '\u{1136c}'),
-    ('\u{11370}', '\u{11374}'),
-    ('\u{113b8}', '\u{113c0}'),
-    ('\u{113c2}', '\u{113c2}'),
-    ('\u{113c5}', '\u{113c5}'),
-    ('\u{113c7}', '𑏊'),
-    ('𑏌', '\u{113d0}'),
-    ('\u{113d2}', '\u{113d2}'),
-    ('\u{113e1}', '\u{113e2}'),
-    ('𑐔', '\u{11446}'),
-    ('\u{1145e}', '\u{1145e}'),
-    ('\u{114b0}', '\u{114c3}'),
-    ('\u{115af}', '\u{115b5}'),
-    ('𑖾', '\u{115c0}'),
-    ('\u{115dc}', '\u{115dd}'),
-    ('𑘰', '\u{11640}'),
-    ('\u{116ab}', '\u{116b7}'),
-    ('\u{1171d}', '\u{1172b}'),
-    ('𑠬', '\u{1183a}'),
-    ('\u{11930}', 'đ‘€”'),
-    ('đ‘€·', '𑀞'),
-    ('\u{1193b}', '\u{1193e}'),
-    ('đ‘„€', 'đ‘„€'),
-    ('đ‘„‚', '\u{11943}'),
-    ('𑧑', '\u{119d7}'),
-    ('\u{119da}', '\u{119e0}'),
-    ('đ‘§€', 'đ‘§€'),
-    ('\u{11a01}', '\u{11a0a}'),
-    ('\u{11a33}', 'đ‘šč'),
-    ('\u{11a3b}', '\u{11a3e}'),
-    ('\u{11a47}', '\u{11a47}'),
-    ('\u{11a51}', '\u{11a5b}'),
-    ('\u{11a8a}', '\u{11a99}'),
-    ('𑰯', '\u{11c36}'),
-    ('\u{11c38}', '\u{11c3f}'),
-    ('\u{11c92}', '\u{11ca7}'),
-    ('đ‘Č©', '\u{11cb6}'),
-    ('\u{11d31}', '\u{11d36}'),
-    ('\u{11d3a}', '\u{11d3a}'),
-    ('\u{11d3c}', '\u{11d3d}'),
-    ('\u{11d3f}', '\u{11d45}'),
-    ('\u{11d47}', '\u{11d47}'),
-    ('đ‘¶Š', 'đ‘¶Ž'),
-    ('\u{11d90}', '\u{11d91}'),
-    ('đ‘¶“', '\u{11d97}'),
-    ('\u{11ef3}', 'đ‘»¶'),
-    ('\u{11f00}', '\u{11f01}'),
-    ('đ‘Œƒ', 'đ‘Œƒ'),
-    ('đ‘ŒŽ', '\u{11f3a}'),
-    ('đ‘ŒŸ', '\u{11f42}'),
-    ('\u{11f5a}', '\u{11f5a}'),
-    ('\u{13440}', '\u{13440}'),
-    ('\u{13447}', '\u{13455}'),
-    ('\u{1611e}', '\u{1612f}'),
-    ('\u{16af0}', '\u{16af4}'),
-    ('\u{16b30}', '\u{16b36}'),
-    ('\u{16f4f}', '\u{16f4f}'),
-    ('đ–œ‘', 'đ–Ÿ‡'),
-    ('\u{16f8f}', '\u{16f92}'),
-    ('\u{16fe4}', '\u{16fe4}'),
-    ('\u{16ff0}', '\u{16ff1}'),
-    ('\u{1bc9d}', '\u{1bc9e}'),
-    ('\u{1cf00}', '\u{1cf2d}'),
-    ('\u{1cf30}', '\u{1cf46}'),
-    ('\u{1d165}', '\u{1d169}'),
-    ('\u{1d16d}', '\u{1d172}'),
-    ('\u{1d17b}', '\u{1d182}'),
-    ('\u{1d185}', '\u{1d18b}'),
-    ('\u{1d1aa}', '\u{1d1ad}'),
-    ('\u{1d242}', '\u{1d244}'),
-    ('\u{1da00}', '\u{1da36}'),
-    ('\u{1da3b}', '\u{1da6c}'),
-    ('\u{1da75}', '\u{1da75}'),
-    ('\u{1da84}', '\u{1da84}'),
-    ('\u{1da9b}', '\u{1da9f}'),
-    ('\u{1daa1}', '\u{1daaf}'),
-    ('\u{1e000}', '\u{1e006}'),
-    ('\u{1e008}', '\u{1e018}'),
-    ('\u{1e01b}', '\u{1e021}'),
-    ('\u{1e023}', '\u{1e024}'),
-    ('\u{1e026}', '\u{1e02a}'),
-    ('\u{1e08f}', '\u{1e08f}'),
-    ('\u{1e130}', '\u{1e136}'),
-    ('\u{1e2ae}', '\u{1e2ae}'),
-    ('\u{1e2ec}', '\u{1e2ef}'),
-    ('\u{1e4ec}', '\u{1e4ef}'),
-    ('\u{1e5ee}', '\u{1e5ef}'),
-    ('\u{1e8d0}', '\u{1e8d6}'),
-    ('\u{1e944}', '\u{1e94a}'),
-    ('đŸ»', '🏿'),
-    ('\u{e0020}', '\u{e007f}'),
-    ('\u{e0100}', '\u{e01ef}'),
-];
-
-pub const EXTENDNUMLET: &'static [(char, char)] = &[
-    ('_', '_'),
-    ('\u{202f}', '\u{202f}'),
-    ('‿', '⁀'),
-    ('⁔', '⁔'),
-    ('ïžł', ''),
-    ('ïč', 'ïč'),
-    ('ïŒż', 'ïŒż'),
-];
-
-pub const FORMAT: &'static [(char, char)] = &[
-    ('\u{ad}', '\u{ad}'),
-    ('\u{61c}', '\u{61c}'),
-    ('\u{180e}', '\u{180e}'),
-    ('\u{200e}', '\u{200f}'),
-    ('\u{202a}', '\u{202e}'),
-    ('\u{2060}', '\u{2064}'),
-    ('\u{2066}', '\u{206f}'),
-    ('\u{feff}', '\u{feff}'),
-    ('\u{fff9}', '\u{fffb}'),
-    ('\u{13430}', '\u{1343f}'),
-    ('\u{1bca0}', '\u{1bca3}'),
-    ('\u{1d173}', '\u{1d17a}'),
-    ('\u{e0001}', '\u{e0001}'),
-];
-
-pub const HEBREW_LETTER: &'static [(char, char)] = &[
-    ('ڐ', 'ŚȘ'),
-    ('ŚŻ', 'ŚČ'),
-    ('ïŹ', 'ïŹ'),
-    ('ïŹŸ', 'ïŹš'),
-    ('ïŹȘ', 'ïŹ¶'),
-    ('ïŹž', 'ïŹŒ'),
-    ('ïŹŸ', 'ïŹŸ'),
-    ('נּ', 'סּ'),
-    ('ףּ', 'פּ'),
-    ('צּ', 'ﭏ'),
-];
-
-pub const KATAKANA: &'static [(char, char)] = &[
-    ('〱', '〔'),
-    ('゛', '゜'),
-    ('゠', 'ăƒș'),
-    ('ăƒŒ', 'ヿ'),
-    ('ㇰ', 'ㇿ'),
-    ('㋐', 'ă‹Ÿ'),
-    ('㌀', '㍗'),
-    ('', ''),
-    ('𚿰', '𚿳'),
-    ('đšż”', 'đšż»'),
-    ('đšżœ', 'đšżŸ'),
-    ('𛀀', '𛀀'),
-    ('𛄠', '𛄱'),
-    ('𛅕', '𛅕'),
-    ('đ›…€', '𛅧'),
-];
-
-pub const LF: &'static [(char, char)] = &[('\n', '\n')];
-
-pub const MIDLETTER: &'static [(char, char)] = &[
-    (':', ':'),
-    ('·', '·'),
-    ('·', '·'),
-    ('՟', '՟'),
-    ('ŚŽ', 'ŚŽ'),
-    ('‧', '‧'),
-    ('ïž“', 'ïž“'),
-    ('ïč•', 'ïč•'),
-    ('', ''),
-];
-
-pub const MIDNUM: &'static [(char, char)] = &[
-    (',', ','),
-    (';', ';'),
-    ('ÍŸ', 'ÍŸ'),
-    ('։', '։'),
-    ('ی', 'ۍ'),
-    ('ÙŹ', 'ÙŹ'),
-    ('ßž', 'ßž'),
-    ('⁄', '⁄'),
-    ('ïč', 'ïč'),
-    ('ïč”', 'ïč”'),
-    ('', ''),
-    ('', ''),
-];
-
-pub const MIDNUMLET: &'static [(char, char)] = &[
-    ('.', '.'),
-    ('‘', '’'),
-    (' ', ' '),
-    ('ïč’', 'ïč’'),
-    ('', ''),
-    ('', ''),
-];
-
-pub const NEWLINE: &'static [(char, char)] =
-    &[('\u{b}', '\u{c}'), ('\u{85}', '\u{85}'), ('\u{2028}', '\u{2029}')];
-
-pub const NUMERIC: &'static [(char, char)] = &[
-    ('0', '9'),
-    ('\u{600}', '\u{605}'),
-    ('Ù ', 'Ù©'),
-    ('Ù«', 'Ù«'),
-    ('\u{6dd}', '\u{6dd}'),
-    ('Û°', 'Ûč'),
-    ('߀', '߉'),
-    ('\u{890}', '\u{891}'),
-    ('\u{8e2}', '\u{8e2}'),
-    ('à„Š', 'à„Ż'),
-    ('à§Š', 'à§Ż'),
-    ('੊', 'à©Ż'),
-    ('૊', 'à«Ż'),
-    ('à­Š', 'à­Ż'),
-    ('àŻŠ', 'àŻŻ'),
-    ('ొ', 'à±Ż'),
-    ('àłŠ', 'àłŻ'),
-    ('à”Š', 'à”Ż'),
-    ('à·Š', 'à·Ż'),
-    ('àč', 'àč™'),
-    ('໐', '໙'),
-    ('àŒ ', 'àŒ©'),
-    ('၀', '၉'),
-    ('႐', '႙'),
-    ('០', '៩'),
-    ('᠐', '᠙'),
-    ('ᄆ', 'ᄏ'),
-    ('᧐', '᧚'),
-    ('áȘ€', 'áȘ‰'),
-    ('áȘ', 'áȘ™'),
-    ('᭐', '᭙'),
-    ('áź°', 'áźč'),
-    ('᱀', '᱉'),
-    ('᱐', '᱙'),
-    ('꘠', '꘩'),
-    ('êŁ', 'êŁ™'),
-    ('ꀀ', 'ꀉ'),
-    ('꧐', '꧙'),
-    ('ê§°', 'ê§č'),
-    ('꩐', '꩙'),
-    ('êŻ°', 'êŻč'),
-    ('', ''),
-    ('𐒠', '𐒩'),
-    ('𐎰', 'đŽč'),
-    ('𐔀', '𐔉'),
-    ('𑁩', '𑁯'),
-    ('\u{110bd}', '\u{110bd}'),
-    ('\u{110cd}', '\u{110cd}'),
-    ('𑃰', 'đ‘ƒč'),
-    ('đ‘„¶', '𑄿'),
-    ('𑇐', '𑇙'),
-    ('𑋰', 'đ‘‹č'),
-    ('𑑐', '𑑙'),
-    ('𑓐', '𑓙'),
-    ('𑙐', '𑙙'),
-    ('𑛀', '𑛉'),
-    ('𑛐', '𑛣'),
-    ('𑜰', 'đ‘œč'),
-    ('𑣠', '𑣩'),
-    ('𑄐', 'đ‘„™'),
-    ('𑯰', 'đ‘Żč'),
-    ('𑱐', '𑱙'),
-    ('𑔐', 'đ‘”™'),
-    ('đ‘¶ ', 'đ‘¶©'),
-    ('đ‘œ', 'đ‘œ™'),
-    ('𖄰', 'đ–„č'),
-    ('đ–© ', 'đ–©©'),
-    ('đ–«€', '𖫉'),
-    ('𖭐', '𖭙'),
-    ('đ–”°', 'đ–”č'),
-    ('𜳰', 'đœłč'),
-    ('𝟎', '𝟿'),
-    ('𞅀', '𞅉'),
-    ('𞋰', 'đž‹č'),
-    ('𞓰', 'đž“č'),
-    ('đž—±', 'đž—ș'),
-    ('𞄐', 'đž„™'),
-    ('🯰', 'đŸŻč'),
-];
-
-pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇩', '🇿')];
-
-pub const SINGLE_QUOTE: &'static [(char, char)] = &[('\'', '\'')];
-
-pub const WSEGSPACE: &'static [(char, char)] = &[
-    (' ', ' '),
-    ('\u{1680}', '\u{1680}'),
-    ('\u{2000}', '\u{2006}'),
-    ('\u{2008}', '\u{200a}'),
-    ('\u{205f}', '\u{205f}'),
-    ('\u{3000}', '\u{3000}'),
-];
-
-pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')];
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/utf8.rs b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/utf8.rs
deleted file mode 100644
index 69d74945..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/utf8.rs
+++ /dev/null
@@ -1,592 +0,0 @@
-/*!
-Converts ranges of Unicode scalar values to equivalent ranges of UTF-8 bytes.
-
-This is sub-module is useful for constructing byte based automatons that need
-to embed UTF-8 decoding. The most common use of this module is in conjunction
-with the [`hir::ClassUnicodeRange`](crate::hir::ClassUnicodeRange) type.
-
-See the documentation on the `Utf8Sequences` iterator for more details and
-an example.
-
-# Wait, what is this?
-
-This is simplest to explain with an example. Let's say you wanted to test
-whether a particular byte sequence was a Cyrillic character. One possible
-scalar value range is `[0400-04FF]`. The set of allowed bytes for this
-range can be expressed as a sequence of byte ranges:
-
-```text
-[D0-D3][80-BF]
-```
-
-This is simple enough: simply encode the boundaries, `0400` encodes to
-`D0 80` and `04FF` encodes to `D3 BF`, and create ranges from each
-corresponding pair of bytes: `D0` to `D3` and `80` to `BF`.
-
-However, what if you wanted to add the Cyrillic Supplementary characters to
-your range? Your range might then become `[0400-052F]`. The same procedure
-as above doesn't quite work because `052F` encodes to `D4 AF`. The byte ranges
-you'd get from the previous transformation would be `[D0-D4][80-AF]`. However,
-this isn't quite correct because this range doesn't capture many characters,
-for example, `04FF` (because its last byte, `BF` isn't in the range `80-AF`).
-
-Instead, you need multiple sequences of byte ranges:
-
-```text
-[D0-D3][80-BF]  # matches codepoints 0400-04FF
-[D4][80-AF]     # matches codepoints 0500-052F
-```
-
-This gets even more complicated if you want bigger ranges, particularly if
-they naively contain surrogate codepoints. For example, the sequence of byte
-ranges for the basic multilingual plane (`[0000-FFFF]`) look like this:
-
-```text
-[0-7F]
-[C2-DF][80-BF]
-[E0][A0-BF][80-BF]
-[E1-EC][80-BF][80-BF]
-[ED][80-9F][80-BF]
-[EE-EF][80-BF][80-BF]
-```
-
-Note that the byte ranges above will *not* match any erroneous encoding of
-UTF-8, including encodings of surrogate codepoints.
-
-And, of course, for all of Unicode (`[000000-10FFFF]`):
-
-```text
-[0-7F]
-[C2-DF][80-BF]
-[E0][A0-BF][80-BF]
-[E1-EC][80-BF][80-BF]
-[ED][80-9F][80-BF]
-[EE-EF][80-BF][80-BF]
-[F0][90-BF][80-BF][80-BF]
-[F1-F3][80-BF][80-BF][80-BF]
-[F4][80-8F][80-BF][80-BF]
-```
-
-This module automates the process of creating these byte ranges from ranges of
-Unicode scalar values.
-
-# Lineage
-
-I got the idea and general implementation strategy from Russ Cox in his
-[article on regexps](https://web.archive.org/web/20160404141123/https://swtch.com/~rsc/regexp/regexp3.html) and RE2.
-Russ Cox got it from Ken Thompson's `grep` (no source, folk lore?).
-I also got the idea from
-[Lucene](https://github.com/apache/lucene-solr/blob/ae93f4e7ac6a3908046391de35d4f50a0d3c59ca/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java),
-which uses it for executing automata on their term index.
-*/
-
-use core::{char, fmt, iter::FusedIterator, slice};
-
-use alloc::{vec, vec::Vec};
-
-const MAX_UTF8_BYTES: usize = 4;
-
-/// Utf8Sequence represents a sequence of byte ranges.
-///
-/// To match a Utf8Sequence, a candidate byte sequence must match each
-/// successive range.
-///
-/// For example, if there are two ranges, `[C2-DF][80-BF]`, then the byte
-/// sequence `\xDD\x61` would not match because `0x61 < 0x80`.
-#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord)]
-pub enum Utf8Sequence {
-    /// One byte range.
-    One(Utf8Range),
-    /// Two successive byte ranges.
-    Two([Utf8Range; 2]),
-    /// Three successive byte ranges.
-    Three([Utf8Range; 3]),
-    /// Four successive byte ranges.
-    Four([Utf8Range; 4]),
-}
-
-impl Utf8Sequence {
-    /// Creates a new UTF-8 sequence from the encoded bytes of a scalar value
-    /// range.
-    ///
-    /// This assumes that `start` and `end` have the same length.
-    fn from_encoded_range(start: &[u8], end: &[u8]) -> Self {
-        assert_eq!(start.len(), end.len());
-        match start.len() {
-            2 => Utf8Sequence::Two([
-                Utf8Range::new(start[0], end[0]),
-                Utf8Range::new(start[1], end[1]),
-            ]),
-            3 => Utf8Sequence::Three([
-                Utf8Range::new(start[0], end[0]),
-                Utf8Range::new(start[1], end[1]),
-                Utf8Range::new(start[2], end[2]),
-            ]),
-            4 => Utf8Sequence::Four([
-                Utf8Range::new(start[0], end[0]),
-                Utf8Range::new(start[1], end[1]),
-                Utf8Range::new(start[2], end[2]),
-                Utf8Range::new(start[3], end[3]),
-            ]),
-            n => unreachable!("invalid encoded length: {}", n),
-        }
-    }
-
-    /// Returns the underlying sequence of byte ranges as a slice.
-    pub fn as_slice(&self) -> &[Utf8Range] {
-        use self::Utf8Sequence::*;
-        match *self {
-            One(ref r) => slice::from_ref(r),
-            Two(ref r) => &r[..],
-            Three(ref r) => &r[..],
-            Four(ref r) => &r[..],
-        }
-    }
-
-    /// Returns the number of byte ranges in this sequence.
-    ///
-    /// The length is guaranteed to be in the closed interval `[1, 4]`.
-    pub fn len(&self) -> usize {
-        self.as_slice().len()
-    }
-
-    /// Reverses the ranges in this sequence.
-    ///
-    /// For example, if this corresponds to the following sequence:
-    ///
-    /// ```text
-    /// [D0-D3][80-BF]
-    /// ```
-    ///
-    /// Then after reversal, it will be
-    ///
-    /// ```text
-    /// [80-BF][D0-D3]
-    /// ```
-    ///
-    /// This is useful when one is constructing a UTF-8 automaton to match
-    /// character classes in reverse.
-    pub fn reverse(&mut self) {
-        match *self {
-            Utf8Sequence::One(_) => {}
-            Utf8Sequence::Two(ref mut x) => x.reverse(),
-            Utf8Sequence::Three(ref mut x) => x.reverse(),
-            Utf8Sequence::Four(ref mut x) => x.reverse(),
-        }
-    }
-
-    /// Returns true if and only if a prefix of `bytes` matches this sequence
-    /// of byte ranges.
-    pub fn matches(&self, bytes: &[u8]) -> bool {
-        if bytes.len() < self.len() {
-            return false;
-        }
-        for (&b, r) in bytes.iter().zip(self) {
-            if !r.matches(b) {
-                return false;
-            }
-        }
-        true
-    }
-}
-
-impl<'a> IntoIterator for &'a Utf8Sequence {
-    type IntoIter = slice::Iter<'a, Utf8Range>;
-    type Item = &'a Utf8Range;
-
-    fn into_iter(self) -> Self::IntoIter {
-        self.as_slice().iter()
-    }
-}
-
-impl fmt::Debug for Utf8Sequence {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        use self::Utf8Sequence::*;
-        match *self {
-            One(ref r) => write!(f, "{:?}", r),
-            Two(ref r) => write!(f, "{:?}{:?}", r[0], r[1]),
-            Three(ref r) => write!(f, "{:?}{:?}{:?}", r[0], r[1], r[2]),
-            Four(ref r) => {
-                write!(f, "{:?}{:?}{:?}{:?}", r[0], r[1], r[2], r[3])
-            }
-        }
-    }
-}
-
-/// A single inclusive range of UTF-8 bytes.
-#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)]
-pub struct Utf8Range {
-    /// Start of byte range (inclusive).
-    pub start: u8,
-    /// End of byte range (inclusive).
-    pub end: u8,
-}
-
-impl Utf8Range {
-    fn new(start: u8, end: u8) -> Self {
-        Utf8Range { start, end }
-    }
-
-    /// Returns true if and only if the given byte is in this range.
-    pub fn matches(&self, b: u8) -> bool {
-        self.start <= b && b <= self.end
-    }
-}
-
-impl fmt::Debug for Utf8Range {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if self.start == self.end {
-            write!(f, "[{:X}]", self.start)
-        } else {
-            write!(f, "[{:X}-{:X}]", self.start, self.end)
-        }
-    }
-}
-
-/// An iterator over ranges of matching UTF-8 byte sequences.
-///
-/// The iteration represents an alternation of comprehensive byte sequences
-/// that match precisely the set of UTF-8 encoded scalar values.
-///
-/// A byte sequence corresponds to one of the scalar values in the range given
-/// if and only if it completely matches exactly one of the sequences of byte
-/// ranges produced by this iterator.
-///
-/// Each sequence of byte ranges matches a unique set of bytes. That is, no two
-/// sequences will match the same bytes.
-///
-/// # Example
-///
-/// This shows how to match an arbitrary byte sequence against a range of
-/// scalar values.
-///
-/// ```rust
-/// use regex_syntax::utf8::{Utf8Sequences, Utf8Sequence};
-///
-/// fn matches(seqs: &[Utf8Sequence], bytes: &[u8]) -> bool {
-///     for range in seqs {
-///         if range.matches(bytes) {
-///             return true;
-///         }
-///     }
-///     false
-/// }
-///
-/// // Test the basic multilingual plane.
-/// let seqs: Vec<_> = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect();
-///
-/// // UTF-8 encoding of 'a'.
-/// assert!(matches(&seqs, &[0x61]));
-/// // UTF-8 encoding of '☃' (`\u{2603}`).
-/// assert!(matches(&seqs, &[0xE2, 0x98, 0x83]));
-/// // UTF-8 encoding of `\u{10348}` (outside the BMP).
-/// assert!(!matches(&seqs, &[0xF0, 0x90, 0x8D, 0x88]));
-/// // Tries to match against a UTF-8 encoding of a surrogate codepoint,
-/// // which is invalid UTF-8, and therefore fails, despite the fact that
-/// // the corresponding codepoint (0xD800) falls in the range given.
-/// assert!(!matches(&seqs, &[0xED, 0xA0, 0x80]));
-/// // And fails against plain old invalid UTF-8.
-/// assert!(!matches(&seqs, &[0xFF, 0xFF]));
-/// ```
-///
-/// If this example seems circuitous, that's because it is! It's meant to be
-/// illustrative. In practice, you could just try to decode your byte sequence
-/// and compare it with the scalar value range directly. However, this is not
-/// always possible (for example, in a byte based automaton).
-#[derive(Debug)]
-pub struct Utf8Sequences {
-    range_stack: Vec<ScalarRange>,
-}
-
-impl Utf8Sequences {
-    /// Create a new iterator over UTF-8 byte ranges for the scalar value range
-    /// given.
-    pub fn new(start: char, end: char) -> Self {
-        let range =
-            ScalarRange { start: u32::from(start), end: u32::from(end) };
-        Utf8Sequences { range_stack: vec![range] }
-    }
-
-    /// reset resets the scalar value range.
-    /// Any existing state is cleared, but resources may be reused.
-    ///
-    /// N.B. Benchmarks say that this method is dubious.
-    #[doc(hidden)]
-    pub fn reset(&mut self, start: char, end: char) {
-        self.range_stack.clear();
-        self.push(u32::from(start), u32::from(end));
-    }
-
-    fn push(&mut self, start: u32, end: u32) {
-        self.range_stack.push(ScalarRange { start, end });
-    }
-}
-
-struct ScalarRange {
-    start: u32,
-    end: u32,
-}
-
-impl fmt::Debug for ScalarRange {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "ScalarRange({:X}, {:X})", self.start, self.end)
-    }
-}
-
-impl Iterator for Utf8Sequences {
-    type Item = Utf8Sequence;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        'TOP: while let Some(mut r) = self.range_stack.pop() {
-            'INNER: loop {
-                if let Some((r1, r2)) = r.split() {
-                    self.push(r2.start, r2.end);
-                    r.start = r1.start;
-                    r.end = r1.end;
-                    continue 'INNER;
-                }
-                if !r.is_valid() {
-                    continue 'TOP;
-                }
-                for i in 1..MAX_UTF8_BYTES {
-                    let max = max_scalar_value(i);
-                    if r.start <= max && max < r.end {
-                        self.push(max + 1, r.end);
-                        r.end = max;
-                        continue 'INNER;
-                    }
-                }
-                if let Some(ascii_range) = r.as_ascii() {
-                    return Some(Utf8Sequence::One(ascii_range));
-                }
-                for i in 1..MAX_UTF8_BYTES {
-                    let m = (1 << (6 * i)) - 1;
-                    if (r.start & !m) != (r.end & !m) {
-                        if (r.start & m) != 0 {
-                            self.push((r.start | m) + 1, r.end);
-                            r.end = r.start | m;
-                            continue 'INNER;
-                        }
-                        if (r.end & m) != m {
-                            self.push(r.end & !m, r.end);
-                            r.end = (r.end & !m) - 1;
-                            continue 'INNER;
-                        }
-                    }
-                }
-                let mut start = [0; MAX_UTF8_BYTES];
-                let mut end = [0; MAX_UTF8_BYTES];
-                let n = r.encode(&mut start, &mut end);
-                return Some(Utf8Sequence::from_encoded_range(
-                    &start[0..n],
-                    &end[0..n],
-                ));
-            }
-        }
-        None
-    }
-}
-
-impl FusedIterator for Utf8Sequences {}
-
-impl ScalarRange {
-    /// split splits this range if it overlaps with a surrogate codepoint.
-    ///
-    /// Either or both ranges may be invalid.
-    fn split(&self) -> Option<(ScalarRange, ScalarRange)> {
-        if self.start < 0xE000 && self.end > 0xD7FF {
-            Some((
-                ScalarRange { start: self.start, end: 0xD7FF },
-                ScalarRange { start: 0xE000, end: self.end },
-            ))
-        } else {
-            None
-        }
-    }
-
-    /// is_valid returns true if and only if start <= end.
-    fn is_valid(&self) -> bool {
-        self.start <= self.end
-    }
-
-    /// as_ascii returns this range as a Utf8Range if and only if all scalar
-    /// values in this range can be encoded as a single byte.
-    fn as_ascii(&self) -> Option<Utf8Range> {
-        if self.is_ascii() {
-            let start = u8::try_from(self.start).unwrap();
-            let end = u8::try_from(self.end).unwrap();
-            Some(Utf8Range::new(start, end))
-        } else {
-            None
-        }
-    }
-
-    /// is_ascii returns true if the range is ASCII only (i.e., takes a single
-    /// byte to encode any scalar value).
-    fn is_ascii(&self) -> bool {
-        self.is_valid() && self.end <= 0x7f
-    }
-
-    /// encode writes the UTF-8 encoding of the start and end of this range
-    /// to the corresponding destination slices, and returns the number of
-    /// bytes written.
-    ///
-    /// The slices should have room for at least `MAX_UTF8_BYTES`.
-    fn encode(&self, start: &mut [u8], end: &mut [u8]) -> usize {
-        let cs = char::from_u32(self.start).unwrap();
-        let ce = char::from_u32(self.end).unwrap();
-        let ss = cs.encode_utf8(start);
-        let se = ce.encode_utf8(end);
-        assert_eq!(ss.len(), se.len());
-        ss.len()
-    }
-}
-
-fn max_scalar_value(nbytes: usize) -> u32 {
-    match nbytes {
-        1 => 0x007F,
-        2 => 0x07FF,
-        3 => 0xFFFF,
-        4 => 0x0010_FFFF,
-        _ => unreachable!("invalid UTF-8 byte sequence size"),
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use core::char;
-
-    use alloc::{vec, vec::Vec};
-
-    use crate::utf8::{Utf8Range, Utf8Sequences};
-
-    fn rutf8(s: u8, e: u8) -> Utf8Range {
-        Utf8Range::new(s, e)
-    }
-
-    fn never_accepts_surrogate_codepoints(start: char, end: char) {
-        for cp in 0xD800..0xE000 {
-            let buf = encode_surrogate(cp);
-            for r in Utf8Sequences::new(start, end) {
-                if r.matches(&buf) {
-                    panic!(
-                        "Sequence ({:X}, {:X}) contains range {:?}, \
-                         which matches surrogate code point {:X} \
-                         with encoded bytes {:?}",
-                        u32::from(start),
-                        u32::from(end),
-                        r,
-                        cp,
-                        buf,
-                    );
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn codepoints_no_surrogates() {
-        never_accepts_surrogate_codepoints('\u{0}', '\u{FFFF}');
-        never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFF}');
-        never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFE}');
-        never_accepts_surrogate_codepoints('\u{80}', '\u{10FFFF}');
-        never_accepts_surrogate_codepoints('\u{D7FF}', '\u{E000}');
-    }
-
-    #[test]
-    fn single_codepoint_one_sequence() {
-        // Tests that every range of scalar values that contains a single
-        // scalar value is recognized by one sequence of byte ranges.
-        for i in 0x0..=0x0010_FFFF {
-            let c = match char::from_u32(i) {
-                None => continue,
-                Some(c) => c,
-            };
-            let seqs: Vec<_> = Utf8Sequences::new(c, c).collect();
-            assert_eq!(seqs.len(), 1);
-        }
-    }
-
-    #[test]
-    fn bmp() {
-        use crate::utf8::Utf8Sequence::*;
-
-        let seqs = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect::<Vec<_>>();
-        assert_eq!(
-            seqs,
-            vec![
-                One(rutf8(0x0, 0x7F)),
-                Two([rutf8(0xC2, 0xDF), rutf8(0x80, 0xBF)]),
-                Three([
-                    rutf8(0xE0, 0xE0),
-                    rutf8(0xA0, 0xBF),
-                    rutf8(0x80, 0xBF)
-                ]),
-                Three([
-                    rutf8(0xE1, 0xEC),
-                    rutf8(0x80, 0xBF),
-                    rutf8(0x80, 0xBF)
-                ]),
-                Three([
-                    rutf8(0xED, 0xED),
-                    rutf8(0x80, 0x9F),
-                    rutf8(0x80, 0xBF)
-                ]),
-                Three([
-                    rutf8(0xEE, 0xEF),
-                    rutf8(0x80, 0xBF),
-                    rutf8(0x80, 0xBF)
-                ]),
-            ]
-        );
-    }
-
-    #[test]
-    fn reverse() {
-        use crate::utf8::Utf8Sequence::*;
-
-        let mut s = One(rutf8(0xA, 0xB));
-        s.reverse();
-        assert_eq!(s.as_slice(), &[rutf8(0xA, 0xB)]);
-
-        let mut s = Two([rutf8(0xA, 0xB), rutf8(0xB, 0xC)]);
-        s.reverse();
-        assert_eq!(s.as_slice(), &[rutf8(0xB, 0xC), rutf8(0xA, 0xB)]);
-
-        let mut s = Three([rutf8(0xA, 0xB), rutf8(0xB, 0xC), rutf8(0xC, 0xD)]);
-        s.reverse();
-        assert_eq!(
-            s.as_slice(),
-            &[rutf8(0xC, 0xD), rutf8(0xB, 0xC), rutf8(0xA, 0xB)]
-        );
-
-        let mut s = Four([
-            rutf8(0xA, 0xB),
-            rutf8(0xB, 0xC),
-            rutf8(0xC, 0xD),
-            rutf8(0xD, 0xE),
-        ]);
-        s.reverse();
-        assert_eq!(
-            s.as_slice(),
-            &[
-                rutf8(0xD, 0xE),
-                rutf8(0xC, 0xD),
-                rutf8(0xB, 0xC),
-                rutf8(0xA, 0xB)
-            ]
-        );
-    }
-
-    fn encode_surrogate(cp: u32) -> [u8; 3] {
-        const TAG_CONT: u8 = 0b1000_0000;
-        const TAG_THREE_B: u8 = 0b1110_0000;
-
-        assert!(0xD800 <= cp && cp < 0xE000);
-        let mut dst = [0; 3];
-        dst[0] = u8::try_from(cp >> 12 & 0x0F).unwrap() | TAG_THREE_B;
-        dst[1] = u8::try_from(cp >> 6 & 0x3F).unwrap() | TAG_CONT;
-        dst[2] = u8::try_from(cp & 0x3F).unwrap() | TAG_CONT;
-        dst
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/test b/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/test
deleted file mode 100755
index 8626c3b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/test
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# cd to the directory containing this crate's Cargo.toml so that we don't need
-# to pass --manifest-path to every `cargo` command.
-cd "$(dirname "$0")"
-
-# This is a convenience script for running a broad swath of the syntax tests.
-echo "===== DEFAULT FEATURES ==="
-cargo test
-
-features=(
-    std
-    unicode
-    unicode-age
-    unicode-bool
-    unicode-case
-    unicode-gencat
-    unicode-perl
-    unicode-script
-    unicode-segment
-)
-for f in "${features[@]}"; do
-    echo "=== FEATURE: $f ==="
-    # We only run library tests because I couldn't figure out how to easily
-    # make doc tests run in 'no_std' mode. In particular, without the Error
-    # trait, using '?' in doc tests seems tricky.
-    cargo test --no-default-features --lib --features "$f"
-done
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/.cargo_vcs_info.json
deleted file mode 100644
index c7ac96ee..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "6d267fbd85b257e4416c9f020131c6da168e1d3d"
-  },
-  "path_in_vcs": "relative-path"
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/Cargo.toml
deleted file mode 100644
index 4208d95..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/Cargo.toml
+++ /dev/null
@@ -1,42 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.66"
-name = "relative-path"
-version = "1.9.3"
-authors = ["John-John Tedro <udoprog@tedro.se>"]
-description = "Portable, relative paths for Rust."
-homepage = "https://github.com/udoprog/relative-path"
-documentation = "https://docs.rs/relative-path"
-readme = "README.md"
-keywords = ["path"]
-categories = ["filesystem"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/udoprog/relative-path"
-
-[package.metadata.docs.rs]
-all-features = true
-
-[dependencies.serde]
-version = "1.0.160"
-optional = true
-
-[dev-dependencies.anyhow]
-version = "1.0.76"
-
-[dev-dependencies.serde]
-version = "1.0.160"
-features = ["derive"]
-
-[features]
-default = []
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/Cargo.toml.orig
deleted file mode 100644
index 4b15de0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/Cargo.toml.orig
+++ /dev/null
@@ -1,27 +0,0 @@
-[package]
-name = "relative-path"
-version = "1.9.3"
-authors = ["John-John Tedro <udoprog@tedro.se>"]
-edition = "2021"
-rust-version = "1.66"
-description = "Portable, relative paths for Rust."
-documentation = "https://docs.rs/relative-path"
-readme = "README.md"
-homepage = "https://github.com/udoprog/relative-path"
-repository = "https://github.com/udoprog/relative-path"
-license = "MIT OR Apache-2.0"
-keywords = ["path"]
-categories = ["filesystem"]
-
-[features]
-default = []
-
-[dependencies]
-serde = { version = "1.0.160", optional = true }
-
-[dev-dependencies]
-anyhow = "1.0.76"
-serde = { version = "1.0.160", features = ["derive"] }
-
-[package.metadata.docs.rs]
-all-features = true
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/README.md b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/README.md
deleted file mode 100644
index 75520b0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/README.md
+++ /dev/null
@@ -1,279 +0,0 @@
-# relative-path
-
-[<img alt="github" src="https://img.shields.io/badge/github-udoprog/relative--path-8da0cb?style=for-the-badge&logo=github" height="20">](https://github.com/udoprog/relative-path)
-[<img alt="crates.io" src="https://img.shields.io/crates/v/relative-path.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/relative-path)
-[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-relative--path-66c2a5?style=for-the-badge&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K" height="20">](https://docs.rs/relative-path)
-[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/udoprog/relative-path/ci.yml?branch=main&style=for-the-badge" height="20">](https://github.com/udoprog/relative-path/actions?query=branch%3Amain)
-
-Portable relative UTF-8 paths for Rust.
-
-This crate provides a module analogous to [`std::path`], with the following
-characteristics:
-
-* The path separator is set to a fixed character (`/`), regardless of
-  platform.
-* Relative paths cannot represent a path in the filesystem without first
-  specifying *what they are relative to* using functions such as [`to_path`]
-  and [`to_logical_path`].
-* Relative paths are always guaranteed to be valid UTF-8 strings.
-
-On top of this we support many operations that guarantee the same behavior
-across platforms.
-
-For more utilities to manipulate relative paths, see the
-[`relative-path-utils` crate].
-
-<br>
-
-## Usage
-
-Add `relative-path` to your `Cargo.toml`:
-
-```toml
-relative-path = "1.9.2"
-```
-
-Start using relative paths:
-
-```rust
-use serde::{Serialize, Deserialize};
-use relative_path::RelativePath;
-
-#[derive(Serialize, Deserialize)]
-struct Manifest<'a> {
-    #[serde(borrow)]
-    source: &'a RelativePath,
-}
-
-```
-
-<br>
-
-## Serde Support
-
-This library includes serde support that can be enabled with the `serde`
-feature.
-
-<br>
-
-## Why is `std::path` a portability hazard?
-
-Path representations differ across platforms.
-
-* Windows permits using drive volumes (multiple roots) as a prefix (e.g.
-  `"c:\"`) and backslash (`\`) as a separator.
-* Unix references absolute paths from a single root and uses forward slash
-  (`/`) as a separator.
-
-If we use `PathBuf`, Storing paths in a manifest would allow our application
-to build and run on one platform but potentially not others.
-
-Consider the following data model and corresponding toml for a manifest:
-
-```rust
-use std::path::PathBuf;
-
-use serde::{Serialize, Deserialize};
-
-#[derive(Serialize, Deserialize)]
-struct Manifest {
-    source: PathBuf,
-}
-```
-
-```toml
-source = "C:\\Users\\udoprog\\repo\\data\\source"
-```
-
-This will run for you (assuming `source` exists). So you go ahead and check
-the manifest into git. The next day your Linux colleague calls you and
-wonders what they have ever done to wrong you?
-
-So what went wrong? Well two things. You forgot to make the `source`
-relative, so anyone at the company which has a different username than you
-won't be able to use it. So you go ahead and fix that:
-
-```toml
-source = "data\\source"
-```
-
-But there is still one problem! A backslash (`\`) is only a legal path
-separator on Windows. Luckily you learn that forward slashes are supported
-both on Windows *and* Linux. So you opt for:
-
-```toml
-source = "data/source"
-```
-
-Things are working now. So all is well... Right? Sure, but we can do better.
-
-This crate provides types that work with *portable relative paths* (hence
-the name). So by using [`RelativePath`] we can systematically help avoid
-portability issues like the one above. Avoiding issues at the source is
-preferably over spending 5 minutes of onboarding time on a theoretical
-problem, hoping that your new hires will remember what to do if they ever
-encounter it.
-
-Using [`RelativePathBuf`] we can fix our data model like this:
-
-```rust
-use relative_path::RelativePathBuf;
-use serde::{Serialize, Deserialize};
-
-#[derive(Serialize, Deserialize)]
-pub struct Manifest {
-    source: RelativePathBuf,
-}
-```
-
-And where it's used:
-
-```rust
-use std::fs;
-use std::env::current_dir;
-
-let manifest: Manifest = todo!();
-
-let root = current_dir()?;
-let source = manifest.source.to_path(&root);
-let content = fs::read(&source)?;
-```
-
-<br>
-
-## Overview
-
-Conversion to a platform-specific [`Path`] happens through the [`to_path`]
-and [`to_logical_path`] functions. Where you are required to specify the
-path that prefixes the relative path. This can come from a function such as
-[`std::env::current_dir`].
-
-```rust
-use std::env::current_dir;
-use std::path::Path;
-
-use relative_path::RelativePath;
-
-let root = current_dir()?;
-
-// to_path unconditionally concatenates a relative path with its base:
-let relative_path = RelativePath::new("../foo/./bar");
-let full_path = relative_path.to_path(&root);
-assert_eq!(full_path, root.join("..\\foo\\.\\bar"));
-
-// to_logical_path tries to apply the logical operations that the relative
-// path corresponds to:
-let relative_path = RelativePath::new("../foo/./bar");
-let full_path = relative_path.to_logical_path(&root);
-
-// Replicate the operation performed by `to_logical_path`.
-let mut parent = root.clone();
-parent.pop();
-assert_eq!(full_path, parent.join("foo\\bar"));
-```
-
-When two relative paths are compared to each other, their exact component
-makeup determines equality.
-
-```rust
-use relative_path::RelativePath;
-
-assert_ne!(
-    RelativePath::new("foo/bar/../baz"),
-    RelativePath::new("foo/baz")
-);
-```
-
-Using platform-specific path separators to construct relative paths is not
-supported.
-
-Path separators from other platforms are simply treated as part of a
-component:
-
-```rust
-use relative_path::RelativePath;
-
-assert_ne!(
-    RelativePath::new("foo/bar"),
-    RelativePath::new("foo\\bar")
-);
-
-assert_eq!(1, RelativePath::new("foo\\bar").components().count());
-assert_eq!(2, RelativePath::new("foo/bar").components().count());
-```
-
-To see if two relative paths are equivalent you can use [`normalize`]:
-
-```rust
-use relative_path::RelativePath;
-
-assert_eq!(
-    RelativePath::new("foo/bar/../baz").normalize(),
-    RelativePath::new("foo/baz").normalize(),
-);
-```
-
-<br>
-
-## Additional portability notes
-
-While relative paths avoid the most egregious portability issue, that
-absolute paths will work equally unwell on all platforms. We cannot avoid
-all. This section tries to document additional portability hazards that we
-are aware of.
-
-[`RelativePath`], similarly to [`Path`], makes no guarantees that its
-constituent components make up legal file names. While components are
-strictly separated by slashes, we can still store things in them which may
-not be used as legal paths on all platforms.
-
-* A `NUL` character is not permitted on unix platforms - this is a
-  terminator in C-based filesystem APIs. Slash (`/`) is also used as a path
-  separator.
-* Windows has a number of [reserved characters and names][windows-reserved]
-  (like `CON`, `PRN`, and `AUX`) which cannot legally be part of a
-  filesystem component.
-* Windows paths are [case-insensitive by default][windows-case]. So,
-  `Foo.txt` and `foo.txt` are the same files on windows. But they are
-  considered different paths on most unix systems.
-
-A relative path that *accidentally* contains a platform-specific components
-will largely result in a nonsensical paths being generated in the hope that
-they will fail fast during development and testing.
-
-```rust
-use relative_path::{RelativePath, PathExt};
-use std::path::Path;
-
-if cfg!(windows) {
-    assert_eq!(
-        Path::new("foo\\c:\\bar\\baz"),
-        RelativePath::new("c:\\bar\\baz").to_path("foo")
-    );
-}
-
-if cfg!(unix) {
-    assert_eq!(
-        Path::new("foo/bar/baz"),
-        RelativePath::new("/bar/baz").to_path("foo")
-    );
-}
-
-assert_eq!(
-    Path::new("foo").relative_to("bar")?,
-    RelativePath::new("../foo"),
-);
-```
-
-[`None`]: https://doc.rust-lang.org/std/option/enum.Option.html
-[`normalize`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.normalize
-[`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
-[`RelativePath`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html
-[`RelativePathBuf`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePathBuf.html
-[`std::env::current_dir`]: https://doc.rust-lang.org/std/env/fn.current_dir.html
-[`std::path`]: https://doc.rust-lang.org/std/path/index.html
-[`to_logical_path`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.to_logical_path
-[`to_path`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.to_path
-[windows-reserved]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
-[windows-case]: https://learn.microsoft.com/en-us/windows/wsl/case-sensitivity
-[`relative-path-utils` crate]: https://docs.rs/relative-path-utils
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/lib.rs
deleted file mode 100644
index 5decb0f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/lib.rs
+++ /dev/null
@@ -1,2399 +0,0 @@
-//! [<img alt="github" src="https://img.shields.io/badge/github-udoprog/relative--path-8da0cb?style=for-the-badge&logo=github" height="20">](https://github.com/udoprog/relative-path)
-//! [<img alt="crates.io" src="https://img.shields.io/crates/v/relative-path.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/relative-path)
-//! [<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-relative--path-66c2a5?style=for-the-badge&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K" height="20">](https://docs.rs/relative-path)
-//!
-//! Portable relative UTF-8 paths for Rust.
-//!
-//! This crate provides a module analogous to [`std::path`], with the following
-//! characteristics:
-//!
-//! * The path separator is set to a fixed character (`/`), regardless of
-//!   platform.
-//! * Relative paths cannot represent a path in the filesystem without first
-//!   specifying *what they are relative to* using functions such as [`to_path`]
-//!   and [`to_logical_path`].
-//! * Relative paths are always guaranteed to be valid UTF-8 strings.
-//!
-//! On top of this we support many operations that guarantee the same behavior
-//! across platforms.
-//!
-//! For more utilities to manipulate relative paths, see the
-//! [`relative-path-utils` crate].
-//!
-//! <br>
-//!
-//! ## Usage
-//!
-//! Add `relative-path` to your `Cargo.toml`:
-//!
-//! ```toml
-//! relative-path = "1.9.2"
-//! ```
-//!
-//! Start using relative paths:
-//!
-//! ```
-//! use serde::{Serialize, Deserialize};
-//! use relative_path::RelativePath;
-//!
-//! #[derive(Serialize, Deserialize)]
-//! struct Manifest<'a> {
-//!     #[serde(borrow)]
-//!     source: &'a RelativePath,
-//! }
-//!
-//! # Ok::<_, Box<dyn std::error::Error>>(())
-//! ```
-//!
-//! <br>
-//!
-//! ## Serde Support
-//!
-//! This library includes serde support that can be enabled with the `serde`
-//! feature.
-//!
-//! <br>
-//!
-//! ## Why is `std::path` a portability hazard?
-//!
-//! Path representations differ across platforms.
-//!
-//! * Windows permits using drive volumes (multiple roots) as a prefix (e.g.
-//!   `"c:\"`) and backslash (`\`) as a separator.
-//! * Unix references absolute paths from a single root and uses forward slash
-//!   (`/`) as a separator.
-//!
-//! If we use `PathBuf`, Storing paths in a manifest would allow our application
-//! to build and run on one platform but potentially not others.
-//!
-//! Consider the following data model and corresponding toml for a manifest:
-//!
-//! ```rust
-//! use std::path::PathBuf;
-//!
-//! use serde::{Serialize, Deserialize};
-//!
-//! #[derive(Serialize, Deserialize)]
-//! struct Manifest {
-//!     source: PathBuf,
-//! }
-//! ```
-//!
-//! ```toml
-//! source = "C:\\Users\\udoprog\\repo\\data\\source"
-//! ```
-//!
-//! This will run for you (assuming `source` exists). So you go ahead and check
-//! the manifest into git. The next day your Linux colleague calls you and
-//! wonders what they have ever done to wrong you?
-//!
-//! So what went wrong? Well two things. You forgot to make the `source`
-//! relative, so anyone at the company which has a different username than you
-//! won't be able to use it. So you go ahead and fix that:
-//!
-//! ```toml
-//! source = "data\\source"
-//! ```
-//!
-//! But there is still one problem! A backslash (`\`) is only a legal path
-//! separator on Windows. Luckily you learn that forward slashes are supported
-//! both on Windows *and* Linux. So you opt for:
-//!
-//! ```toml
-//! source = "data/source"
-//! ```
-//!
-//! Things are working now. So all is well... Right? Sure, but we can do better.
-//!
-//! This crate provides types that work with *portable relative paths* (hence
-//! the name). So by using [`RelativePath`] we can systematically help avoid
-//! portability issues like the one above. Avoiding issues at the source is
-//! preferably over spending 5 minutes of onboarding time on a theoretical
-//! problem, hoping that your new hires will remember what to do if they ever
-//! encounter it.
-//!
-//! Using [`RelativePathBuf`] we can fix our data model like this:
-//!
-//! ```rust
-//! use relative_path::RelativePathBuf;
-//! use serde::{Serialize, Deserialize};
-//!
-//! #[derive(Serialize, Deserialize)]
-//! pub struct Manifest {
-//!     source: RelativePathBuf,
-//! }
-//! ```
-//!
-//! And where it's used:
-//!
-//! ```rust,no_run
-//! # use relative_path::RelativePathBuf;
-//! # use serde::{Serialize, Deserialize};
-//! # #[derive(Serialize, Deserialize)] pub struct Manifest { source: RelativePathBuf }
-//! use std::fs;
-//! use std::env::current_dir;
-//!
-//! let manifest: Manifest = todo!();
-//!
-//! let root = current_dir()?;
-//! let source = manifest.source.to_path(&root);
-//! let content = fs::read(&source)?;
-//! # Ok::<_, Box<dyn std::error::Error>>(())
-//! ```
-//!
-//! <br>
-//!
-//! ## Overview
-//!
-//! Conversion to a platform-specific [`Path`] happens through the [`to_path`]
-//! and [`to_logical_path`] functions. Where you are required to specify the
-//! path that prefixes the relative path. This can come from a function such as
-//! [`std::env::current_dir`].
-//!
-//! ```rust
-//! use std::env::current_dir;
-//! use std::path::Path;
-//!
-//! use relative_path::RelativePath;
-//!
-//! let root = current_dir()?;
-//!
-//! # if cfg!(windows) {
-//! // to_path unconditionally concatenates a relative path with its base:
-//! let relative_path = RelativePath::new("../foo/./bar");
-//! let full_path = relative_path.to_path(&root);
-//! assert_eq!(full_path, root.join("..\\foo\\.\\bar"));
-//!
-//! // to_logical_path tries to apply the logical operations that the relative
-//! // path corresponds to:
-//! let relative_path = RelativePath::new("../foo/./bar");
-//! let full_path = relative_path.to_logical_path(&root);
-//!
-//! // Replicate the operation performed by `to_logical_path`.
-//! let mut parent = root.clone();
-//! parent.pop();
-//! assert_eq!(full_path, parent.join("foo\\bar"));
-//! # }
-//! # Ok::<_, std::io::Error>(())
-//! ```
-//!
-//! When two relative paths are compared to each other, their exact component
-//! makeup determines equality.
-//!
-//! ```rust
-//! use relative_path::RelativePath;
-//!
-//! assert_ne!(
-//!     RelativePath::new("foo/bar/../baz"),
-//!     RelativePath::new("foo/baz")
-//! );
-//! ```
-//!
-//! Using platform-specific path separators to construct relative paths is not
-//! supported.
-//!
-//! Path separators from other platforms are simply treated as part of a
-//! component:
-//!
-//! ```rust
-//! use relative_path::RelativePath;
-//!
-//! assert_ne!(
-//!     RelativePath::new("foo/bar"),
-//!     RelativePath::new("foo\\bar")
-//! );
-//!
-//! assert_eq!(1, RelativePath::new("foo\\bar").components().count());
-//! assert_eq!(2, RelativePath::new("foo/bar").components().count());
-//! ```
-//!
-//! To see if two relative paths are equivalent you can use [`normalize`]:
-//!
-//! ```rust
-//! use relative_path::RelativePath;
-//!
-//! assert_eq!(
-//!     RelativePath::new("foo/bar/../baz").normalize(),
-//!     RelativePath::new("foo/baz").normalize(),
-//! );
-//! ```
-//!
-//! <br>
-//!
-//! ## Additional portability notes
-//!
-//! While relative paths avoid the most egregious portability issue, that
-//! absolute paths will work equally unwell on all platforms. We cannot avoid
-//! all. This section tries to document additional portability hazards that we
-//! are aware of.
-//!
-//! [`RelativePath`], similarly to [`Path`], makes no guarantees that its
-//! constituent components make up legal file names. While components are
-//! strictly separated by slashes, we can still store things in them which may
-//! not be used as legal paths on all platforms.
-//!
-//! * A `NUL` character is not permitted on unix platforms - this is a
-//!   terminator in C-based filesystem APIs. Slash (`/`) is also used as a path
-//!   separator.
-//! * Windows has a number of [reserved characters and names][windows-reserved]
-//!   (like `CON`, `PRN`, and `AUX`) which cannot legally be part of a
-//!   filesystem component.
-//! * Windows paths are [case-insensitive by default][windows-case]. So,
-//!   `Foo.txt` and `foo.txt` are the same files on windows. But they are
-//!   considered different paths on most unix systems.
-//!
-//! A relative path that *accidentally* contains a platform-specific components
-//! will largely result in a nonsensical paths being generated in the hope that
-//! they will fail fast during development and testing.
-//!
-//! ```rust
-//! use relative_path::{RelativePath, PathExt};
-//! use std::path::Path;
-//!
-//! if cfg!(windows) {
-//!     assert_eq!(
-//!         Path::new("foo\\c:\\bar\\baz"),
-//!         RelativePath::new("c:\\bar\\baz").to_path("foo")
-//!     );
-//! }
-//!
-//! if cfg!(unix) {
-//!     assert_eq!(
-//!         Path::new("foo/bar/baz"),
-//!         RelativePath::new("/bar/baz").to_path("foo")
-//!     );
-//! }
-//!
-//! assert_eq!(
-//!     Path::new("foo").relative_to("bar")?,
-//!     RelativePath::new("../foo"),
-//! );
-//! # Ok::<_, Box<dyn std::error::Error>>(())
-//! ```
-//!
-//! [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html
-//! [`normalize`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.normalize
-//! [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
-//! [`RelativePath`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html
-//! [`RelativePathBuf`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePathBuf.html
-//! [`std::env::current_dir`]: https://doc.rust-lang.org/std/env/fn.current_dir.html
-//! [`std::path`]: https://doc.rust-lang.org/std/path/index.html
-//! [`to_logical_path`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.to_logical_path
-//! [`to_path`]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.to_path
-//! [windows-reserved]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
-//! [windows-case]: https://learn.microsoft.com/en-us/windows/wsl/case-sensitivity
-//! [`relative-path-utils` crate]: https://docs.rs/relative-path-utils
-
-// This file contains parts that are Copyright 2015 The Rust Project Developers, copied from:
-// https://github.com/rust-lang/rust
-// cb2a656cdfb6400ac0200c661267f91fabf237e2 src/libstd/path.rs
-
-#![allow(clippy::manual_let_else)]
-#![deny(missing_docs)]
-
-mod path_ext;
-
-#[cfg(test)]
-mod tests;
-
-pub use path_ext::{PathExt, RelativeToError};
-
-use std::borrow::{Borrow, Cow};
-use std::cmp;
-use std::error;
-use std::fmt;
-use std::hash::{Hash, Hasher};
-use std::iter::FromIterator;
-use std::mem;
-use std::ops;
-use std::path;
-use std::rc::Rc;
-use std::str;
-use std::sync::Arc;
-
-const STEM_SEP: char = '.';
-const CURRENT_STR: &str = ".";
-const PARENT_STR: &str = "..";
-
-const SEP: char = '/';
-
-fn split_file_at_dot(input: &str) -> (Option<&str>, Option<&str>) {
-    if input == PARENT_STR {
-        return (Some(input), None);
-    }
-
-    let mut iter = input.rsplitn(2, STEM_SEP);
-
-    let after = iter.next();
-    let before = iter.next();
-
-    if before == Some("") {
-        (Some(input), None)
-    } else {
-        (before, after)
-    }
-}
-
-// Iterate through `iter` while it matches `prefix`; return `None` if `prefix`
-// is not a prefix of `iter`, otherwise return `Some(iter_after_prefix)` giving
-// `iter` after having exhausted `prefix`.
-fn iter_after<'a, 'b, I, J>(mut iter: I, mut prefix: J) -> Option<I>
-where
-    I: Iterator<Item = Component<'a>> + Clone,
-    J: Iterator<Item = Component<'b>>,
-{
-    loop {
-        let mut iter_next = iter.clone();
-        match (iter_next.next(), prefix.next()) {
-            (Some(x), Some(y)) if x == y => (),
-            (Some(_) | None, Some(_)) => return None,
-            (Some(_) | None, None) => return Some(iter),
-        }
-        iter = iter_next;
-    }
-}
-
-/// A single path component.
-///
-/// Accessed using the [`RelativePath::components`] iterator.
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::{Component, RelativePath};
-///
-/// let path = RelativePath::new("foo/../bar/./baz");
-/// let mut it = path.components();
-///
-/// assert_eq!(Some(Component::Normal("foo")), it.next());
-/// assert_eq!(Some(Component::ParentDir), it.next());
-/// assert_eq!(Some(Component::Normal("bar")), it.next());
-/// assert_eq!(Some(Component::CurDir), it.next());
-/// assert_eq!(Some(Component::Normal("baz")), it.next());
-/// assert_eq!(None, it.next());
-/// ```
-#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
-pub enum Component<'a> {
-    /// The current directory `.`.
-    CurDir,
-    /// The parent directory `..`.
-    ParentDir,
-    /// A normal path component as a string.
-    Normal(&'a str),
-}
-
-impl<'a> Component<'a> {
-    /// Extracts the underlying [`str`] slice.
-    ///
-    /// [`str`]: prim@str
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use relative_path::{RelativePath, Component};
-    ///
-    /// let path = RelativePath::new("./tmp/../foo/bar.txt");
-    /// let components: Vec<_> = path.components().map(Component::as_str).collect();
-    /// assert_eq!(&components, &[".", "tmp", "..", "foo", "bar.txt"]);
-    /// ```
-    #[must_use]
-    pub fn as_str(self) -> &'a str {
-        use self::Component::{CurDir, Normal, ParentDir};
-
-        match self {
-            CurDir => CURRENT_STR,
-            ParentDir => PARENT_STR,
-            Normal(name) => name,
-        }
-    }
-}
-
-/// [`AsRef<RelativePath>`] implementation for [`Component`].
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePath;
-///
-/// let mut it = RelativePath::new("../foo/bar").components();
-///
-/// let a = it.next().ok_or("a")?;
-/// let b = it.next().ok_or("b")?;
-/// let c = it.next().ok_or("c")?;
-///
-/// let a: &RelativePath = a.as_ref();
-/// let b: &RelativePath = b.as_ref();
-/// let c: &RelativePath = c.as_ref();
-///
-/// assert_eq!(a, "..");
-/// assert_eq!(b, "foo");
-/// assert_eq!(c, "bar");
-///
-/// # Ok::<_, Box<dyn std::error::Error>>(())
-/// ```
-impl AsRef<RelativePath> for Component<'_> {
-    #[inline]
-    fn as_ref(&self) -> &RelativePath {
-        self.as_str().as_ref()
-    }
-}
-
-/// Traverse the given components and apply to the provided stack.
-///
-/// This takes '.', and '..' into account. Where '.' doesn't change the stack, and '..' pops the
-/// last item or further adds parent components.
-#[inline(always)]
-fn relative_traversal<'a, C>(buf: &mut RelativePathBuf, components: C)
-where
-    C: IntoIterator<Item = Component<'a>>,
-{
-    use self::Component::{CurDir, Normal, ParentDir};
-
-    for c in components {
-        match c {
-            CurDir => (),
-            ParentDir => match buf.components().next_back() {
-                Some(Component::ParentDir) | None => {
-                    buf.push(PARENT_STR);
-                }
-                _ => {
-                    buf.pop();
-                }
-            },
-            Normal(name) => {
-                buf.push(name);
-            }
-        }
-    }
-}
-
-/// Iterator over all the components in a relative path.
-#[derive(Clone)]
-pub struct Components<'a> {
-    source: &'a str,
-}
-
-impl<'a> Iterator for Components<'a> {
-    type Item = Component<'a>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.source = self.source.trim_start_matches(SEP);
-
-        let slice = match self.source.find(SEP) {
-            Some(i) => {
-                let (slice, rest) = self.source.split_at(i);
-                self.source = rest.trim_start_matches(SEP);
-                slice
-            }
-            None => mem::take(&mut self.source),
-        };
-
-        match slice {
-            "" => None,
-            CURRENT_STR => Some(Component::CurDir),
-            PARENT_STR => Some(Component::ParentDir),
-            slice => Some(Component::Normal(slice)),
-        }
-    }
-}
-
-impl<'a> DoubleEndedIterator for Components<'a> {
-    fn next_back(&mut self) -> Option<Self::Item> {
-        self.source = self.source.trim_end_matches(SEP);
-
-        let slice = match self.source.rfind(SEP) {
-            Some(i) => {
-                let (rest, slice) = self.source.split_at(i + 1);
-                self.source = rest.trim_end_matches(SEP);
-                slice
-            }
-            None => mem::take(&mut self.source),
-        };
-
-        match slice {
-            "" => None,
-            CURRENT_STR => Some(Component::CurDir),
-            PARENT_STR => Some(Component::ParentDir),
-            slice => Some(Component::Normal(slice)),
-        }
-    }
-}
-
-impl<'a> Components<'a> {
-    /// Construct a new component from the given string.
-    fn new(source: &'a str) -> Components<'a> {
-        Self { source }
-    }
-
-    /// Extracts a slice corresponding to the portion of the path remaining for iteration.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use relative_path::RelativePath;
-    ///
-    /// let mut components = RelativePath::new("tmp/foo/bar.txt").components();
-    /// components.next();
-    /// components.next();
-    ///
-    /// assert_eq!("bar.txt", components.as_relative_path());
-    /// ```
-    #[must_use]
-    #[inline]
-    pub fn as_relative_path(&self) -> &'a RelativePath {
-        RelativePath::new(self.source)
-    }
-}
-
-impl<'a> cmp::PartialEq for Components<'a> {
-    fn eq(&self, other: &Components<'a>) -> bool {
-        Iterator::eq(self.clone(), other.clone())
-    }
-}
-
-/// An iterator over the [`Component`]s of a [`RelativePath`], as [`str`]
-/// slices.
-///
-/// This `struct` is created by the [`iter`][RelativePath::iter] method.
-///
-/// [`str`]: prim@str
-#[derive(Clone)]
-pub struct Iter<'a> {
-    inner: Components<'a>,
-}
-
-impl<'a> Iterator for Iter<'a> {
-    type Item = &'a str;
-
-    fn next(&mut self) -> Option<&'a str> {
-        self.inner.next().map(Component::as_str)
-    }
-}
-
-impl<'a> DoubleEndedIterator for Iter<'a> {
-    fn next_back(&mut self) -> Option<&'a str> {
-        self.inner.next_back().map(Component::as_str)
-    }
-}
-
-/// Error kind for [`FromPathError`].
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-#[non_exhaustive]
-pub enum FromPathErrorKind {
-    /// Non-relative component in path.
-    NonRelative,
-    /// Non-utf8 component in path.
-    NonUtf8,
-    /// Trying to convert a platform-specific path which uses a platform-specific separator.
-    BadSeparator,
-}
-
-/// An error raised when attempting to convert a path using
-/// [`RelativePathBuf::from_path`].
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct FromPathError {
-    kind: FromPathErrorKind,
-}
-
-impl FromPathError {
-    /// Gets the underlying [`FromPathErrorKind`] that provides more details on
-    /// what went wrong.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::path::Path;
-    /// use relative_path::{FromPathErrorKind, RelativePathBuf};
-    ///
-    /// let result = RelativePathBuf::from_path(Path::new("/hello/world"));
-    /// let e = result.unwrap_err();
-    ///
-    /// assert_eq!(FromPathErrorKind::NonRelative, e.kind());
-    /// ```
-    #[must_use]
-    pub fn kind(&self) -> FromPathErrorKind {
-        self.kind
-    }
-}
-
-impl From<FromPathErrorKind> for FromPathError {
-    fn from(value: FromPathErrorKind) -> Self {
-        Self { kind: value }
-    }
-}
-
-impl fmt::Display for FromPathError {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        match self.kind {
-            FromPathErrorKind::NonRelative => "path contains non-relative component".fmt(fmt),
-            FromPathErrorKind::NonUtf8 => "path contains non-utf8 component".fmt(fmt),
-            FromPathErrorKind::BadSeparator => {
-                "path contains platform-specific path separator".fmt(fmt)
-            }
-        }
-    }
-}
-
-impl error::Error for FromPathError {}
-
/// An owned, mutable relative path.
///
/// This type provides methods to manipulate relative path objects.
#[derive(Clone)]
pub struct RelativePathBuf {
    // Backing storage for the path text; mutated in place by `push`,
    // `set_file_name`, `set_extension`, and `pop`.
    inner: String,
}
-
impl RelativePathBuf {
    /// Create a new relative path buffer.
    #[must_use]
    pub fn new() -> RelativePathBuf {
        RelativePathBuf {
            inner: String::new(),
        }
    }

    /// Internal constructor to allocate a relative path buf with the given capacity.
    fn with_capacity(cap: usize) -> RelativePathBuf {
        RelativePathBuf {
            inner: String::with_capacity(cap),
        }
    }

    /// Try to convert a [`Path`] to a [`RelativePathBuf`].
    ///
    /// [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::{RelativePath, RelativePathBuf, FromPathErrorKind};
    /// use std::path::Path;
    ///
    /// assert_eq!(
    ///     Ok(RelativePath::new("foo/bar").to_owned()),
    ///     RelativePathBuf::from_path(Path::new("foo/bar"))
    /// );
    /// ```
    ///
    /// # Errors
    ///
    /// This will error in case the provided path is not a relative path, which
    /// is identified by it having a [`Prefix`] or [`RootDir`] component.
    ///
    /// [`Prefix`]: std::path::Component::Prefix
    /// [`RootDir`]: std::path::Component::RootDir
    pub fn from_path<P: AsRef<path::Path>>(path: P) -> Result<RelativePathBuf, FromPathError> {
        use std::path::Component::{CurDir, Normal, ParentDir, Prefix, RootDir};

        let mut buffer = RelativePathBuf::new();

        for c in path.as_ref().components() {
            match c {
                // Absolute markers can never appear in a relative path.
                Prefix(_) | RootDir => return Err(FromPathErrorKind::NonRelative.into()),
                // `.` components are redundant and dropped.
                CurDir => continue,
                ParentDir => buffer.push(PARENT_STR),
                Normal(s) => buffer.push(s.to_str().ok_or(FromPathErrorKind::NonUtf8)?),
            }
        }

        Ok(buffer)
    }

    /// Extends `self` with `path`.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePathBuf;
    ///
    /// let mut path = RelativePathBuf::new();
    /// path.push("foo");
    /// path.push("bar");
    ///
    /// assert_eq!("foo/bar", path);
    ///
    /// let mut path = RelativePathBuf::new();
    /// path.push("foo");
    /// path.push("/bar");
    ///
    /// assert_eq!("foo/bar", path);
    /// ```
    pub fn push<P>(&mut self, path: P)
    where
        P: AsRef<RelativePath>,
    {
        let other = path.as_ref();

        // Ignore a leading separator on the incoming path so pushing "/bar"
        // behaves the same as pushing "bar".
        let other = if other.starts_with_sep() {
            &other.inner[1..]
        } else {
            &other.inner[..]
        };

        // Insert a separator between the existing contents and the new text,
        // unless the buffer is empty or already ends with a separator.
        if !self.inner.is_empty() && !self.ends_with_sep() {
            self.inner.push(SEP);
        }

        self.inner.push_str(other);
    }

    /// Updates [`file_name`] to `file_name`.
    ///
    /// If [`file_name`] was [`None`], this is equivalent to pushing
    /// `file_name`.
    ///
    /// Otherwise it is equivalent to calling [`pop`] and then pushing
    /// `file_name`. The new path will be a sibling of the original path. (That
    /// is, it will have the same parent.)
    ///
    /// [`file_name`]: RelativePath::file_name
    /// [`pop`]: RelativePathBuf::pop
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePathBuf;
    ///
    /// let mut buf = RelativePathBuf::from("");
    /// assert!(buf.file_name() == None);
    /// buf.set_file_name("bar");
    /// assert_eq!(RelativePathBuf::from("bar"), buf);
    ///
    /// assert!(buf.file_name().is_some());
    /// buf.set_file_name("baz.txt");
    /// assert_eq!(RelativePathBuf::from("baz.txt"), buf);
    ///
    /// buf.push("bar");
    /// assert!(buf.file_name().is_some());
    /// buf.set_file_name("bar.txt");
    /// assert_eq!(RelativePathBuf::from("baz.txt/bar.txt"), buf);
    /// ```
    pub fn set_file_name<S: AsRef<str>>(&mut self, file_name: S) {
        if self.file_name().is_some() {
            // Remove the existing file name before appending the new one.
            let popped = self.pop();
            debug_assert!(popped);
        }

        self.push(file_name.as_ref());
    }

    /// Updates [`extension`] to `extension`.
    ///
    /// Returns `false` and does nothing if
    /// [`file_name`][RelativePath::file_name] is [`None`], returns `true` and
    /// updates the extension otherwise.
    ///
    /// If [`extension`] is [`None`], the extension is added; otherwise it is
    /// replaced.
    ///
    /// [`extension`]: RelativePath::extension
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::{RelativePath, RelativePathBuf};
    ///
    /// let mut p = RelativePathBuf::from("feel/the");
    ///
    /// p.set_extension("force");
    /// assert_eq!(RelativePath::new("feel/the.force"), p);
    ///
    /// p.set_extension("dark_side");
    /// assert_eq!(RelativePath::new("feel/the.dark_side"), p);
    ///
    /// assert!(p.pop());
    /// p.set_extension("nothing");
    /// assert_eq!(RelativePath::new("feel.nothing"), p);
    /// ```
    pub fn set_extension<S: AsRef<str>>(&mut self, extension: S) -> bool {
        let file_stem = match self.file_stem() {
            Some(stem) => stem,
            None => return false,
        };

        // `file_stem` borrows from `self.inner`, so the distance between the
        // two pointers is the byte offset at which the stem ends; truncating
        // there drops any existing extension together with its `.`.
        let end_file_stem = file_stem[file_stem.len()..].as_ptr() as usize;
        let start = self.inner.as_ptr() as usize;
        self.inner.truncate(end_file_stem.wrapping_sub(start));

        let extension = extension.as_ref();

        if !extension.is_empty() {
            self.inner.push(STEM_SEP);
            self.inner.push_str(extension);
        }

        true
    }

    /// Truncates `self` to [`parent`][RelativePath::parent].
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::{RelativePath, RelativePathBuf};
    ///
    /// let mut p = RelativePathBuf::from("test/test.rs");
    ///
    /// assert_eq!(true, p.pop());
    /// assert_eq!(RelativePath::new("test"), p);
    /// assert_eq!(true, p.pop());
    /// assert_eq!(RelativePath::new(""), p);
    /// assert_eq!(false, p.pop());
    /// assert_eq!(RelativePath::new(""), p);
    /// ```
    pub fn pop(&mut self) -> bool {
        // Truncate to the parent's length; an empty path has no parent.
        match self.parent().map(|p| p.inner.len()) {
            Some(len) => {
                self.inner.truncate(len);
                true
            }
            None => false,
        }
    }

    /// Coerce to a [`RelativePath`] slice.
    #[must_use]
    pub fn as_relative_path(&self) -> &RelativePath {
        self
    }

    /// Consumes the `RelativePathBuf`, yielding its internal [`String`] storage.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePathBuf;
    ///
    /// let p = RelativePathBuf::from("/the/head");
    /// let string = p.into_string();
    /// assert_eq!(string, "/the/head".to_owned());
    /// ```
    #[must_use]
    pub fn into_string(self) -> String {
        self.inner
    }

    /// Converts this `RelativePathBuf` into a [boxed][std::boxed::Box]
    /// [`RelativePath`].
    #[must_use]
    pub fn into_boxed_relative_path(self) -> Box<RelativePath> {
        // `RelativePath` is `#[repr(transparent)]` over `str`, so the raw
        // pointer cast preserves the allocation's layout.
        let rw = Box::into_raw(self.inner.into_boxed_str()) as *mut RelativePath;
        unsafe { Box::from_raw(rw) }
    }
}
-
-impl Default for RelativePathBuf {
-    fn default() -> Self {
-        RelativePathBuf::new()
-    }
-}
-
-impl<'a> From<&'a RelativePath> for Cow<'a, RelativePath> {
-    #[inline]
-    fn from(s: &'a RelativePath) -> Cow<'a, RelativePath> {
-        Cow::Borrowed(s)
-    }
-}
-
-impl<'a> From<RelativePathBuf> for Cow<'a, RelativePath> {
-    #[inline]
-    fn from(s: RelativePathBuf) -> Cow<'a, RelativePath> {
-        Cow::Owned(s)
-    }
-}
-
-impl fmt::Debug for RelativePathBuf {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(fmt, "{:?}", &self.inner)
-    }
-}
-
-impl AsRef<RelativePath> for RelativePathBuf {
-    fn as_ref(&self) -> &RelativePath {
-        RelativePath::new(&self.inner)
-    }
-}
-
-impl AsRef<str> for RelativePath {
-    fn as_ref(&self) -> &str {
-        &self.inner
-    }
-}
-
-impl Borrow<RelativePath> for RelativePathBuf {
-    #[inline]
-    fn borrow(&self) -> &RelativePath {
-        self
-    }
-}
-
-impl<'a, T: ?Sized + AsRef<str>> From<&'a T> for RelativePathBuf {
-    fn from(path: &'a T) -> RelativePathBuf {
-        RelativePathBuf {
-            inner: path.as_ref().to_owned(),
-        }
-    }
-}
-
impl From<String> for RelativePathBuf {
    fn from(path: String) -> RelativePathBuf {
        // Takes ownership of the string as-is; no separator normalization or
        // validation is performed here.
        RelativePathBuf { inner: path }
    }
}
-
-impl From<RelativePathBuf> for String {
-    fn from(path: RelativePathBuf) -> String {
-        path.into_string()
-    }
-}
-
-impl ops::Deref for RelativePathBuf {
-    type Target = RelativePath;
-
-    fn deref(&self) -> &RelativePath {
-        RelativePath::new(&self.inner)
-    }
-}
-
-impl cmp::PartialEq for RelativePathBuf {
-    fn eq(&self, other: &RelativePathBuf) -> bool {
-        self.components() == other.components()
-    }
-}
-
-impl cmp::Eq for RelativePathBuf {}
-
-impl cmp::PartialOrd for RelativePathBuf {
-    #[inline]
-    fn partial_cmp(&self, other: &RelativePathBuf) -> Option<cmp::Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl cmp::Ord for RelativePathBuf {
-    #[inline]
-    fn cmp(&self, other: &RelativePathBuf) -> cmp::Ordering {
-        self.components().cmp(other.components())
-    }
-}
-
-impl Hash for RelativePathBuf {
-    fn hash<H: Hasher>(&self, h: &mut H) {
-        self.as_relative_path().hash(h);
-    }
-}
-
-impl<P> Extend<P> for RelativePathBuf
-where
-    P: AsRef<RelativePath>,
-{
-    #[inline]
-    fn extend<I: IntoIterator<Item = P>>(&mut self, iter: I) {
-        iter.into_iter().for_each(move |p| self.push(p.as_ref()));
-    }
-}
-
-impl<P> FromIterator<P> for RelativePathBuf
-where
-    P: AsRef<RelativePath>,
-{
-    #[inline]
-    fn from_iter<I: IntoIterator<Item = P>>(iter: I) -> RelativePathBuf {
-        let mut buf = RelativePathBuf::new();
-        buf.extend(iter);
-        buf
-    }
-}
-
/// A borrowed, immutable relative path.
#[repr(transparent)]
pub struct RelativePath {
    // Wrapped path text. `#[repr(transparent)]` guarantees the same layout as
    // `str`, which the pointer casts in `RelativePath::new` and
    // `RelativePathBuf::into_boxed_relative_path` rely on.
    inner: str,
}
-
/// An error returned from [`strip_prefix`] if the prefix was not found.
///
/// [`strip_prefix`]: RelativePath::strip_prefix
#[derive(Debug, Clone, PartialEq, Eq)]
// The private unit field carries no details and prevents construction outside
// this crate.
pub struct StripPrefixError(());
-
-impl RelativePath {
    /// Directly wraps a string slice as a `RelativePath` slice.
    pub fn new<S: AsRef<str> + ?Sized>(s: &S) -> &RelativePath {
        // SAFETY: `RelativePath` is `#[repr(transparent)]` over `str`, so a
        // `*const str` may be reinterpreted as a `*const RelativePath` with
        // the same lifetime and validity.
        unsafe { &*(s.as_ref() as *const str as *const RelativePath) }
    }
-
    /// Try to convert a [`Path`] to a [`RelativePath`] without allocating a buffer.
    ///
    /// [`Path`]: std::path::Path
    ///
    /// # Errors
    ///
    /// This requires the path to be a legal, platform-neutral relative path.
    /// Otherwise various forms of [`FromPathError`] will be returned as an
    /// [`Err`].
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::{RelativePath, FromPathErrorKind};
    ///
    /// assert_eq!(
    ///     Ok(RelativePath::new("foo/bar")),
    ///     RelativePath::from_path("foo/bar")
    /// );
    ///
    /// // Note: absolute paths are different depending on platform.
    /// if cfg!(windows) {
    ///     let e = RelativePath::from_path("c:\\foo\\bar").unwrap_err();
    ///     assert_eq!(FromPathErrorKind::NonRelative, e.kind());
    /// }
    ///
    /// if cfg!(unix) {
    ///     let e = RelativePath::from_path("/foo/bar").unwrap_err();
    ///     assert_eq!(FromPathErrorKind::NonRelative, e.kind());
    /// }
    /// ```
    pub fn from_path<P: ?Sized + AsRef<path::Path>>(
        path: &P,
    ) -> Result<&RelativePath, FromPathError> {
        use std::path::Component::{CurDir, Normal, ParentDir, Prefix, RootDir};

        let other = path.as_ref();

        let s = match other.to_str() {
            Some(s) => s,
            None => return Err(FromPathErrorKind::NonUtf8.into()),
        };

        let rel = RelativePath::new(s);

        // check that the component compositions are equal.
        for (a, b) in other.components().zip(rel.components()) {
            match (a, b) {
                // A prefix or root on the platform side means the path is absolute.
                (Prefix(_) | RootDir, _) => return Err(FromPathErrorKind::NonRelative.into()),
                (CurDir, Component::CurDir) | (ParentDir, Component::ParentDir) => continue,
                (Normal(a), Component::Normal(b)) if a == b => continue,
                // The two component streams diverge when the platform path uses
                // a separator other than `/` (e.g. `\` on Windows).
                _ => return Err(FromPathErrorKind::BadSeparator.into()),
            }
        }

        Ok(rel)
    }
-
    /// Yields the underlying [`str`] slice.
    ///
    /// [`str`]: prim@str
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// assert_eq!(RelativePath::new("foo.txt").as_str(), "foo.txt");
    /// ```
    #[must_use]
    pub fn as_str(&self) -> &str {
        // Free conversion: `RelativePath` transparently wraps `str`.
        &self.inner
    }
-
    /// Returns an object that implements [`Display`][std::fmt::Display].
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// let path = RelativePath::new("tmp/foo.rs");
    ///
    /// println!("{}", path.display());
    /// ```
    #[deprecated(note = "RelativePath implements std::fmt::Display directly")]
    #[must_use]
    pub fn display(&self) -> Display {
        // Retained so existing `path.display()` call sites keep compiling.
        Display { path: self }
    }
-
-    /// Creates an owned [`RelativePathBuf`] with path adjoined to self.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use relative_path::RelativePath;
-    ///
-    /// let path = RelativePath::new("foo/bar");
-    /// assert_eq!("foo/bar/baz", path.join("baz"));
-    /// ```
-    pub fn join<P>(&self, path: P) -> RelativePathBuf
-    where
-        P: AsRef<RelativePath>,
-    {
-        let mut out = self.to_relative_path_buf();
-        out.push(path);
-        out
-    }
-
    /// Iterate over all components in this relative path.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::{Component, RelativePath};
    ///
    /// let path = RelativePath::new("foo/bar/baz");
    /// let mut it = path.components();
    ///
    /// assert_eq!(Some(Component::Normal("foo")), it.next());
    /// assert_eq!(Some(Component::Normal("bar")), it.next());
    /// assert_eq!(Some(Component::Normal("baz")), it.next());
    /// assert_eq!(None, it.next());
    /// ```
    #[must_use]
    pub fn components(&self) -> Components {
        // Parsing of the underlying string happens lazily during iteration;
        // see `Components` for the details.
        Components::new(&self.inner)
    }
-
    /// Produces an iterator over the path's components viewed as [`str`]
    /// slices.
    ///
    /// For more information about the particulars of how the path is separated
    /// into components, see [`components`][Self::components].
    ///
    /// [`str`]: prim@str
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// let mut it = RelativePath::new("/tmp/foo.txt").iter();
    /// assert_eq!(it.next(), Some("tmp"));
    /// assert_eq!(it.next(), Some("foo.txt"));
    /// assert_eq!(it.next(), None)
    /// ```
    #[must_use]
    pub fn iter(&self) -> Iter {
        // `Iter` is a thin adapter over `components()` yielding `&str`.
        Iter {
            inner: self.components(),
        }
    }
-
-    /// Convert to an owned [`RelativePathBuf`].
-    #[must_use]
-    pub fn to_relative_path_buf(&self) -> RelativePathBuf {
-        RelativePathBuf::from(self.inner.to_owned())
-    }
-
    /// Build an owned [`PathBuf`] relative to `base` for the current relative
    /// path.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    /// use std::path::Path;
    ///
    /// let path = RelativePath::new("foo/bar").to_path(".");
    /// assert_eq!(Path::new("./foo/bar"), path);
    ///
    /// let path = RelativePath::new("foo/bar").to_path("");
    /// assert_eq!(Path::new("foo/bar"), path);
    /// ```
    ///
    /// # Encoding an absolute path
    ///
    /// Absolute paths are, in contrast to when using [`PathBuf::push`] *ignored*
    /// and will be added unchanged to the buffer.
    ///
    /// This is to preserve the probability of a path conversion failing if the
    /// relative path contains platform-specific absolute path components.
    ///
    /// ```
    /// use relative_path::RelativePath;
    /// use std::path::Path;
    ///
    /// if cfg!(windows) {
    ///     let path = RelativePath::new("/bar/baz").to_path("foo");
    ///     assert_eq!(Path::new("foo\\bar\\baz"), path);
    ///
    ///     let path = RelativePath::new("c:\\bar\\baz").to_path("foo");
    ///     assert_eq!(Path::new("foo\\c:\\bar\\baz"), path);
    /// }
    ///
    /// if cfg!(unix) {
    ///     let path = RelativePath::new("/bar/baz").to_path("foo");
    ///     assert_eq!(Path::new("foo/bar/baz"), path);
    ///
    ///     let path = RelativePath::new("c:\\bar\\baz").to_path("foo");
    ///     assert_eq!(Path::new("foo/c:\\bar\\baz"), path);
    /// }
    /// ```
    ///
    /// [`PathBuf`]: std::path::PathBuf
    /// [`PathBuf::push`]: std::path::PathBuf::push
    pub fn to_path<P: AsRef<path::Path>>(&self, base: P) -> path::PathBuf {
        // Build in an `OsString` so separators are appended as plain text
        // rather than going through `PathBuf::push` semantics.
        let mut p = base.as_ref().to_path_buf().into_os_string();

        for c in self.components() {
            if !p.is_empty() {
                // Append the platform separator; a 4-byte buffer is the
                // maximum any `char` needs for UTF-8 encoding.
                p.push(path::MAIN_SEPARATOR.encode_utf8(&mut [0u8, 0u8, 0u8, 0u8]));
            }

            p.push(c.as_str());
        }

        path::PathBuf::from(p)
    }
-
    /// Build an owned [`PathBuf`] relative to `base` for the current relative
    /// path.
    ///
    /// This is similar to [`to_path`] except that it doesn't just
    /// unconditionally append one path to the other, instead it performs the
    /// following operations depending on its own components:
    ///
    /// * [`Component::CurDir`] leaves the `base` unmodified.
    /// * [`Component::ParentDir`] removes a component from `base` using
    ///   [`path::PathBuf::pop`].
    /// * [`Component::Normal`] pushes the given path component onto `base`
    ///   using the same mechanism as [`to_path`].
    ///
    /// [`to_path`]: RelativePath::to_path
    ///
    /// Note that the exact semantics of the path operation is determined by the
    /// corresponding [`PathBuf`] operation. E.g. popping a component off a path
    /// like `.` will result in an empty path.
    ///
    /// ```
    /// use relative_path::RelativePath;
    /// use std::path::Path;
    ///
    /// let path = RelativePath::new("..").to_logical_path(".");
    /// assert_eq!(path, Path::new(""));
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    /// use std::path::Path;
    ///
    /// let path = RelativePath::new("..").to_logical_path("foo/bar");
    /// assert_eq!(path, Path::new("foo"));
    /// ```
    ///
    /// # Encoding an absolute path
    ///
    /// Behaves the same as [`to_path`][RelativePath::to_path] when encoding
    /// absolute paths.
    ///
    /// Absolute paths are, in contrast to when using [`PathBuf::push`] *ignored*
    /// and will be added unchanged to the buffer.
    ///
    /// This is to preserve the probability of a path conversion failing if the
    /// relative path contains platform-specific absolute path components.
    ///
    /// ```
    /// use relative_path::RelativePath;
    /// use std::path::Path;
    ///
    /// if cfg!(windows) {
    ///     let path = RelativePath::new("/bar/baz").to_logical_path("foo");
    ///     assert_eq!(Path::new("foo\\bar\\baz"), path);
    ///
    ///     let path = RelativePath::new("c:\\bar\\baz").to_logical_path("foo");
    ///     assert_eq!(Path::new("foo\\c:\\bar\\baz"), path);
    ///
    ///     let path = RelativePath::new("foo/bar").to_logical_path("");
    ///     assert_eq!(Path::new("foo\\bar"), path);
    /// }
    ///
    /// if cfg!(unix) {
    ///     let path = RelativePath::new("/bar/baz").to_logical_path("foo");
    ///     assert_eq!(Path::new("foo/bar/baz"), path);
    ///
    ///     let path = RelativePath::new("c:\\bar\\baz").to_logical_path("foo");
    ///     assert_eq!(Path::new("foo/c:\\bar\\baz"), path);
    ///
    ///     let path = RelativePath::new("foo/bar").to_logical_path("");
    ///     assert_eq!(Path::new("foo/bar"), path);
    /// }
    /// ```
    ///
    /// [`PathBuf`]: std::path::PathBuf
    /// [`PathBuf::push`]: std::path::PathBuf::push
    pub fn to_logical_path<P: AsRef<path::Path>>(&self, base: P) -> path::PathBuf {
        use self::Component::{CurDir, Normal, ParentDir};

        // Built as an `OsString` so normal components can be appended with a
        // literal separator, matching `to_path`.
        let mut p = base.as_ref().to_path_buf().into_os_string();

        for c in self.components() {
            match c {
                CurDir => continue,
                ParentDir => {
                    // Round-trip through `PathBuf` so `pop` applies the
                    // platform's notion of removing the last component;
                    // `mem::take` avoids cloning the buffer.
                    let mut temp = path::PathBuf::from(std::mem::take(&mut p));
                    temp.pop();
                    p = temp.into_os_string();
                }
                Normal(c) => {
                    if !p.is_empty() {
                        p.push(path::MAIN_SEPARATOR.encode_utf8(&mut [0u8, 0u8, 0u8, 0u8]));
                    }

                    p.push(c);
                }
            }
        }

        path::PathBuf::from(p)
    }
-
    /// Returns a relative path, without its final [`Component`] if there is one.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// assert_eq!(Some(RelativePath::new("foo")), RelativePath::new("foo/bar").parent());
    /// assert_eq!(Some(RelativePath::new("")), RelativePath::new("foo").parent());
    /// assert_eq!(None, RelativePath::new("").parent());
    /// ```
    #[must_use]
    pub fn parent(&self) -> Option<&RelativePath> {
        use self::Component::CurDir;

        if self.inner.is_empty() {
            return None;
        }

        let mut it = self.components();
        // Consume trailing `.` components, and then — because `next_back`
        // also consumes the first non-matching component — the final real
        // component as well. What remains is the parent.
        while let Some(CurDir) = it.next_back() {}
        Some(it.as_relative_path())
    }
-
    /// Returns the final component of the `RelativePath`, if there is one.
    ///
    /// If the path is a normal file, this is the file name. If it's the path of
    /// a directory, this is the directory name.
    ///
    /// Returns [`None`] If the path terminates in `..`.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// assert_eq!(Some("bin"), RelativePath::new("usr/bin/").file_name());
    /// assert_eq!(Some("foo.txt"), RelativePath::new("tmp/foo.txt").file_name());
    /// assert_eq!(Some("foo.txt"), RelativePath::new("tmp/foo.txt/").file_name());
    /// assert_eq!(Some("foo.txt"), RelativePath::new("foo.txt/.").file_name());
    /// assert_eq!(Some("foo.txt"), RelativePath::new("foo.txt/.//").file_name());
    /// assert_eq!(None, RelativePath::new("foo.txt/..").file_name());
    /// assert_eq!(None, RelativePath::new("/").file_name());
    /// ```
    #[must_use]
    pub fn file_name(&self) -> Option<&str> {
        use self::Component::{CurDir, Normal, ParentDir};

        // Scan from the back, skipping trailing `.` components; the first
        // non-`.` component decides the answer.
        let mut it = self.components();

        while let Some(c) = it.next_back() {
            return match c {
                // `continue` keeps scanning past trailing `.` entries.
                CurDir => continue,
                Normal(name) => Some(name),
                // A trailing `..` means the last component has no name.
                ParentDir => None,
            };
        }

        None
    }
-
    /// Returns a relative path that, when joined onto `base`, yields `self`.
    ///
    /// # Errors
    ///
    /// If `base` is not a prefix of `self` (i.e. [`starts_with`] returns
    /// `false`), returns [`Err`].
    ///
    /// [`starts_with`]: Self::starts_with
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// let path = RelativePath::new("test/haha/foo.txt");
    ///
    /// assert_eq!(path.strip_prefix("test"), Ok(RelativePath::new("haha/foo.txt")));
    /// assert_eq!(path.strip_prefix("test").is_ok(), true);
    /// assert_eq!(path.strip_prefix("haha").is_ok(), false);
    /// ```
    pub fn strip_prefix<P>(&self, base: P) -> Result<&RelativePath, StripPrefixError>
    where
        P: AsRef<RelativePath>,
    {
        // `iter_after` (helper defined elsewhere in this file) appears to
        // return the remaining components of `self` only when `base`'s
        // components form a prefix — see its definition to confirm.
        iter_after(self.components(), base.as_ref().components())
            .map(|c| c.as_relative_path())
            .ok_or(StripPrefixError(()))
    }
-
    /// Determines whether `base` is a prefix of `self`.
    ///
    /// Only considers whole path components to match.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// let path = RelativePath::new("etc/passwd");
    ///
    /// assert!(path.starts_with("etc"));
    ///
    /// assert!(!path.starts_with("e"));
    /// ```
    pub fn starts_with<P>(&self, base: P) -> bool
    where
        P: AsRef<RelativePath>,
    {
        // Prefix check by components (not bytes), so "e" does not match "etc".
        iter_after(self.components(), base.as_ref().components()).is_some()
    }
-
    /// Determines whether `child` is a suffix of `self`.
    ///
    /// Only considers whole path components to match.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// let path = RelativePath::new("etc/passwd");
    ///
    /// assert!(path.ends_with("passwd"));
    /// ```
    pub fn ends_with<P>(&self, child: P) -> bool
    where
        P: AsRef<RelativePath>,
    {
        // Same prefix machinery as `starts_with`, applied to the reversed
        // component sequences to test for a suffix.
        iter_after(self.components().rev(), child.as_ref().components().rev()).is_some()
    }
-
-    /// Determines whether `self` is normalized.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use relative_path::RelativePath;
-    ///
-    /// // These are normalized.
-    /// assert!(RelativePath::new("").is_normalized());
-    /// assert!(RelativePath::new("baz.txt").is_normalized());
-    /// assert!(RelativePath::new("foo/bar/baz.txt").is_normalized());
-    /// assert!(RelativePath::new("..").is_normalized());
-    /// assert!(RelativePath::new("../..").is_normalized());
-    /// assert!(RelativePath::new("../../foo/bar/baz.txt").is_normalized());
-    ///
-    /// // These are not normalized.
-    /// assert!(!RelativePath::new(".").is_normalized());
-    /// assert!(!RelativePath::new("./baz.txt").is_normalized());
-    /// assert!(!RelativePath::new("foo/..").is_normalized());
-    /// assert!(!RelativePath::new("foo/../baz.txt").is_normalized());
-    /// assert!(!RelativePath::new("foo/.").is_normalized());
-    /// assert!(!RelativePath::new("foo/./baz.txt").is_normalized());
-    /// assert!(!RelativePath::new("../foo/./bar/../baz.txt").is_normalized());
-    /// ```
-    #[must_use]
-    pub fn is_normalized(&self) -> bool {
-        self.components()
-            .skip_while(|c| matches!(c, Component::ParentDir))
-            .all(|c| matches!(c, Component::Normal(_)))
-    }
-
-    /// Creates an owned [`RelativePathBuf`] like `self` but with the given file
-    /// name.
-    ///
-    /// See [`set_file_name`] for more details.
-    ///
-    /// [`set_file_name`]: RelativePathBuf::set_file_name
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use relative_path::{RelativePath, RelativePathBuf};
-    ///
-    /// let path = RelativePath::new("tmp/foo.txt");
-    /// assert_eq!(path.with_file_name("bar.txt"), RelativePathBuf::from("tmp/bar.txt"));
-    ///
-    /// let path = RelativePath::new("tmp");
-    /// assert_eq!(path.with_file_name("var"), RelativePathBuf::from("var"));
-    /// ```
-    pub fn with_file_name<S: AsRef<str>>(&self, file_name: S) -> RelativePathBuf {
-        let mut buf = self.to_relative_path_buf();
-        buf.set_file_name(file_name);
-        buf
-    }
-
    /// Extracts the stem (non-extension) portion of [`file_name`][Self::file_name].
    ///
    /// The stem is:
    ///
    /// * [`None`], if there is no file name;
    /// * The entire file name if there is no embedded `.`;
    /// * The entire file name if the file name begins with `.` and has no other `.`s within;
    /// * Otherwise, the portion of the file name before the final `.`
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// let path = RelativePath::new("foo.rs");
    ///
    /// assert_eq!("foo", path.file_stem().unwrap());
    /// ```
    pub fn file_stem(&self) -> Option<&str> {
        // `split_file_at_dot` (defined elsewhere in this file) presumably
        // splits the name around the final `.`; `before.or(after)` then keeps
        // the whole name when no split exists — confirm against its definition.
        self.file_name()
            .map(split_file_at_dot)
            .and_then(|(before, after)| before.or(after))
    }
-
    /// Extracts the extension of [`file_name`][Self::file_name], if possible.
    ///
    /// The extension is:
    ///
    /// * [`None`], if there is no file name;
    /// * [`None`], if there is no embedded `.`;
    /// * [`None`], if the file name begins with `.` and has no other `.`s within;
    /// * Otherwise, the portion of the file name after the final `.`
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// assert_eq!(Some("rs"), RelativePath::new("foo.rs").extension());
    /// assert_eq!(None, RelativePath::new(".rs").extension());
    /// assert_eq!(Some("rs"), RelativePath::new("foo.rs/.").extension());
    /// ```
    pub fn extension(&self) -> Option<&str> {
        // Mirror of `file_stem`: `before.and(after)` yields the extension only
        // when both a stem and an extension are present.
        self.file_name()
            .map(split_file_at_dot)
            .and_then(|(before, after)| before.and(after))
    }
-
-    /// Creates an owned [`RelativePathBuf`] like `self` but with the given
-    /// extension.
-    ///
-    /// See [`set_extension`] for more details.
-    ///
-    /// [`set_extension`]: RelativePathBuf::set_extension
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use relative_path::{RelativePath, RelativePathBuf};
-    ///
-    /// let path = RelativePath::new("foo.rs");
-    /// assert_eq!(path.with_extension("txt"), RelativePathBuf::from("foo.txt"));
-    /// ```
-    pub fn with_extension<S: AsRef<str>>(&self, extension: S) -> RelativePathBuf {
-        let mut buf = self.to_relative_path_buf();
-        buf.set_extension(extension);
-        buf
-    }
-
    /// Build an owned [`RelativePathBuf`], joined with the given path and
    /// normalized.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// assert_eq!(
    ///     RelativePath::new("foo/baz.txt"),
    ///     RelativePath::new("foo/bar").join_normalized("../baz.txt").as_relative_path()
    /// );
    ///
    /// assert_eq!(
    ///     RelativePath::new("../foo/baz.txt"),
    ///     RelativePath::new("../foo/bar").join_normalized("../baz.txt").as_relative_path()
    /// );
    /// ```
    pub fn join_normalized<P>(&self, path: P) -> RelativePathBuf
    where
        P: AsRef<RelativePath>,
    {
        // `relative_traversal` (helper defined elsewhere in this file) folds
        // components into `buf`, normalizing as it goes per the examples
        // above; applying both paths in sequence joins and normalizes at once.
        let mut buf = RelativePathBuf::new();
        relative_traversal(&mut buf, self.components());
        relative_traversal(&mut buf, path.as_ref().components());
        buf
    }
-
    /// Return an owned [`RelativePathBuf`], with all non-normal components
    /// moved to the beginning of the path.
    ///
    /// This permits for a normalized representation of different relative
    /// components.
    ///
    /// Normalization is a _destructive_ operation if the path references an
    /// actual filesystem path. An example of this is symlinks under unix, a
    /// path like `foo/../bar` might reference a different location other than
    /// `./bar`.
    ///
    /// Normalization is a logical operation and does not guarantee that the
    /// constructed path corresponds to what the filesystem would do. On Linux
    /// for example symbolic links could mean that the logical path doesn't
    /// correspond to the filesystem path.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::RelativePath;
    ///
    /// assert_eq!(
    ///     "../foo/baz.txt",
    ///     RelativePath::new("../foo/./bar/../baz.txt").normalize()
    /// );
    ///
    /// assert_eq!(
    ///     "",
    ///     RelativePath::new(".").normalize()
    /// );
    /// ```
    #[must_use]
    pub fn normalize(&self) -> RelativePathBuf {
        // Capacity hint: the normalized form is expected not to exceed the
        // input's length, since normalization only removes components.
        let mut buf = RelativePathBuf::with_capacity(self.inner.len());
        relative_traversal(&mut buf, self.components());
        buf
    }
-
-    /// Constructs a relative path from the current path, to `path`.
-    ///
-    /// This function will return the empty [`RelativePath`] `""` if this source
-    /// contains unnamed components like `..` that would have to be traversed to
-    /// reach the destination `path`. This is necessary since we have no way of
-    /// knowing what the names of those components are when we're building the
-    /// new relative path.
-    ///
-    /// ```
-    /// use relative_path::RelativePath;
-    ///
-    /// // Here we don't know what directories `../..` refers to, so there's no
-    /// // way to construct a path back to `bar` in the current directory from
-    /// // `../..`.
-    /// let from = RelativePath::new("../../foo/relative-path");
-    /// let to = RelativePath::new("bar");
-    /// assert_eq!("", from.relative(to));
-    /// ```
-    ///
-    /// One exception to this is when two paths contains a common prefix at
-    /// which point there's no need to know what the names of those unnamed
-    /// components are.
-    ///
-    /// ```
-    /// use relative_path::RelativePath;
-    ///
-    /// let from = RelativePath::new("../../foo/bar");
-    /// let to = RelativePath::new("../../foo/baz");
-    ///
-    /// assert_eq!("../baz", from.relative(to));
-    ///
-    /// let from = RelativePath::new("../a/../../foo/bar");
-    /// let to = RelativePath::new("../../foo/baz");
-    ///
-    /// assert_eq!("../baz", from.relative(to));
-    /// ```
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use relative_path::RelativePath;
-    ///
-    /// assert_eq!(
-    ///     "../../e/f",
-    ///     RelativePath::new("a/b/c/d").relative(RelativePath::new("a/b/e/f"))
-    /// );
-    ///
-    /// assert_eq!(
-    ///     "../bbb",
-    ///     RelativePath::new("a/../aaa").relative(RelativePath::new("b/../bbb"))
-    /// );
-    ///
-    /// let a = RelativePath::new("git/relative-path");
-    /// let b = RelativePath::new("git");
-    /// assert_eq!("relative-path", b.relative(a));
-    /// assert_eq!("..", a.relative(b));
-    ///
-    /// let a = RelativePath::new("foo/bar/bap/foo.h");
-    /// let b = RelativePath::new("../arch/foo.h");
-    /// assert_eq!("../../../../../arch/foo.h", a.relative(b));
-    /// assert_eq!("", b.relative(a));
-    /// ```
-    pub fn relative<P>(&self, path: P) -> RelativePathBuf
-    where
-        P: AsRef<RelativePath>,
-    {
-        let mut from = RelativePathBuf::with_capacity(self.inner.len());
-        let mut to = RelativePathBuf::with_capacity(path.as_ref().inner.len());
-
-        relative_traversal(&mut from, self.components());
-        relative_traversal(&mut to, path.as_ref().components());
-
-        let mut it_from = from.components();
-        let mut it_to = to.components();
-
-        // Strip a common prefixes - if any.
-        let (lead_from, lead_to) = loop {
-            match (it_from.next(), it_to.next()) {
-                (Some(f), Some(t)) if f == t => continue,
-                (f, t) => {
-                    break (f, t);
-                }
-            }
-        };
-
-        // Special case: The path we are traversing from can't contain unnamed
-        // components. A relative path might be any path, like `/`, or
-        // `/foo/bar/baz`, and these components cannot be named in the relative
-        // traversal.
-        //
-        // Also note that `relative_traversal` guarantees that all ParentDir
-        // components are at the head of the path being built.
-        if lead_from == Some(Component::ParentDir) {
-            return RelativePathBuf::new();
-        }
-
-        let head = lead_from.into_iter().chain(it_from);
-        let tail = lead_to.into_iter().chain(it_to);
-
-        let mut buf = RelativePathBuf::with_capacity(usize::max(from.inner.len(), to.inner.len()));
-
-        for c in head.map(|_| Component::ParentDir).chain(tail) {
-            buf.push(c.as_str());
-        }
-
-        buf
-    }
-
-    /// Check if path starts with a path separator.
-    #[inline]
-    fn starts_with_sep(&self) -> bool {
-        self.inner.starts_with(SEP)
-    }
-
-    /// Check if path ends with a path separator.
-    #[inline]
-    fn ends_with_sep(&self) -> bool {
-        self.inner.ends_with(SEP)
-    }
-}
-
-impl<'a> IntoIterator for &'a RelativePath {
-    type IntoIter = Iter<'a>;
-    type Item = &'a str;
-
-    #[inline]
-    fn into_iter(self) -> Self::IntoIter {
-        self.iter()
-    }
-}
-
-/// Conversion from a [`Box<str>`] reference to a [`Box<RelativePath>`].
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePath;
-///
-/// let path: Box<RelativePath> = Box::<str>::from("foo/bar").into();
-/// assert_eq!(&*path, "foo/bar");
-/// ```
-impl From<Box<str>> for Box<RelativePath> {
-    #[inline]
-    fn from(boxed: Box<str>) -> Box<RelativePath> {
-        let rw = Box::into_raw(boxed) as *mut RelativePath;
-        unsafe { Box::from_raw(rw) }
-    }
-}
-
-/// Conversion from a [`str`] reference to a [`Box<RelativePath>`].
-///
-/// [`str`]: prim@str
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePath;
-///
-/// let path: Box<RelativePath> = "foo/bar".into();
-/// assert_eq!(&*path, "foo/bar");
-///
-/// let path: Box<RelativePath> = RelativePath::new("foo/bar").into();
-/// assert_eq!(&*path, "foo/bar");
-/// ```
-impl<T> From<&T> for Box<RelativePath>
-where
-    T: ?Sized + AsRef<str>,
-{
-    #[inline]
-    fn from(path: &T) -> Box<RelativePath> {
-        Box::<RelativePath>::from(Box::<str>::from(path.as_ref()))
-    }
-}
-
-/// Conversion from [`RelativePathBuf`] to [`Box<RelativePath>`].
-///
-/// # Examples
-///
-/// ```
-/// use std::sync::Arc;
-/// use relative_path::{RelativePath, RelativePathBuf};
-///
-/// let path = RelativePathBuf::from("foo/bar");
-/// let path: Box<RelativePath> = path.into();
-/// assert_eq!(&*path, "foo/bar");
-/// ```
-impl From<RelativePathBuf> for Box<RelativePath> {
-    #[inline]
-    fn from(path: RelativePathBuf) -> Box<RelativePath> {
-        let boxed: Box<str> = path.inner.into();
-        let rw = Box::into_raw(boxed) as *mut RelativePath;
-        unsafe { Box::from_raw(rw) }
-    }
-}
-
-/// Clone implementation for [`Box<RelativePath>`].
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePath;
-///
-/// let path: Box<RelativePath> = RelativePath::new("foo/bar").into();
-/// let path2 = path.clone();
-/// assert_eq!(&*path, &*path2);
-/// ```
-impl Clone for Box<RelativePath> {
-    #[inline]
-    fn clone(&self) -> Self {
-        self.to_relative_path_buf().into_boxed_relative_path()
-    }
-}
-
-/// Conversion from [`RelativePath`] to [`Arc<RelativePath>`].
-///
-/// # Examples
-///
-/// ```
-/// use std::sync::Arc;
-/// use relative_path::RelativePath;
-///
-/// let path: Arc<RelativePath> = RelativePath::new("foo/bar").into();
-/// assert_eq!(&*path, "foo/bar");
-/// ```
-impl From<&RelativePath> for Arc<RelativePath> {
-    #[inline]
-    fn from(path: &RelativePath) -> Arc<RelativePath> {
-        let arc: Arc<str> = path.inner.into();
-        let rw = Arc::into_raw(arc) as *const RelativePath;
-        unsafe { Arc::from_raw(rw) }
-    }
-}
-
-/// Conversion from [`RelativePathBuf`] to [`Arc<RelativePath>`].
-///
-/// # Examples
-///
-/// ```
-/// use std::sync::Arc;
-/// use relative_path::{RelativePath, RelativePathBuf};
-///
-/// let path = RelativePathBuf::from("foo/bar");
-/// let path: Arc<RelativePath> = path.into();
-/// assert_eq!(&*path, "foo/bar");
-/// ```
-impl From<RelativePathBuf> for Arc<RelativePath> {
-    #[inline]
-    fn from(path: RelativePathBuf) -> Arc<RelativePath> {
-        let arc: Arc<str> = path.inner.into();
-        let rw = Arc::into_raw(arc) as *const RelativePath;
-        unsafe { Arc::from_raw(rw) }
-    }
-}
-
-/// Conversion from [`RelativePathBuf`] to [`Arc<RelativePath>`].
-///
-/// # Examples
-///
-/// ```
-/// use std::rc::Rc;
-/// use relative_path::RelativePath;
-///
-/// let path: Rc<RelativePath> = RelativePath::new("foo/bar").into();
-/// assert_eq!(&*path, "foo/bar");
-/// ```
-impl From<&RelativePath> for Rc<RelativePath> {
-    #[inline]
-    fn from(path: &RelativePath) -> Rc<RelativePath> {
-        let rc: Rc<str> = path.inner.into();
-        let rw = Rc::into_raw(rc) as *const RelativePath;
-        unsafe { Rc::from_raw(rw) }
-    }
-}
-
-/// Conversion from [`RelativePathBuf`] to [`Rc<RelativePath>`].
-///
-/// # Examples
-///
-/// ```
-/// use std::rc::Rc;
-/// use relative_path::{RelativePath, RelativePathBuf};
-///
-/// let path = RelativePathBuf::from("foo/bar");
-/// let path: Rc<RelativePath> = path.into();
-/// assert_eq!(&*path, "foo/bar");
-/// ```
-impl From<RelativePathBuf> for Rc<RelativePath> {
-    #[inline]
-    fn from(path: RelativePathBuf) -> Rc<RelativePath> {
-        let rc: Rc<str> = path.inner.into();
-        let rw = Rc::into_raw(rc) as *const RelativePath;
-        unsafe { Rc::from_raw(rw) }
-    }
-}
-
-/// [`ToOwned`] implementation for [`RelativePath`].
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePath;
-///
-/// let path = RelativePath::new("foo/bar").to_owned();
-/// assert_eq!(path, "foo/bar");
-/// ```
-impl ToOwned for RelativePath {
-    type Owned = RelativePathBuf;
-
-    #[inline]
-    fn to_owned(&self) -> RelativePathBuf {
-        self.to_relative_path_buf()
-    }
-}
-
-impl fmt::Debug for RelativePath {
-    #[inline]
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(fmt, "{:?}", &self.inner)
-    }
-}
-
-/// [`AsRef<str>`] implementation for [`RelativePathBuf`].
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePathBuf;
-///
-/// let path = RelativePathBuf::from("foo/bar");
-/// let string: &str = path.as_ref();
-/// assert_eq!(string, "foo/bar");
-/// ```
-impl AsRef<str> for RelativePathBuf {
-    #[inline]
-    fn as_ref(&self) -> &str {
-        &self.inner
-    }
-}
-
-/// [`AsRef<RelativePath>`] implementation for [String].
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePath;
-///
-/// let path: String = format!("foo/bar");
-/// let path: &RelativePath = path.as_ref();
-/// assert_eq!(path, "foo/bar");
-/// ```
-impl AsRef<RelativePath> for String {
-    #[inline]
-    fn as_ref(&self) -> &RelativePath {
-        RelativePath::new(self)
-    }
-}
-
-/// [`AsRef<RelativePath>`] implementation for [`str`].
-///
-/// [`str`]: prim@str
-///
-/// # Examples
-///
-/// ```
-/// use relative_path::RelativePath;
-///
-/// let path: &RelativePath = "foo/bar".as_ref();
-/// assert_eq!(path, RelativePath::new("foo/bar"));
-/// ```
-impl AsRef<RelativePath> for str {
-    #[inline]
-    fn as_ref(&self) -> &RelativePath {
-        RelativePath::new(self)
-    }
-}
-
-impl AsRef<RelativePath> for RelativePath {
-    #[inline]
-    fn as_ref(&self) -> &RelativePath {
-        self
-    }
-}
-
-impl cmp::PartialEq for RelativePath {
-    #[inline]
-    fn eq(&self, other: &RelativePath) -> bool {
-        self.components() == other.components()
-    }
-}
-
-impl cmp::Eq for RelativePath {}
-
-impl cmp::PartialOrd for RelativePath {
-    #[inline]
-    fn partial_cmp(&self, other: &RelativePath) -> Option<cmp::Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl cmp::Ord for RelativePath {
-    #[inline]
-    fn cmp(&self, other: &RelativePath) -> cmp::Ordering {
-        self.components().cmp(other.components())
-    }
-}
-
-impl Hash for RelativePath {
-    #[inline]
-    fn hash<H: Hasher>(&self, h: &mut H) {
-        for c in self.components() {
-            c.hash(h);
-        }
-    }
-}
-
-impl fmt::Display for RelativePath {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Display::fmt(&self.inner, f)
-    }
-}
-
-impl fmt::Display for RelativePathBuf {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Display::fmt(&self.inner, f)
-    }
-}
-
-/// Helper struct for printing relative paths.
-///
-/// This is not strictly necessary in the same sense as it is for [`Display`],
-/// because relative paths are guaranteed to be valid UTF-8. But the behavior is
-/// preserved to simplify the transition between [`Path`] and [`RelativePath`].
-///
-/// [`Path`]: std::path::Path
-/// [`Display`]: std::fmt::Display
-pub struct Display<'a> {
-    path: &'a RelativePath,
-}
-
-impl<'a> fmt::Debug for Display<'a> {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Debug::fmt(&self.path, f)
-    }
-}
-
-impl<'a> fmt::Display for Display<'a> {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Display::fmt(&self.path, f)
-    }
-}
-
-/// [`serde::ser::Serialize`] implementation for [`RelativePathBuf`].
-///
-/// ```
-/// use serde::Serialize;
-/// use relative_path::RelativePathBuf;
-///
-/// #[derive(Serialize)]
-/// struct Document {
-///     path: RelativePathBuf,
-/// }
-/// ```
-#[cfg(feature = "serde")]
-impl serde::ser::Serialize for RelativePathBuf {
-    #[inline]
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::ser::Serializer,
-    {
-        serializer.serialize_str(&self.inner)
-    }
-}
-
-/// [`serde::de::Deserialize`] implementation for [`RelativePathBuf`].
-///
-/// ```
-/// use serde::Deserialize;
-/// use relative_path::RelativePathBuf;
-///
-/// #[derive(Deserialize)]
-/// struct Document {
-///     path: RelativePathBuf,
-/// }
-/// ```
-#[cfg(feature = "serde")]
-impl<'de> serde::de::Deserialize<'de> for RelativePathBuf {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::de::Deserializer<'de>,
-    {
-        struct Visitor;
-
-        impl<'de> serde::de::Visitor<'de> for Visitor {
-            type Value = RelativePathBuf;
-
-            #[inline]
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("a relative path")
-            }
-
-            #[inline]
-            fn visit_string<E>(self, input: String) -> Result<Self::Value, E>
-            where
-                E: serde::de::Error,
-            {
-                Ok(RelativePathBuf::from(input))
-            }
-
-            #[inline]
-            fn visit_str<E>(self, input: &str) -> Result<Self::Value, E>
-            where
-                E: serde::de::Error,
-            {
-                Ok(RelativePathBuf::from(input.to_owned()))
-            }
-        }
-
-        deserializer.deserialize_str(Visitor)
-    }
-}
-
-/// [`serde::de::Deserialize`] implementation for [`Box<RelativePath>`].
-///
-/// ```
-/// use serde::Deserialize;
-/// use relative_path::RelativePath;
-///
-/// #[derive(Deserialize)]
-/// struct Document {
-///     path: Box<RelativePath>,
-/// }
-/// ```
-#[cfg(feature = "serde")]
-impl<'de> serde::de::Deserialize<'de> for Box<RelativePath> {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::de::Deserializer<'de>,
-    {
-        struct Visitor;
-
-        impl<'de> serde::de::Visitor<'de> for Visitor {
-            type Value = Box<RelativePath>;
-
-            #[inline]
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("a relative path")
-            }
-
-            #[inline]
-            fn visit_string<E>(self, input: String) -> Result<Self::Value, E>
-            where
-                E: serde::de::Error,
-            {
-                Ok(Box::<RelativePath>::from(input.into_boxed_str()))
-            }
-
-            #[inline]
-            fn visit_str<E>(self, input: &str) -> Result<Self::Value, E>
-            where
-                E: serde::de::Error,
-            {
-                Ok(Box::<RelativePath>::from(input))
-            }
-        }
-
-        deserializer.deserialize_str(Visitor)
-    }
-}
-
-/// [`serde::de::Deserialize`] implementation for a [`RelativePath`] reference.
-///
-/// ```
-/// use serde::Deserialize;
-/// use relative_path::RelativePath;
-///
-/// #[derive(Deserialize)]
-/// struct Document<'a> {
-///     #[serde(borrow)]
-///     path: &'a RelativePath,
-/// }
-/// ```
-#[cfg(feature = "serde")]
-impl<'de: 'a, 'a> serde::de::Deserialize<'de> for &'a RelativePath {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::de::Deserializer<'de>,
-    {
-        struct Visitor;
-
-        impl<'a> serde::de::Visitor<'a> for Visitor {
-            type Value = &'a RelativePath;
-
-            #[inline]
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("a borrowed relative path")
-            }
-
-            #[inline]
-            fn visit_borrowed_str<E>(self, v: &'a str) -> Result<Self::Value, E>
-            where
-                E: serde::de::Error,
-            {
-                Ok(RelativePath::new(v))
-            }
-
-            #[inline]
-            fn visit_borrowed_bytes<E>(self, v: &'a [u8]) -> Result<Self::Value, E>
-            where
-                E: serde::de::Error,
-            {
-                let string = str::from_utf8(v).map_err(|_| {
-                    serde::de::Error::invalid_value(serde::de::Unexpected::Bytes(v), &self)
-                })?;
-                Ok(RelativePath::new(string))
-            }
-        }
-
-        deserializer.deserialize_str(Visitor)
-    }
-}
-
-/// [`serde::ser::Serialize`] implementation for [`RelativePath`].
-///
-/// ```
-/// use serde::Serialize;
-/// use relative_path::RelativePath;
-///
-/// #[derive(Serialize)]
-/// struct Document<'a> {
-///     path: &'a RelativePath,
-/// }
-/// ```
-#[cfg(feature = "serde")]
-impl serde::ser::Serialize for RelativePath {
-    #[inline]
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::ser::Serializer,
-    {
-        serializer.serialize_str(&self.inner)
-    }
-}
-
-macro_rules! impl_cmp {
-    ($lhs:ty, $rhs:ty) => {
-        impl<'a, 'b> PartialEq<$rhs> for $lhs {
-            #[inline]
-            fn eq(&self, other: &$rhs) -> bool {
-                <RelativePath as PartialEq>::eq(self, other)
-            }
-        }
-
-        impl<'a, 'b> PartialEq<$lhs> for $rhs {
-            #[inline]
-            fn eq(&self, other: &$lhs) -> bool {
-                <RelativePath as PartialEq>::eq(self, other)
-            }
-        }
-
-        impl<'a, 'b> PartialOrd<$rhs> for $lhs {
-            #[inline]
-            fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
-                <RelativePath as PartialOrd>::partial_cmp(self, other)
-            }
-        }
-
-        impl<'a, 'b> PartialOrd<$lhs> for $rhs {
-            #[inline]
-            fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
-                <RelativePath as PartialOrd>::partial_cmp(self, other)
-            }
-        }
-    };
-}
-
-impl_cmp!(RelativePathBuf, RelativePath);
-impl_cmp!(RelativePathBuf, &'a RelativePath);
-impl_cmp!(Cow<'a, RelativePath>, RelativePath);
-impl_cmp!(Cow<'a, RelativePath>, &'b RelativePath);
-impl_cmp!(Cow<'a, RelativePath>, RelativePathBuf);
-
-macro_rules! impl_cmp_str {
-    ($lhs:ty, $rhs:ty) => {
-        impl<'a, 'b> PartialEq<$rhs> for $lhs {
-            #[inline]
-            fn eq(&self, other: &$rhs) -> bool {
-                <RelativePath as PartialEq>::eq(self, other.as_ref())
-            }
-        }
-
-        impl<'a, 'b> PartialEq<$lhs> for $rhs {
-            #[inline]
-            fn eq(&self, other: &$lhs) -> bool {
-                <RelativePath as PartialEq>::eq(self.as_ref(), other)
-            }
-        }
-
-        impl<'a, 'b> PartialOrd<$rhs> for $lhs {
-            #[inline]
-            fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
-                <RelativePath as PartialOrd>::partial_cmp(self, other.as_ref())
-            }
-        }
-
-        impl<'a, 'b> PartialOrd<$lhs> for $rhs {
-            #[inline]
-            fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
-                <RelativePath as PartialOrd>::partial_cmp(self.as_ref(), other)
-            }
-        }
-    };
-}
-
-impl_cmp_str!(RelativePathBuf, str);
-impl_cmp_str!(RelativePathBuf, &'a str);
-impl_cmp_str!(RelativePathBuf, String);
-impl_cmp_str!(RelativePath, str);
-impl_cmp_str!(RelativePath, &'a str);
-impl_cmp_str!(RelativePath, String);
-impl_cmp_str!(&'a RelativePath, str);
-impl_cmp_str!(&'a RelativePath, String);
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/path_ext.rs b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/path_ext.rs
deleted file mode 100644
index 8eb707f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/path_ext.rs
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Ported from the pathdiff crate, which adapted the original rustc's
-// path_relative_from
-// https://github.com/Manishearth/pathdiff/blob/master/src/lib.rs
-// https://github.com/rust-lang/rust/blob/e1d0de82cc40b666b88d4a6d2c9dcbc81d7ed27f/src/librustc_back/rpath.rs#L116-L158
-
-use std::error;
-use std::fmt;
-use std::path::{Path, PathBuf};
-
-use crate::{Component, RelativePathBuf};
-
-// Prevent downstream implementations, so methods may be added without backwards
-// breaking changes.
-mod sealed {
-    use std::path::{Path, PathBuf};
-
-    pub trait Sealed {}
-
-    impl Sealed for Path {}
-    impl Sealed for PathBuf {}
-}
-
-/// An error raised when attempting to convert a path using
-/// [`PathExt::relative_to`].
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct RelativeToError {
-    kind: RelativeToErrorKind,
-}
-
-/// Error kind for [`RelativeToError`].
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-#[non_exhaustive]
-enum RelativeToErrorKind {
-    /// Non-utf8 component in path.
-    NonUtf8,
-    /// Mismatching path prefixes.
-    PrefixMismatch,
-    /// A provided path is ambiguous, in that there is no way to determine which
-    /// components should be added from one path to the other to traverse it.
-    ///
-    /// For example, `.` is ambiguous relative to `../..` because we don't know
-    /// the names of the components being traversed.
-    AmbiguousTraversal,
-    /// This is a catch-all error since we don't control the `std::path` API a
-    /// Components iterator might decide (intentionally or not) to produce
-    /// components which violates its own contract.
-    ///
-    /// In particular we rely on only relative components being produced after
-    /// the absolute prefix has been consumed.
-    IllegalComponent,
-}
-
-impl fmt::Display for RelativeToError {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        match self.kind {
-            RelativeToErrorKind::NonUtf8 => "path contains non-utf8 component".fmt(fmt),
-            RelativeToErrorKind::PrefixMismatch => {
-                "paths contain different absolute prefixes".fmt(fmt)
-            }
-            RelativeToErrorKind::AmbiguousTraversal => {
-                "path traversal cannot be determined".fmt(fmt)
-            }
-            RelativeToErrorKind::IllegalComponent => "path contains illegal components".fmt(fmt),
-        }
-    }
-}
-
-impl error::Error for RelativeToError {}
-
-impl From<RelativeToErrorKind> for RelativeToError {
-    #[inline]
-    fn from(kind: RelativeToErrorKind) -> Self {
-        Self { kind }
-    }
-}
-
-/// Extension methods for [`Path`] and [`PathBuf`] to for building and
-/// interacting with [`RelativePath`].
-///
-/// [`RelativePath`]: crate::RelativePath
-pub trait PathExt: sealed::Sealed {
-    /// Build a relative path from the provided directory to `self`.
-    ///
-    /// Producing a relative path like this is a logical operation and does not
-    /// guarantee that the constructed path corresponds to what the filesystem
-    /// would do. On Linux for example symbolic links could mean that the
-    /// logical path doesn't correspond to the filesystem path.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::path::Path;
-    /// use relative_path::{RelativePath, PathExt};
-    ///
-    /// let baz = Path::new("/foo/bar/baz");
-    /// let bar = Path::new("/foo/bar");
-    /// let qux = Path::new("/foo/bar/qux");
-    ///
-    /// assert_eq!(bar.relative_to(baz)?, RelativePath::new("../"));
-    /// assert_eq!(baz.relative_to(bar)?, RelativePath::new("baz"));
-    /// assert_eq!(qux.relative_to(baz)?, RelativePath::new("../qux"));
-    /// assert_eq!(baz.relative_to(qux)?, RelativePath::new("../baz"));
-    /// assert_eq!(bar.relative_to(qux)?, RelativePath::new("../"));
-    /// # Ok::<_, relative_path::RelativeToError>(())
-    /// ```
-    ///
-    /// # Errors
-    ///
-    /// Errors in case the provided path contains components which cannot be
-    /// converted into a relative path as needed, such as non-utf8 data.
-    fn relative_to<P>(&self, root: P) -> Result<RelativePathBuf, RelativeToError>
-    where
-        P: AsRef<Path>;
-}
-
-impl PathExt for Path {
-    fn relative_to<P>(&self, root: P) -> Result<RelativePathBuf, RelativeToError>
-    where
-        P: AsRef<Path>,
-    {
-        use std::path::Component::{CurDir, Normal, ParentDir, Prefix, RootDir};
-
-        // Helper function to convert from a std::path::Component to a
-        // relative_path::Component.
-        fn std_to_c(c: std::path::Component<'_>) -> Result<Component<'_>, RelativeToError> {
-            Ok(match c {
-                CurDir => Component::CurDir,
-                ParentDir => Component::ParentDir,
-                Normal(n) => Component::Normal(n.to_str().ok_or(RelativeToErrorKind::NonUtf8)?),
-                _ => return Err(RelativeToErrorKind::IllegalComponent.into()),
-            })
-        }
-
-        let root = root.as_ref();
-        let mut a_it = self.components();
-        let mut b_it = root.components();
-
-        // Ensure that the two paths are both either relative, or have the same
-        // prefix. Strips any common prefix the two paths do have. Prefixes are
-        // platform dependent, but different prefixes would for example indicate
-        // paths for different drives on Windows.
-        let (a_head, b_head) = loop {
-            match (a_it.next(), b_it.next()) {
-                (Some(RootDir), Some(RootDir)) => (),
-                (Some(Prefix(a)), Some(Prefix(b))) if a == b => (),
-                (Some(Prefix(_) | RootDir), _) | (_, Some(Prefix(_) | RootDir)) => {
-                    return Err(RelativeToErrorKind::PrefixMismatch.into());
-                }
-                (None, None) => break (None, None),
-                (a, b) if a != b => break (a, b),
-                _ => (),
-            }
-        };
-
-        let mut a_it = a_head.into_iter().chain(a_it);
-        let mut b_it = b_head.into_iter().chain(b_it);
-        let mut buf = RelativePathBuf::new();
-
-        loop {
-            let a = if let Some(a) = a_it.next() {
-                a
-            } else {
-                for _ in b_it {
-                    buf.push(Component::ParentDir);
-                }
-
-                break;
-            };
-
-            match b_it.next() {
-                Some(CurDir) => buf.push(std_to_c(a)?),
-                Some(ParentDir) => {
-                    return Err(RelativeToErrorKind::AmbiguousTraversal.into());
-                }
-                root => {
-                    if root.is_some() {
-                        buf.push(Component::ParentDir);
-                    }
-
-                    for comp in b_it {
-                        match comp {
-                            ParentDir => {
-                                if !buf.pop() {
-                                    return Err(RelativeToErrorKind::AmbiguousTraversal.into());
-                                }
-                            }
-                            CurDir => (),
-                            _ => buf.push(Component::ParentDir),
-                        }
-                    }
-
-                    buf.push(std_to_c(a)?);
-
-                    for c in a_it {
-                        buf.push(std_to_c(c)?);
-                    }
-
-                    break;
-                }
-            }
-        }
-
-        Ok(buf)
-    }
-}
-
-impl PathExt for PathBuf {
-    #[inline]
-    fn relative_to<P>(&self, root: P) -> Result<RelativePathBuf, RelativeToError>
-    where
-        P: AsRef<Path>,
-    {
-        self.as_path().relative_to(root)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::path::Path;
-
-    use super::{PathExt, RelativeToErrorKind};
-    use crate::{RelativePathBuf, RelativeToError};
-
-    macro_rules! assert_relative_to {
-        ($path:expr, $base:expr, Ok($expected:expr) $(,)?) => {
-            assert_eq!(
-                Path::new($path).relative_to($base),
-                Ok(RelativePathBuf::from($expected))
-            );
-        };
-
-        ($path:expr, $base:expr, Err($expected:ident) $(,)?) => {
-            assert_eq!(
-                Path::new($path).relative_to($base),
-                Err(RelativeToError::from(RelativeToErrorKind::$expected))
-            );
-        };
-    }
-
-    #[cfg(windows)]
-    macro_rules! abs {
-        ($path:expr) => {
-            Path::new(concat!("C:\\", $path))
-        };
-    }
-
-    #[cfg(not(windows))]
-    macro_rules! abs {
-        ($path:expr) => {
-            Path::new(concat!("/", $path))
-        };
-    }
-
-    #[test]
-    #[cfg(windows)]
-    fn test_different_prefixes() {
-        assert_relative_to!("C:\\repo", "D:\\repo", Err(PrefixMismatch),);
-        assert_relative_to!("C:\\repo", "C:\\repo", Ok(""));
-        assert_relative_to!(
-            "\\\\server\\share\\repo",
-            "\\\\server2\\share\\repo",
-            Err(PrefixMismatch),
-        );
-    }
-
-    #[test]
-    fn test_absolute() {
-        assert_relative_to!(abs!("foo"), abs!("bar"), Ok("../foo"));
-        assert_relative_to!("foo", "bar", Ok("../foo"));
-        assert_relative_to!(abs!("foo"), "bar", Err(PrefixMismatch));
-        assert_relative_to!("foo", abs!("bar"), Err(PrefixMismatch));
-    }
-
-    #[test]
-    fn test_identity() {
-        assert_relative_to!(".", ".", Ok(""));
-        assert_relative_to!("../foo", "../foo", Ok(""));
-        assert_relative_to!("./foo", "./foo", Ok(""));
-        assert_relative_to!("/foo", "/foo", Ok(""));
-        assert_relative_to!("foo", "foo", Ok(""));
-
-        assert_relative_to!("../foo/bar/baz", "../foo/bar/baz", Ok(""));
-        assert_relative_to!("foo/bar/baz", "foo/bar/baz", Ok(""));
-    }
-
-    #[test]
-    fn test_subset() {
-        assert_relative_to!("foo", "fo", Ok("../foo"));
-        assert_relative_to!("fo", "foo", Ok("../fo"));
-    }
-
-    #[test]
-    fn test_empty() {
-        assert_relative_to!("", "", Ok(""));
-        assert_relative_to!("foo", "", Ok("foo"));
-        assert_relative_to!("", "foo", Ok(".."));
-    }
-
-    #[test]
-    fn test_relative() {
-        assert_relative_to!("../foo", "../bar", Ok("../foo"));
-        assert_relative_to!("../foo", "../foo/bar/baz", Ok("../.."));
-        assert_relative_to!("../foo/bar/baz", "../foo", Ok("bar/baz"));
-
-        assert_relative_to!("foo/bar/baz", "foo", Ok("bar/baz"));
-        assert_relative_to!("foo/bar/baz", "foo/bar", Ok("baz"));
-        assert_relative_to!("foo/bar/baz", "foo/bar/baz", Ok(""));
-        assert_relative_to!("foo/bar/baz", "foo/bar/baz/", Ok(""));
-
-        assert_relative_to!("foo/bar/baz/", "foo", Ok("bar/baz"));
-        assert_relative_to!("foo/bar/baz/", "foo/bar", Ok("baz"));
-        assert_relative_to!("foo/bar/baz/", "foo/bar/baz", Ok(""));
-        assert_relative_to!("foo/bar/baz/", "foo/bar/baz/", Ok(""));
-
-        assert_relative_to!("foo/bar/baz", "foo/", Ok("bar/baz"));
-        assert_relative_to!("foo/bar/baz", "foo/bar/", Ok("baz"));
-        assert_relative_to!("foo/bar/baz", "foo/bar/baz", Ok(""));
-    }
-
-    #[test]
-    fn test_current_directory() {
-        assert_relative_to!(".", "foo", Ok("../."));
-        assert_relative_to!("foo", ".", Ok("foo"));
-        assert_relative_to!("/foo", "/.", Ok("foo"));
-    }
-
-    #[test]
-    fn assert_does_not_skip_parents() {
-        assert_relative_to!("some/path", "some/foo/baz/path", Ok("../../../path"));
-        assert_relative_to!("some/path", "some/foo/bar/../baz/path", Ok("../../../path"));
-    }
-
-    #[test]
-    fn test_ambiguous_paths() {
-        // Parent directory name is unknown, so trying to make current directory
-        // relative to it is impossible.
-        assert_relative_to!(".", "../..", Err(AmbiguousTraversal));
-        assert_relative_to!(".", "a/../..", Err(AmbiguousTraversal));
-        // Common prefixes are ok.
-        assert_relative_to!("../a/..", "../a/../b", Ok(".."));
-        assert_relative_to!("../a/../b", "../a/..", Ok("b"));
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/tests.rs b/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/tests.rs
deleted file mode 100644
index 2947ea7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/tests.rs
+++ /dev/null
@@ -1,790 +0,0 @@
-#![allow(clippy::too_many_lines)]
-
-use super::*;
-
-use std::path::Path;
-use std::rc::Rc;
-use std::sync::Arc;
-
-macro_rules! t(
-    ($path:expr, iter: $iter:expr) => (
-        {
-            let path = RelativePath::new($path);
-
-            // Forward iteration
-            let comps = path.iter().map(str::to_string).collect::<Vec<String>>();
-            let exp: &[&str] = &$iter;
-            let exps = exp.iter().map(|s| s.to_string()).collect::<Vec<String>>();
-            assert!(comps == exps, "iter: Expected {:?}, found {:?}",
-                    exps, comps);
-
-            // Reverse iteration
-            let comps = RelativePath::new($path).iter().rev().map(str::to_string)
-                .collect::<Vec<String>>();
-            let exps = exps.into_iter().rev().collect::<Vec<String>>();
-            assert!(comps == exps, "iter().rev(): Expected {:?}, found {:?}",
-                    exps, comps);
-        }
-    );
-
-    ($path:expr, parent: $parent:expr, file_name: $file:expr) => (
-        {
-            let path = RelativePath::new($path);
-
-            let parent = path.parent().map(|p| p.as_str());
-            let exp_parent: Option<&str> = $parent;
-            assert!(parent == exp_parent, "parent: Expected {:?}, found {:?}",
-                    exp_parent, parent);
-
-            let file = path.file_name();
-            let exp_file: Option<&str> = $file;
-            assert!(file == exp_file, "file_name: Expected {:?}, found {:?}",
-                    exp_file, file);
-        }
-    );
-
-    ($path:expr, file_stem: $file_stem:expr, extension: $extension:expr) => (
-        {
-            let path = RelativePath::new($path);
-
-            let stem = path.file_stem();
-            let exp_stem: Option<&str> = $file_stem;
-            assert!(stem == exp_stem, "file_stem: Expected {:?}, found {:?}",
-                    exp_stem, stem);
-
-            let ext = path.extension();
-            let exp_ext: Option<&str> = $extension;
-            assert!(ext == exp_ext, "extension: Expected {:?}, found {:?}",
-                    exp_ext, ext);
-        }
-    );
-
-    ($path:expr, iter: $iter:expr,
-                    parent: $parent:expr, file_name: $file:expr,
-                    file_stem: $file_stem:expr, extension: $extension:expr) => (
-        {
-            t!($path, iter: $iter);
-            t!($path, parent: $parent, file_name: $file);
-            t!($path, file_stem: $file_stem, extension: $extension);
-        }
-    );
-);
-
-fn assert_components(components: &[&str], path: &RelativePath) {
-    let components = components
-        .iter()
-        .copied()
-        .map(Component::Normal)
-        .collect::<Vec<_>>();
-    let result: Vec<_> = path.components().collect();
-    assert_eq!(&components[..], &result[..]);
-}
-
-fn rp(input: &str) -> &RelativePath {
-    RelativePath::new(input)
-}
-
-#[test]
-#[allow(clippy::cognitive_complexity)]
-pub fn test_decompositions() {
-    t!("",
-    iter: [],
-    parent: None,
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("foo",
-    iter: ["foo"],
-    parent: Some(""),
-    file_name: Some("foo"),
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("/",
-    iter: [],
-    parent: Some(""),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("/foo",
-    iter: ["foo"],
-    parent: Some(""),
-    file_name: Some("foo"),
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("foo/",
-    iter: ["foo"],
-    parent: Some(""),
-    file_name: Some("foo"),
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("/foo/",
-    iter: ["foo"],
-    parent: Some(""),
-    file_name: Some("foo"),
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("foo/bar",
-    iter: ["foo", "bar"],
-    parent: Some("foo"),
-    file_name: Some("bar"),
-    file_stem: Some("bar"),
-    extension: None
-    );
-
-    t!("/foo/bar",
-    iter: ["foo", "bar"],
-    parent: Some("/foo"),
-    file_name: Some("bar"),
-    file_stem: Some("bar"),
-    extension: None
-    );
-
-    t!("///foo///",
-    iter: ["foo"],
-    parent: Some(""),
-    file_name: Some("foo"),
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("///foo///bar",
-    iter: ["foo", "bar"],
-    parent: Some("///foo"),
-    file_name: Some("bar"),
-    file_stem: Some("bar"),
-    extension: None
-    );
-
-    t!("./.",
-    iter: [".", "."],
-    parent: Some(""),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("/..",
-    iter: [".."],
-    parent: Some(""),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("../",
-    iter: [".."],
-    parent: Some(""),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("foo/.",
-    iter: ["foo", "."],
-    parent: Some(""),
-    file_name: Some("foo"),
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("foo/..",
-    iter: ["foo", ".."],
-    parent: Some("foo"),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("foo/./",
-    iter: ["foo", "."],
-    parent: Some(""),
-    file_name: Some("foo"),
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("foo/./bar",
-    iter: ["foo", ".", "bar"],
-    parent: Some("foo/."),
-    file_name: Some("bar"),
-    file_stem: Some("bar"),
-    extension: None
-    );
-
-    t!("foo/../",
-    iter: ["foo", ".."],
-    parent: Some("foo"),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("foo/../bar",
-    iter: ["foo", "..", "bar"],
-    parent: Some("foo/.."),
-    file_name: Some("bar"),
-    file_stem: Some("bar"),
-    extension: None
-    );
-
-    t!("./a",
-    iter: [".", "a"],
-    parent: Some("."),
-    file_name: Some("a"),
-    file_stem: Some("a"),
-    extension: None
-    );
-
-    t!(".",
-    iter: ["."],
-    parent: Some(""),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("./",
-    iter: ["."],
-    parent: Some(""),
-    file_name: None,
-    file_stem: None,
-    extension: None
-    );
-
-    t!("a/b",
-    iter: ["a", "b"],
-    parent: Some("a"),
-    file_name: Some("b"),
-    file_stem: Some("b"),
-    extension: None
-    );
-
-    t!("a//b",
-    iter: ["a", "b"],
-    parent: Some("a"),
-    file_name: Some("b"),
-    file_stem: Some("b"),
-    extension: None
-    );
-
-    t!("a/./b",
-    iter: ["a", ".", "b"],
-    parent: Some("a/."),
-    file_name: Some("b"),
-    file_stem: Some("b"),
-    extension: None
-    );
-
-    t!("a/b/c",
-    iter: ["a", "b", "c"],
-    parent: Some("a/b"),
-    file_name: Some("c"),
-    file_stem: Some("c"),
-    extension: None
-    );
-
-    t!(".foo",
-    iter: [".foo"],
-    parent: Some(""),
-    file_name: Some(".foo"),
-    file_stem: Some(".foo"),
-    extension: None
-    );
-}
-
-#[test]
-pub fn test_stem_ext() {
-    t!("foo",
-    file_stem: Some("foo"),
-    extension: None
-    );
-
-    t!("foo.",
-    file_stem: Some("foo"),
-    extension: Some("")
-    );
-
-    t!(".foo",
-    file_stem: Some(".foo"),
-    extension: None
-    );
-
-    t!("foo.txt",
-    file_stem: Some("foo"),
-    extension: Some("txt")
-    );
-
-    t!("foo.bar.txt",
-    file_stem: Some("foo.bar"),
-    extension: Some("txt")
-    );
-
-    t!("foo.bar.",
-    file_stem: Some("foo.bar"),
-    extension: Some("")
-    );
-
-    t!(".", file_stem: None, extension: None);
-
-    t!("..", file_stem: None, extension: None);
-
-    t!("", file_stem: None, extension: None);
-}
-
-#[test]
-pub fn test_set_file_name() {
-    macro_rules! tfn(
-            ($path:expr, $file:expr, $expected:expr) => ( {
-            let mut p = RelativePathBuf::from($path);
-            p.set_file_name($file);
-            assert!(p.as_str() == $expected,
-                    "setting file name of {:?} to {:?}: Expected {:?}, got {:?}",
-                    $path, $file, $expected,
-                    p.as_str());
-        });
-    );
-
-    tfn!("foo", "foo", "foo");
-    tfn!("foo", "bar", "bar");
-    tfn!("foo", "", "");
-    tfn!("", "foo", "foo");
-
-    tfn!(".", "foo", "./foo");
-    tfn!("foo/", "bar", "bar");
-    tfn!("foo/.", "bar", "bar");
-    tfn!("..", "foo", "../foo");
-    tfn!("foo/..", "bar", "foo/../bar");
-    tfn!("/", "foo", "/foo");
-}
-
-#[test]
-pub fn test_set_extension() {
-    macro_rules! tse(
-            ($path:expr, $ext:expr, $expected:expr, $output:expr) => ( {
-            let mut p = RelativePathBuf::from($path);
-            let output = p.set_extension($ext);
-            assert!(p.as_str() == $expected && output == $output,
-                    "setting extension of {:?} to {:?}: Expected {:?}/{:?}, got {:?}/{:?}",
-                    $path, $ext, $expected, $output,
-                    p.as_str(), output);
-        });
-    );
-
-    tse!("foo", "txt", "foo.txt", true);
-    tse!("foo.bar", "txt", "foo.txt", true);
-    tse!("foo.bar.baz", "txt", "foo.bar.txt", true);
-    tse!(".test", "txt", ".test.txt", true);
-    tse!("foo.txt", "", "foo", true);
-    tse!("foo", "", "foo", true);
-    tse!("", "foo", "", false);
-    tse!(".", "foo", ".", false);
-    tse!("foo/", "bar", "foo.bar", true);
-    tse!("foo/.", "bar", "foo.bar", true);
-    tse!("..", "foo", "..", false);
-    tse!("foo/..", "bar", "foo/..", false);
-    tse!("/", "foo", "/", false);
-}
-
-#[test]
-fn test_eq_recievers() {
-    use std::borrow::Cow;
-
-    let borrowed: &RelativePath = RelativePath::new("foo/bar");
-    let mut owned: RelativePathBuf = RelativePathBuf::new();
-    owned.push("foo");
-    owned.push("bar");
-    let borrowed_cow: Cow<RelativePath> = borrowed.into();
-    let owned_cow: Cow<RelativePath> = owned.clone().into();
-
-    macro_rules! t {
-        ($($current:expr),+) => {
-            $(
-                assert_eq!($current, borrowed);
-                assert_eq!($current, owned);
-                assert_eq!($current, borrowed_cow);
-                assert_eq!($current, owned_cow);
-            )+
-        }
-    }
-
-    t!(borrowed, owned, borrowed_cow, owned_cow);
-}
-
-#[test]
-#[allow(clippy::cognitive_complexity)]
-pub fn test_compare() {
-    use std::collections::hash_map::DefaultHasher;
-    use std::hash::{Hash, Hasher};
-
-    fn hash<T: Hash>(t: T) -> u64 {
-        let mut s = DefaultHasher::new();
-        t.hash(&mut s);
-        s.finish()
-    }
-
-    macro_rules! tc(
-        ($path1:expr, $path2:expr, eq: $eq:expr,
-            starts_with: $starts_with:expr, ends_with: $ends_with:expr,
-            relative_from: $relative_from:expr) => ({
-                let path1 = RelativePath::new($path1);
-                let path2 = RelativePath::new($path2);
-
-                let eq = path1 == path2;
-                assert!(eq == $eq, "{:?} == {:?}, expected {:?}, got {:?}",
-                        $path1, $path2, $eq, eq);
-                assert!($eq == (hash(path1) == hash(path2)),
-                        "{:?} == {:?}, expected {:?}, got {} and {}",
-                        $path1, $path2, $eq, hash(path1), hash(path2));
-
-                let starts_with = path1.starts_with(path2);
-                assert!(starts_with == $starts_with,
-                        "{:?}.starts_with({:?}), expected {:?}, got {:?}", $path1, $path2,
-                        $starts_with, starts_with);
-
-                let ends_with = path1.ends_with(path2);
-                assert!(ends_with == $ends_with,
-                        "{:?}.ends_with({:?}), expected {:?}, got {:?}", $path1, $path2,
-                        $ends_with, ends_with);
-
-                let relative_from = path1.strip_prefix(path2)
-                                        .map(|p| p.as_str())
-                                        .ok();
-                let exp: Option<&str> = $relative_from;
-                assert!(relative_from == exp,
-                        "{:?}.strip_prefix({:?}), expected {:?}, got {:?}",
-                        $path1, $path2, exp, relative_from);
-        });
-    );
-
-    tc!("", "",
-    eq: true,
-    starts_with: true,
-    ends_with: true,
-    relative_from: Some("")
-    );
-
-    tc!("foo", "",
-    eq: false,
-    starts_with: true,
-    ends_with: true,
-    relative_from: Some("foo")
-    );
-
-    tc!("", "foo",
-    eq: false,
-    starts_with: false,
-    ends_with: false,
-    relative_from: None
-    );
-
-    tc!("foo", "foo",
-    eq: true,
-    starts_with: true,
-    ends_with: true,
-    relative_from: Some("")
-    );
-
-    tc!("foo/", "foo",
-    eq: true,
-    starts_with: true,
-    ends_with: true,
-    relative_from: Some("")
-    );
-
-    tc!("foo/bar", "foo",
-    eq: false,
-    starts_with: true,
-    ends_with: false,
-    relative_from: Some("bar")
-    );
-
-    tc!("foo/bar/baz", "foo/bar",
-    eq: false,
-    starts_with: true,
-    ends_with: false,
-    relative_from: Some("baz")
-    );
-
-    tc!("foo/bar", "foo/bar/baz",
-    eq: false,
-    starts_with: false,
-    ends_with: false,
-    relative_from: None
-    );
-}
-
-#[test]
-fn test_join() {
-    assert_components(&["foo", "bar", "baz"], &rp("foo/bar").join("baz///"));
-    assert_components(
-        &["hello", "world", "foo", "bar", "baz"],
-        &rp("hello/world").join("///foo/bar/baz"),
-    );
-    assert_components(&["foo", "bar", "baz"], &rp("").join("foo/bar/baz"));
-}
-
-#[test]
-fn test_components_iterator() {
-    use self::Component::*;
-
-    assert_eq!(
-        vec![Normal("hello"), Normal("world")],
-        rp("/hello///world//").components().collect::<Vec<_>>()
-    );
-}
-
-#[test]
-fn test_to_path_buf() {
-    let path = rp("/hello///world//");
-    let path_buf = path.to_path(".");
-    let expected = Path::new(".").join("hello").join("world");
-    assert_eq!(expected, path_buf);
-}
-
-#[test]
-fn test_eq() {
-    assert_eq!(rp("//foo///bar"), rp("/foo/bar"));
-    assert_eq!(rp("foo///bar"), rp("foo/bar"));
-    assert_eq!(rp("foo"), rp("foo"));
-    assert_eq!(rp("foo"), rp("foo").to_relative_path_buf());
-}
-
-#[test]
-fn test_next_back() {
-    use self::Component::*;
-
-    let mut it = rp("baz/bar///foo").components();
-    assert_eq!(Some(Normal("foo")), it.next_back());
-    assert_eq!(Some(Normal("bar")), it.next_back());
-    assert_eq!(Some(Normal("baz")), it.next_back());
-    assert_eq!(None, it.next_back());
-}
-
-#[test]
-fn test_parent() {
-    let path = rp("baz/./bar/foo//./.");
-
-    assert_eq!(Some(rp("baz/./bar")), path.parent());
-    assert_eq!(
-        Some(rp("baz/.")),
-        path.parent().and_then(RelativePath::parent)
-    );
-    assert_eq!(
-        Some(rp("")),
-        path.parent()
-            .and_then(RelativePath::parent)
-            .and_then(RelativePath::parent)
-    );
-    assert_eq!(
-        None,
-        path.parent()
-            .and_then(RelativePath::parent)
-            .and_then(RelativePath::parent)
-            .and_then(RelativePath::parent)
-    );
-}
-
-#[test]
-fn test_relative_path_buf() {
-    assert_eq!(
-        rp("hello/world/."),
-        rp("/hello///world//").to_owned().join(".")
-    );
-}
-
-#[test]
-fn test_normalize() {
-    assert_eq!(rp("c/d"), rp("a/.././b/../c/d").normalize());
-}
-
-#[test]
-fn test_relative_to() {
-    assert_eq!(
-        rp("foo/foo/bar"),
-        rp("foo/bar").join_normalized("../foo/bar")
-    );
-
-    assert_eq!(
-        rp("../c/e"),
-        rp("x/y").join_normalized("../../a/b/../../../c/d/../e")
-    );
-}
-
-#[test]
-fn test_from() {
-    assert_eq!(
-        rp("foo/bar").to_owned(),
-        RelativePathBuf::from(String::from("foo/bar")),
-    );
-
-    assert_eq!(
-        RelativePathBuf::from(rp("foo/bar")),
-        RelativePathBuf::from("foo/bar"),
-    );
-
-    assert_eq!(rp("foo/bar").to_owned(), RelativePathBuf::from("foo/bar"),);
-
-    assert_eq!(&*Box::<RelativePath>::from(rp("foo/bar")), rp("foo/bar"));
-    assert_eq!(
-        &*Box::<RelativePath>::from(RelativePathBuf::from("foo/bar")),
-        rp("foo/bar")
-    );
-
-    assert_eq!(&*Arc::<RelativePath>::from(rp("foo/bar")), rp("foo/bar"));
-    assert_eq!(
-        &*Arc::<RelativePath>::from(RelativePathBuf::from("foo/bar")),
-        rp("foo/bar")
-    );
-
-    assert_eq!(&*Rc::<RelativePath>::from(rp("foo/bar")), rp("foo/bar"));
-    assert_eq!(
-        &*Rc::<RelativePath>::from(RelativePathBuf::from("foo/bar")),
-        rp("foo/bar")
-    );
-}
-
-#[test]
-fn test_relative_path_asref_str() {
-    assert_eq!(
-        <RelativePath as AsRef<str>>::as_ref(rp("foo/bar")),
-        "foo/bar"
-    );
-}
-
-#[test]
-fn test_default() {
-    assert_eq!(RelativePathBuf::new(), RelativePathBuf::default(),);
-}
-
-#[test]
-pub fn test_push() {
-    macro_rules! tp(
-        ($path:expr, $push:expr, $expected:expr) => ( {
-            let mut actual = RelativePathBuf::from($path);
-            actual.push($push);
-            assert!(actual.as_str() == $expected,
-                    "pushing {:?} onto {:?}: Expected {:?}, got {:?}",
-                    $push, $path, $expected, actual.as_str());
-        });
-    );
-
-    tp!("", "foo", "foo");
-    tp!("foo", "bar", "foo/bar");
-    tp!("foo/", "bar", "foo/bar");
-    tp!("foo//", "bar", "foo//bar");
-    tp!("foo/.", "bar", "foo/./bar");
-    tp!("foo./.", "bar", "foo././bar");
-    tp!("foo", "", "foo/");
-    tp!("foo", ".", "foo/.");
-    tp!("foo", "..", "foo/..");
-}
-
-#[test]
-pub fn test_pop() {
-    macro_rules! tp(
-        ($path:expr, $expected:expr, $output:expr) => ( {
-            let mut actual = RelativePathBuf::from($path);
-            let output = actual.pop();
-            assert!(actual.as_str() == $expected && output == $output,
-                    "popping from {:?}: Expected {:?}/{:?}, got {:?}/{:?}",
-                    $path, $expected, $output,
-                    actual.as_str(), output);
-        });
-    );
-
-    tp!("", "", false);
-    tp!("/", "", true);
-    tp!("foo", "", true);
-    tp!(".", "", true);
-    tp!("/foo", "", true);
-    tp!("/foo/bar", "/foo", true);
-    tp!("/foo/bar/.", "/foo", true);
-    tp!("foo/bar", "foo", true);
-    tp!("foo/.", "", true);
-    tp!("foo//bar", "foo", true);
-}
-
-#[test]
-pub fn test_display() {
-    // NB: display delegated to the underlying string.
-    assert_eq!(RelativePathBuf::from("foo/bar").to_string(), "foo/bar");
-    assert_eq!(RelativePath::new("foo/bar").to_string(), "foo/bar");
-
-    assert_eq!(format!("{}", RelativePathBuf::from("foo/bar")), "foo/bar");
-    assert_eq!(format!("{}", RelativePath::new("foo/bar")), "foo/bar");
-}
-
-#[cfg(unix)]
-#[test]
-pub fn test_unix_from_path() {
-    use std::ffi::OsStr;
-    use std::os::unix::ffi::OsStrExt;
-
-    assert_eq!(
-        Err(FromPathErrorKind::NonRelative.into()),
-        RelativePath::from_path("/foo/bar")
-    );
-
-    // Continuation byte without continuation.
-    let non_utf8 = OsStr::from_bytes(&[0x80u8]);
-
-    assert_eq!(
-        Err(FromPathErrorKind::NonUtf8.into()),
-        RelativePath::from_path(non_utf8)
-    );
-}
-
-#[cfg(windows)]
-#[test]
-pub fn test_windows_from_path() {
-    assert_eq!(
-        Err(FromPathErrorKind::NonRelative.into()),
-        RelativePath::from_path("c:\\foo\\bar")
-    );
-
-    assert_eq!(
-        Err(FromPathErrorKind::BadSeparator.into()),
-        RelativePath::from_path("foo\\bar")
-    );
-}
-
-#[cfg(unix)]
-#[test]
-pub fn test_unix_owned_from_path() {
-    use std::ffi::OsStr;
-    use std::os::unix::ffi::OsStrExt;
-
-    assert_eq!(
-        Err(FromPathErrorKind::NonRelative.into()),
-        RelativePathBuf::from_path(Path::new("/foo/bar"))
-    );
-
-    // Continuation byte without continuation.
-    let non_utf8 = OsStr::from_bytes(&[0x80u8]);
-
-    assert_eq!(
-        Err(FromPathErrorKind::NonUtf8.into()),
-        RelativePathBuf::from_path(Path::new(non_utf8))
-    );
-}
-
-#[cfg(windows)]
-#[test]
-pub fn test_windows_owned_from_path() {
-    assert_eq!(
-        Err(FromPathErrorKind::NonRelative.into()),
-        RelativePathBuf::from_path(Path::new("c:\\foo\\bar"))
-    );
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/.cargo_vcs_info.json
deleted file mode 100644
index 816eac70..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "62134281cf451fc2bea69f9d2a16805a9ad03fef"
-  },
-  "path_in_vcs": "rstest"
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/Cargo.toml
deleted file mode 100644
index ef28b5e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/Cargo.toml
+++ /dev/null
@@ -1,101 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.67.1"
-name = "rstest"
-version = "0.22.0"
-authors = ["Michele d'Amico <michele.damico@gmail.com>"]
-build = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = """
-Rust fixture based test framework. It use procedural macro
-to implement fixtures and table based tests.
-"""
-homepage = "https://github.com/la10736/rstest"
-readme = "README.md"
-keywords = [
-    "test",
-    "fixture",
-]
-categories = ["development-tools::testing"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/la10736/rstest"
-
-[lib]
-name = "rstest"
-path = "src/lib.rs"
-
-[[test]]
-name = "integration"
-path = "tests/integration.rs"
-
-[dependencies.futures]
-version = "0.3.21"
-optional = true
-
-[dependencies.futures-timer]
-version = "3.0.2"
-optional = true
-
-[dependencies.rstest_macros]
-version = "0.22.0"
-default-features = false
-
-[dev-dependencies.actix-rt]
-version = "2.7.0"
-
-[dev-dependencies.async-std]
-version = "1.12.0"
-features = ["attributes"]
-
-[dev-dependencies.lazy_static]
-version = "1.4.0"
-
-[dev-dependencies.mytest]
-version = "0.21.0"
-default-features = false
-package = "rstest"
-
-[dev-dependencies.pretty_assertions]
-version = "1.2.1"
-
-[dev-dependencies.temp_testdir]
-version = "0.2.3"
-
-[dev-dependencies.tokio]
-version = "1.19.2"
-features = [
-    "rt",
-    "macros",
-]
-
-[dev-dependencies.unindent]
-version = "0.2.1"
-
-[build-dependencies.rustc_version]
-version = "0.4.0"
-
-[features]
-async-timeout = [
-    "dep:futures",
-    "dep:futures-timer",
-    "rstest_macros/async-timeout",
-]
-crate-name = ["rstest_macros/crate-name"]
-default = [
-    "async-timeout",
-    "crate-name",
-]
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/Cargo.toml.orig
deleted file mode 100644
index 8402c2fa..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/Cargo.toml.orig
+++ /dev/null
@@ -1,47 +0,0 @@
-[package]
-authors = ["Michele d'Amico <michele.damico@gmail.com>"]
-categories = ["development-tools::testing"]
-description = """
-Rust fixture based test framework. It use procedural macro
-to implement fixtures and table based tests.
-"""
-edition = "2021"
-homepage = "https://github.com/la10736/rstest"
-keywords = ["test", "fixture"]
-license = "MIT OR Apache-2.0"
-name = "rstest"
-readme = "README.md"
-repository = "https://github.com/la10736/rstest"
-rust-version = "1.67.1"
-version = "0.22.0"
-
-[features]
-async-timeout = [
-    "dep:futures",
-    "dep:futures-timer",
-    "rstest_macros/async-timeout",
-]
-crate-name = ["rstest_macros/crate-name"]
-default = ["async-timeout", "crate-name"]
-
-[lib]
-
-[dependencies]
-futures = { version = "0.3.21", optional = true }
-futures-timer = { version = "3.0.2", optional = true }
-rstest_macros = { version = "0.22.0", path = "../rstest_macros", default-features = false }
-
-[dev-dependencies]
-actix-rt = "2.7.0"
-async-std = { version = "1.12.0", features = ["attributes"] }
-lazy_static = "1.4.0"
-mytest = { package = "rstest", version = "0.21.0", default-features = false }
-pretty_assertions = "1.2.1"
-rstest_reuse = { path = "../rstest_reuse" }
-rstest_test = { path = "../rstest_test" }
-temp_testdir = "0.2.3"
-tokio = { version = "1.19.2", features = ["rt", "macros"] }
-unindent = "0.2.1"
-
-[build-dependencies]
-rustc_version = "0.4.0"
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/LICENSE-APACHE
deleted file mode 100644
index 0f9875d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/LICENSE-APACHE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-   
-   Copyright 2018-19 Michele d'Amico
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/LICENSE-MIT
deleted file mode 100644
index 513033e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/LICENSE-MIT
+++ /dev/null
@@ -1,18 +0,0 @@
-Copyright 2018-19 Michele d'Amico
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), 
-to deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
-INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/README.md b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/README.md
deleted file mode 100644
index 9c76a682..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/README.md
+++ /dev/null
@@ -1,550 +0,0 @@
-[![Crate][crate-image]][crate-link]
-[![Docs][docs-image]][docs-link]
-[![Status][test-action-image]][test-action-link]
-[![Apache 2.0 Licensed][license-apache-image]][license-apache-link]
-[![MIT Licensed][license-mit-image]][license-mit-link]
-
-# Fixture-based test framework for Rust
-
-## Introduction
-
-`rstest` uses procedural macros to help you on writing
-fixtures and table-based tests. To use it, add the
-following lines to your `Cargo.toml` file:
-
-```toml
-[dev-dependencies]
-rstest = "0.22.0"
-```
-
-### Features
-
-- `async-timeout`: `timeout` for `async` tests (Default enabled)
-- `crate-name`: Import `rstest` package with different name (Default enabled)
-
-### Fixture
-
-The core idea is that you can inject your test dependencies
-by passing them as test arguments. In the following example,
-a `fixture` is defined and then used in two tests,
-simply providing it as an argument:
-
-```rust
-use rstest::*;
-
-#[fixture]
-pub fn fixture() -> u32 { 42 }
-
-#[rstest]
-fn should_success(fixture: u32) {
-    assert_eq!(fixture, 42);
-}
-
-#[rstest]
-fn should_fail(fixture: u32) {
-    assert_ne!(fixture, 42);
-}
-```
-
-### Parametrize
-
-You can also inject values in some other ways. For instance, you can
-create a set of tests by simply providing the injected values for each
-case: `rstest` will generate an independent test for each case.
-
-```rust
-use rstest::rstest;
-
-#[rstest]
-#[case(0, 0)]
-#[case(1, 1)]
-#[case(2, 1)]
-#[case(3, 2)]
-#[case(4, 3)]
-fn fibonacci_test(#[case] input: u32, #[case] expected: u32) {
-    assert_eq!(expected, fibonacci(input))
-}
-```
-
-Running `cargo test` in this case executes five tests:
-
-```bash
-running 5 tests
-test fibonacci_test::case_1 ... ok
-test fibonacci_test::case_2 ... ok
-test fibonacci_test::case_3 ... ok
-test fibonacci_test::case_4 ... ok
-test fibonacci_test::case_5 ... ok
-
-test result: ok. 5 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
-```
-
-If you need to just providing a bunch of values for which you
-need to run your test, you can use `#[values(list, of, values)]`
-argument attribute:
-
-```rust
-use rstest::rstest;
-
-#[rstest]
-fn should_be_invalid(
-    #[values(None, Some(""), Some("    "))]
-    value: Option<&str>
-) {
-    assert!(!valid(value))
-}
-```
-
-Or create a _matrix_ test by using _list of values_ for some
-variables that will generate the cartesian product of all the
-values.
-
-#### Use Parametrize definition in more tests
-
-If you need to use a test list for more than one test you can use [`rstest_reuse`][reuse-crate-link]
-crate. With this helper crate you can define a template and use it everywhere.
-
-```rust
-use rstest::rstest;
-use rstest_reuse::{self, *};
-
-#[template]
-#[rstest]
-#[case(2, 2)]
-#[case(4/2, 2)]
-fn two_simple_cases(#[case] a: u32, #[case] b: u32) {}
-
-#[apply(two_simple_cases)]
-fn it_works(#[case] a: u32, #[case] b: u32) {
-    assert!(a == b);
-}
-```
-
-See [`rstest_reuse`][reuse-crate-link] for more details.
-
-#### Feature flagged cases
-
-In case you want certain test cases to only be present if a certain feature is
-enabled, use `#[cfg_attr(feature = …, case(…))]`:
-
-```rust
-use rstest::rstest;
-
-#[rstest]
-#[case(2, 2)]
-#[cfg_attr(feature = "frac", case(4/2, 2))]
-#[case(4/2, 2)]
-fn it_works(#[case] a: u32, #[case] b: u32) {
-    assert!(a == b);
-}
-```
-
-This also works with [`rstest_reuse`][reuse-crate-link].
-
-### Magic Conversion
-
-If you need a value where its type implement `FromStr()` trait you can use a literal
-string to build it:
-
-```rust
-# use rstest::rstest;
-# use std::net::SocketAddr;
-#[rstest]
-#[case("1.2.3.4:8080", 8080)]
-#[case("127.0.0.1:9000", 9000)]
-fn check_port(#[case] addr: SocketAddr, #[case] expected: u16) {
-    assert_eq!(expected, addr.port());
-}
-```
-
-You can use this feature also in value list and in fixture default value.
-
-### Async
-
-`rstest` provides out of the box `async` support. Just mark your
-test function as `async`, and it'll use `#[async-std::test]` to
-annotate it. This feature can be really useful to build async
-parametric tests using a tidy syntax:
-
-```rust
-use rstest::*;
-
-#[rstest]
-#[case(5, 2, 3)]
-#[should_panic]
-#[case(42, 40, 1)]
-async fn my_async_test(#[case] expected: u32, #[case] a: u32, #[case] b: u32) {
-    assert_eq!(expected, async_sum(a, b).await);
-}
-```
-
-Currently, only `async-std` is supported out of the box. But if you need to use
-another runtime that provide its own test attribute (i.e. `tokio::test` or
-`actix_rt::test`) you can use it in your `async` test like described in
-[Inject Test Attribute](#inject-test-attribute).
-
-To use this feature, you need to enable `attributes` in the `async-std`
-features list in your `Cargo.toml`:
-
-```toml
-async-std = { version = "1.5", features = ["attributes"] }
-```
-
-If your test input is an async value (fixture or test parameter) you can use `#[future]`
-attribute to remove `impl Future<Output = T>` boilerplate and just use `T`:
-
-```rust
-use rstest::*;
-#[fixture]
-async fn base() -> u32 { 42 }
-
-#[rstest]
-#[case(21, async { 2 })]
-#[case(6, async { 7 })]
-async fn my_async_test(#[future] base: u32, #[case] expected: u32, #[future] #[case] div: u32) {
-    assert_eq!(expected, base.await / div.await);
-}
-```
-
-As you noted you should `.await` all _future_ values and this sometimes can be really boring.
-In this case you can use `#[future(awt)]` to _awaiting_ an input or annotating your function
-with `#[awt]` attributes to globally `.await` all your _future_ inputs. Previous code can be
-simplified like follow:
-
-```rust
-use rstest::*;
-# #[fixture]
-# async fn base() -> u32 { 42 }
-#[rstest]
-#[case(21, async { 2 })]
-#[case(6, async { 7 })]
-#[awt]
-async fn global(#[future] base: u32, #[case] expected: u32, #[future] #[case] div: u32) {
-    assert_eq!(expected, base / div);
-}
-#[rstest]
-#[case(21, async { 2 })]
-#[case(6, async { 7 })]
-async fn single(#[future] base: u32, #[case] expected: u32, #[future(awt)] #[case] div: u32) {
-    assert_eq!(expected, base.await / div);
-}
-```
-
-### Files path as input arguments
-
-If you need to create a test for each file in a given location you can use
-`#[files("glob path syntax")]` attribute to generate a test for each file that
-satisfy the given glob path.
-
-```rust
-#[rstest]
-fn for_each_file(#[files("src/**/*.rs")] #[exclude("test")] path: PathBuf) {
-    assert!(check_file(&path))
-}
-```
-
-The default behavior is to ignore the files that start with `"."`, but you can
-modify this by use `#[include_dot_files]` attribute. The `files` attribute can be
-used more than once on the same variable, and you can also create some custom
-exclusion rules with the `#[exclude("regex")]` attributes that filter out all
-paths that verify the regular expression.
-
-### Default timeout
-
-You can set a default timeout for test using the `RSTEST_TIMEOUT` environment variable.
-The value is in seconds and is evaluated on test compile time.
-
-### Test `#[timeout()]`
-
-You can define an execution timeout for your tests with `#[timeout(<duration>)]` attribute. Timeout
-works both for sync and async tests and is runtime agnostic. `#[timeout(<duration>)]` take an
-expression that should return a `std::time::Duration`. Follow a simple async example:
-
-```rust
-use rstest::*;
-use std::time::Duration;
-
-async fn delayed_sum(a: u32, b: u32,delay: Duration) -> u32 {
-    async_std::task::sleep(delay).await;
-    a + b
-}
-
-#[rstest]
-#[timeout(Duration::from_millis(80))]
-async fn single_pass() {
-    assert_eq!(4, delayed_sum(2, 2, ms(10)).await);
-}
-```
-
-In this case test pass because the delay is just 10 milliseconds and timeout is
-80 milliseconds.
-
-You can use `timeout` attribute like any other attribute in your tests, and you can
-override a group timeout with a case specific one. In the follow example we have
-3 tests where first and third use 100 milliseconds but the second one use 10 milliseconds.
-Another valuable point in this example is to use an expression to compute the
-duration.
-
-```rust
-fn ms(ms: u32) -> Duration {
-    Duration::from_millis(ms.into())
-}
-
-#[rstest]
-#[case::pass(ms(1), 4)]
-#[timeout(ms(10))]
-#[case::fail_timeout(ms(60), 4)]
-#[case::fail_value(ms(1), 5)]
-#[timeout(ms(100))]
-async fn group_one_timeout_override(#[case] delay: Duration, #[case] expected: u32) {
-    assert_eq!(expected, delayed_sum(2, 2, delay).await);
-}
-```
-
-If you want to use `timeout` for `async` test you need to use `async-timeout`
-feature (enabled by default).
-
-### Inject Test Attribute
-
-If you would like to use another `test` attribute for your test you can simply
-indicate it in your test function's attributes. For instance if you want
-to test some async function with use `actix_rt::test` attribute you can just write:
-
-```rust
-use rstest::*;
-use actix_rt;
-use std::future::Future;
-
-#[rstest]
-#[case(2, async { 4 })]
-#[case(21, async { 42 })]
-#[actix_rt::test]
-async fn my_async_test(#[case] a: u32, #[case] #[future] result: u32) {
-    assert_eq!(2 * a, result.await);
-}
-```
-
-Just the attributes that ends with `test` (last path segment) can be injected.
-
-### Use `#[once]` Fixture
-
-If you need to a fixture that should be initialized just once for all tests
-you can use `#[once]` attribute. `rstest` call your fixture function just once and
-return a reference to your function result to all your tests:
-
-```rust
-#[fixture]
-#[once]
-fn once_fixture() -> i32 { 42 }
-
-#[rstest]
-fn single(once_fixture: &i32) {
-    // All tests that use once_fixture will share the same reference to once_fixture() 
-    // function result.
-    assert_eq!(&42, once_fixture)
-}
-```
-
-## Local lifetime and `#[by_ref]` attribute
-
-In some cases you may want to use a local lifetime for some arguments of your test.
-In these cases you can use the `#[by_ref]` attribute then use the reference instead
-the value.
-
-```rust
-enum E<'a> {
-    A(bool),
-    B(&'a Cell<E<'a>>),
-}
-
-fn make_e_from_bool<'a>(_bump: &'a (), b: bool) -> E<'a> {
-    E::A(b)
-}
-
-#[fixture]
-fn bump() -> () {}
- 
-#[rstest]
-#[case(true, E::A(true))]
-fn it_works<'a>(#[by_ref] bump: &'a (), #[case] b: bool, #[case] expected: E<'a>) {
-    let actual = make_e_from_bool(&bump, b);
-    assert_eq!(actual, expected);
-}
-```
-
-You can use `#[by_ref]` attribute for all arguments of your test and not just for fixture
-but also for cases, values and files.
-
-## Complete Example
-
-All these features can be used together with a mixture of fixture variables,
-fixed cases and a bunch of values. For instance, you might need two
-test cases which test for panics, one for a logged-in user and one for a guest user.
-
-```rust
-use rstest::*;
-
-#[fixture]
-fn repository() -> InMemoryRepository {
-    let mut r = InMemoryRepository::default();
-    // fill repository with some data
-    r
-}
-
-#[fixture]
-fn alice() -> User {
-    User::logged("Alice", "2001-10-04", "London", "UK")
-}
-
-#[rstest]
-#[case::authorized_user(alice())] // We can use `fixture` also as standard function
-#[case::guest(User::Guest)]   // We can give a name to every case : `guest` in this case
-                              // and `authorized_user`
-#[should_panic(expected = "Invalid query error")] // We would test a panic
-fn should_be_invalid_query_error(
-    repository: impl Repository,
-    #[case] user: User,
-    #[values("     ", "^%$some#@invalid!chars", ".n.o.d.o.t.s.")] query: &str,
-) {
-    repository.find_items(&user, query).unwrap();
-}
-```
-
-This example will generate exactly 6 tests grouped by 2 different cases:
-
-```text
-running 6 tests
-test should_be_invalid_query_error::case_1_authorized_user::query_1_____ - should panic ... ok
-test should_be_invalid_query_error::case_2_guest::query_2_____someinvalid_chars__ - should panic ... ok
-test should_be_invalid_query_error::case_1_authorized_user::query_2_____someinvalid_chars__ - should panic ... ok
-test should_be_invalid_query_error::case_2_guest::query_3____n_o_d_o_t_s___ - should panic ... ok
-test should_be_invalid_query_error::case_1_authorized_user::query_3____n_o_d_o_t_s___ - should panic ... ok
-test should_be_invalid_query_error::case_2_guest::query_1_____ - should panic ... ok
-
-test result: ok. 6 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
-```
-
-Note that the names of the values _try_ to convert the input expression in a
-Rust valid identifier name to help you find which tests fail.
-
-## More
-
-Is that all? Not quite yet!
-
-A fixture can be injected by another fixture, and they can be called
-using just some of its arguments.
-
-```rust
-#[fixture]
-fn user(#[default("Alice")] name: &str, #[default(22)] age: u8) -> User {
-    User::new(name, age)
-}
-
-#[rstest]
-fn is_alice(user: User) {
-    assert_eq!(user.name(), "Alice")
-}
-
-#[rstest]
-fn is_22(user: User) {
-    assert_eq!(user.age(), 22)
-}
-
-#[rstest]
-fn is_bob(#[with("Bob")] user: User) {
-    assert_eq!(user.name(), "Bob")
-}
-
-#[rstest]
-fn is_42(#[with("", 42)] user: User) {
-    assert_eq!(user.age(), 42)
-}
-```
-
-As you noted you can provide default values without the need of a fixture
-to define it.
-
-Finally, if you need tracing the input values you can just
-add the `trace` attribute to your test to enable the dump of all input
-variables.
-
-```rust
-#[rstest]
-#[case(42, "FortyTwo", ("minus twelve", -12))]
-#[case(24, "TwentyFour", ("minus twentyfour", -24))]
-#[trace] //This attribute enable tracing
-fn should_fail(#[case] number: u32, #[case] name: &str, #[case] tuple: (&str, i32)) {
-    assert!(false); // <- stdout come out just for failed tests
-}
-```
-
-```text
-running 2 tests
-test should_fail::case_1 ... FAILED
-test should_fail::case_2 ... FAILED
-
-failures:
-
----- should_fail::case_1 stdout ----
------------- TEST ARGUMENTS ------------
-number = 42
-name = "FortyTwo"
-tuple = ("minus twelve", -12)
--------------- TEST START --------------
-thread 'should_fail::case_1' panicked at 'assertion failed: false', src/main.rs:64:5
-note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace.
-
----- should_fail::case_2 stdout ----
------------- TEST ARGUMENTS ------------
-number = 24
-name = "TwentyFour"
-tuple = ("minus twentyfour", -24)
--------------- TEST START --------------
-thread 'should_fail::case_2' panicked at 'assertion failed: false', src/main.rs:64:5
-
-
-failures:
-    should_fail::case_1
-    should_fail::case_2
-
-test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out
-```
-
-In case one or more variables don't implement the `Debug` trait, an error
-is raised, but it's also possible to exclude a variable using the
-`#[notrace]` argument attribute.
-
-You can learn more on [Docs][docs-link] and find more examples in
-[`tests/resources`](/rstest/tests/resources) directory.
-
-## Rust version compatibility
-
-The minimum supported Rust version is 1.67.1.
-
-## Changelog
-
-See [CHANGELOG.md](/CHANGELOG.md)
-
-## License
-
-Licensed under either of
-
-* Apache License, Version 2.0, ([LICENSE-APACHE](/LICENSE-APACHE) or
-[license-apache-link])
-
-* MIT license [LICENSE-MIT](/LICENSE-MIT) or [license-MIT-link]
-at your option.
-
-[//]: # (links)
-
-[crate-image]: https://img.shields.io/crates/v/rstest.svg
-[crate-link]: https://crates.io/crates/rstest
-[docs-image]: https://docs.rs/rstest/badge.svg
-[docs-link]: https://docs.rs/rstest/
-[test-action-image]: https://github.com/la10736/rstest/workflows/Test/badge.svg
-[test-action-link]: https://github.com/la10736/rstest/actions?query=workflow:Test
-[license-apache-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg
-[license-mit-image]: https://img.shields.io/badge/license-MIT-blue.svg
-[license-apache-link]: http://www.apache.org/licenses/LICENSE-2.0
-[license-MIT-link]: http://opensource.org/licenses/MIT
-[reuse-crate-link]: https://crates.io/crates/rstest_reuse
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/lib.rs
deleted file mode 100644
index 22fa75f7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/lib.rs
+++ /dev/null
@@ -1,1439 +0,0 @@
-#![allow(clippy::test_attr_in_doctest)]
-//! This crate will help you to write simpler tests by leveraging a software testing concept called
-//! [test fixtures](https://en.wikipedia.org/wiki/Test_fixture#Software). A fixture is something
-//! that you can use in your tests to encapsulate a test's dependencies.
-//!
-//! The general idea is to have smaller tests that only describe the thing you're testing while you
-//! hide the auxiliary utilities your tests make use of somewhere else.
-//! For instance, if you have an application that has many tests with users, shopping baskets, and
-//! products, you'd have to create a user, a shopping basket, and product every single time in
-//! every test which becomes unwieldy quickly. In order to cut down on that repetition, you can
-//! instead use fixtures to declare that you need those objects for your function and the fixtures
-//! will take care of creating those by themselves. Focus on the important stuff in your tests!
-//!
-//! In `rstest` a fixture is a function that can return any kind of valid Rust type. This
-//! effectively means that your fixtures are not limited by the kind of data they can return.
-//! A test can consume an arbitrary number of fixtures at the same time.
-//!
-//! ## What
-//!
-//! The `rstest` crate defines the following procedural macros:
-//!
-//! - [`[rstest]`](macro@rstest): Declare that a test or a group of tests that may take
-//!   [fixtures](attr.rstest.html#injecting-fixtures),
-//!   [input table](attr.rstest.html#test-parametrized-cases) or
-//!   [list of values](attr.rstest.html#values-lists).
-//! - [`[fixture]`](macro@fixture): To mark a function as a fixture.
-//!
-//! ## Why
-//!
-//! Very often in Rust we write tests like this
-//!
-//! ```
-//! #[test]
-//! fn should_process_two_users() {
-//!     let mut repository = create_repository();
-//!     repository.add("Bob", 21);
-//!     repository.add("Alice", 22);
-//!
-//!     let processor = string_processor();
-//!     processor.send_all(&repository, "Good Morning");
-//!
-//!     assert_eq!(2, processor.output.find("Good Morning").count());
-//!     assert!(processor.output.contains("Bob"));
-//!     assert!(processor.output.contains("Alice"));
-//! }
-//! ```
-//!
-//! By making use of [`[rstest]`](macro@rstest) we can isolate the dependencies `empty_repository` and
-//! `string_processor` by passing them as fixtures:
-//!
-//! ```
-//! # use rstest::*;
-//! #[rstest]
-//! fn should_process_two_users(mut empty_repository: impl Repository,
-//!                             string_processor: FakeProcessor) {
-//!     empty_repository.add("Bob", 21);
-//!     empty_repository.add("Alice", 22);
-//!
-//!     string_processor.send_all("Good Morning");
-//!
-//!     assert_eq!(2, string_processor.output.find("Good Morning").count());
-//!     assert!(string_processor.output.contains("Bob"));
-//!     assert!(string_processor.output.contains("Alice"));
-//! }
-//! ```
-//!
-//! ... or if you use `"Alice"` and `"Bob"` in other tests, you can isolate `alice_and_bob` fixture
-//! and use it directly:
-//!
-//! ```
-//! # use rstest::*;
-//! # trait Repository { fn add(&mut self, name: &str, age: u8); }
-//! # struct Rep;
-//! # impl Repository for Rep { fn add(&mut self, name: &str, age: u8) {} }
-//! # #[fixture]
-//! # fn empty_repository() -> Rep {
-//! #     Rep
-//! # }
-//! #[fixture]
-//! fn alice_and_bob(mut empty_repository: impl Repository) -> impl Repository {
-//!     empty_repository.add("Bob", 21);
-//!     empty_repository.add("Alice", 22);
-//!     empty_repository
-//! }
-//!
-//! #[rstest]
-//! fn should_process_two_users(alice_and_bob: impl Repository,
-//!                             string_processor: FakeProcessor) {
-//!     string_processor.send_all("Good Morning");
-//!
-//!     assert_eq!(2, string_processor.output.find("Good Morning").count());
-//!     assert!(string_processor.output.contains("Bob"));
-//!     assert!(string_processor.output.contains("Alice"));
-//! }
-//! ```
-//! ### Features
-//!
-//! - `async-timeout`: `timeout` for `async` tests (Default enabled)
-//! - `crate-name`: Import `rstest` package with different name (Default enabled)
-//!
-//! ## Injecting fixtures as function arguments
-//!
-//! `rstest` functions can receive fixtures by using them as input arguments.
-//! A function decorated with [`[rstest]`](attr.rstest.html#injecting-fixtures)
-//! will resolve each argument name by call the fixture function.
-//! Fixtures should be annotated with the [`[fixture]`](macro@fixture) attribute.
-//!
-//! Fixtures will be resolved like function calls by following the standard resolution rules.
-//! Therefore, an identically named fixture can be use in different context.
-//!
-//! ```
-//! # use rstest::*;
-//! # trait Repository { }
-//! # #[derive(Default)]
-//! # struct DataSet {}
-//! # impl Repository for DataSet { }
-//! mod empty_cases {
-//! # use rstest::*;
-//! # trait Repository { }
-//! # #[derive(Default)]
-//! # struct DataSet {}
-//! # impl Repository for DataSet { }
-//!     use super::*;
-//!
-//!     #[fixture]
-//!     fn repository() -> impl Repository {
-//!         DataSet::default()
-//!     }
-//!
-//!     #[rstest]
-//!     fn should_do_nothing(repository: impl Repository) {
-//!         //.. test impl ..
-//!     }
-//! }
-//!
-//! mod non_trivial_case {
-//! # use rstest::*;
-//! # trait Repository { }
-//! # #[derive(Default)]
-//! # struct DataSet {}
-//! # impl Repository for DataSet { }
-//!     use super::*;
-//!
-//!     #[fixture]
-//!     fn repository() -> impl Repository {
-//!         let mut ds = DataSet::default();
-//!         // Fill your dataset with interesting case
-//!         ds
-//!     }
-//!
-//!     #[rstest]
-//!     fn should_notify_all_entries(repository: impl Repository) {
-//!         //.. test impl ..
-//!     }
-//! }
-//!
-//! ```
-//!
-//! Last but not least, fixtures can be injected like we saw in `alice_and_bob` example.
-//!
-//! ## Creating parametrized tests
-//!
-//! You can use also [`[rstest]`](attr.rstest.html#test-parametrized-cases) to create
-//! simple table-based tests. Let's see the classic Fibonacci example:
-//!
-//! ```
-//! use rstest::rstest;
-//!
-//! #[rstest]
-//! #[case(0, 0)]
-//! #[case(1, 1)]
-//! #[case(2, 1)]
-//! #[case(3, 2)]
-//! #[case(4, 3)]
-//! #[case(5, 5)]
-//! #[case(6, 8)]
-//! fn fibonacci_test(#[case] input: u32,#[case] expected: u32) {
-//!     assert_eq!(expected, fibonacci(input))
-//! }
-//!
-//! fn fibonacci(input: u32) -> u32 {
-//!     match input {
-//!         0 => 0,
-//!         1 => 1,
-//!         n => fibonacci(n - 2) + fibonacci(n - 1)
-//!     }
-//! }
-//! ```
-//! This will generate a bunch of tests, one for every `#[case(a, b)]`.
-//!
-//! ## Creating a test for each combinations of given values
-//!
-//! In some cases you need to test your code for each combinations of some input values. In this
-//! cases [`[rstest]`](attr.rstest.html#values-lists) give you the ability to define a list
-//! of values (rust expressions) to use for an arguments.
-//!
-//! ```
-//! # use rstest::rstest;
-//! # #[derive(PartialEq, Debug)]
-//! # enum State { Init, Start, Processing, Terminated }
-//! # #[derive(PartialEq, Debug)]
-//! # enum Event { Error, Fatal }
-//! # impl State { fn process(self, event: Event) -> Self { self } }
-//!
-//! #[rstest]
-//! fn should_terminate(
-//!     #[values(State::Init, State::Start, State::Processing)]
-//!     state: State,
-//!     #[values(Event::Error, Event::Fatal)]
-//!     event: Event
-//! ) {
-//!     assert_eq!(State::Terminated, state.process(event))
-//! }
-//! ```
-//!
-//! This will generate a test for each combination of `state` and `event`.
-//!
-//! ## Magic Conversion
-//!
-//! If you need a value where its type implement `FromStr()` trait you
-//! can use a literal string to build it.
-//!
-//! ```
-//! # use rstest::rstest;
-//! # use std::net::SocketAddr;
-//! #[rstest]
-//! #[case("1.2.3.4:8080", 8080)]
-//! #[case("127.0.0.1:9000", 9000)]
-//! fn check_port(#[case] addr: SocketAddr, #[case] expected: u16) {
-//!     assert_eq!(expected, addr.port());
-//! }
-//! ```
-//! You can use this feature also in value list and in fixture default value.
-//!
-//! # Optional features
-//!
-//! `rstest` Enable all features by default. You can disable them if you need to
-//! speed up compilation.
-//!
-//! - **`async-timeout`** *(enabled by default)* — Implement timeout for async
-//!   tests.
-//!
-//! # Rust version compatibility
-//!
-//! The minimum supported Rust version is 1.67.1.
-//!
-
-#[doc(hidden)]
-pub mod magic_conversion;
-#[doc(hidden)]
-pub mod timeout;
-
-/// Define a fixture that you can use in all `rstest`'s test arguments. You should just mark your
-/// function as `#[fixture]` and then use it as a test's argument. Fixture functions can also
-/// use other fixtures.
-///
-/// Let's see a trivial example:
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn twenty_one() -> i32 { 21 }
-///
-/// #[fixture]
-/// fn two() -> i32 { 2 }
-///
-/// #[fixture]
-/// fn injected(twenty_one: i32, two: i32) -> i32 { twenty_one * two }
-///
-/// #[rstest]
-/// fn the_test(injected: i32) {
-///     assert_eq!(42, injected)
-/// }
-/// ```
-///
-/// If the fixture function is an [`async` function](#async) your fixture become an `async`
-/// fixture.
-///
-/// # Default values
-///
-/// If you need to define argument default value you can use `#[default(expression)]`
-/// argument's attribute:
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn injected(
-///     #[default(21)]
-///     twenty_one: i32,
-///     #[default(1 + 1)]
-///     two: i32
-/// ) -> i32 { twenty_one * two }
-///
-/// #[rstest]
-/// fn the_test(injected: i32) {
-///     assert_eq!(42, injected)
-/// }
-/// ```
-/// The `expression` could be any valid rust expression, even an `async` block if you need.
-/// Moreover, if the type implements `FromStr` trait you can use a literal string to build it.
-///
-/// ```
-/// # use rstest::*;
-/// # use std::net::SocketAddr;
-/// # struct DbConnection {}
-/// #[fixture]
-/// fn db_connection(
-///     #[default("127.0.0.1:9000")]
-///     addr: SocketAddr
-/// ) -> DbConnection {
-///     // create connection
-/// # DbConnection{}
-/// }
-/// ```
-///
-/// # Async
-///
-/// If you need you can write `async` fixtures to use in your `async` tests. Simply use `async`
-/// keyword for your function and the fixture become an `async` fixture.
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// async fn async_fixture() -> i32 { 42 }
-///
-///
-/// #[rstest]
-/// async fn the_test(#[future] async_fixture: i32) {
-///     assert_eq!(42, async_fixture.await)
-/// }
-/// ```
-/// The `#[future]` argument attribute helps to remove the `impl Future<Output = T>` boilerplate.
-/// In this case the macro expands it in:
-///
-/// ```
-/// # use rstest::*;
-/// # use std::future::Future;
-/// # #[fixture]
-/// # async fn async_fixture() -> i32 { 42 }
-/// #[rstest]
-/// async fn the_test(async_fixture: impl std::future::Future<Output = i32>) {
-///     assert_eq!(42, async_fixture.await)
-/// }
-/// ```
-/// If you need, you can use `#[future]` attribute also with an implicit lifetime reference
-/// because the macro will replace the implicit lifetime with an explicit one.
-///
-/// # Rename
-///
-/// Sometimes you want to have long and descriptive name for your fixture but you prefer to use a much
-/// shorter name for argument that represent it in your fixture or test. You can rename the fixture
-/// using `#[from(short_name)]` attribute like following example:
-///
-/// ## Destructuring
-///
-/// It's possible to destructure the fixture type but, in this case, your're forced to use renaming syntax
-/// because it's not possible to guess the fixture name from this syntax:
-///
-/// ```
-/// use rstest::*;
-/// #[fixture]
-/// fn two_values() -> (u32, u32) { (42, 24) }
-///
-/// #[rstest]
-/// fn the_test(#[from(two_values)] (first, _): (u32, u32)) {
-///     assert_eq!(42, first)
-/// }
-/// ```
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn long_and_boring_descriptive_name() -> i32 { 42 }
-///
-/// #[rstest]
-/// fn the_test(#[from(long_and_boring_descriptive_name)] short: i32) {
-///     assert_eq!(42, short)
-/// }
-/// ```
-///
-/// This feature can also be useful when you don't want to declare the `use` of a fixture or simple
-/// use the fixture's path:
-///
-/// ```
-/// use rstest::*;
-///
-/// # mod magic_numbers {
-/// #     use rstest::*;
-/// #     #[fixture]
-/// #     pub fn fortytwo() -> i32 { 42 }
-/// # }
-/// #[rstest]
-/// fn the_test(#[from(magic_numbers::fortytwo)] x: i32) {
-///     assert_eq!(42, x)
-/// }
-/// ```
-///
-/// # `#[once]` Fixture
-///
-/// Especially in integration tests there are cases where you need a fixture that is called just once
-/// for every tests. `rstest` provides `#[once]` attribute for these cases.
-///
-/// If you mark your fixture with this attribute, then `rstest` will compute a static reference to your
-/// fixture result and return this reference to all your tests that need this fixture.
-///
-/// In follow example all tests share the same reference to the `42` static value.
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// #[once]
-/// fn once_fixture() -> i32 { 42 }
-///
-/// // Take care!!! You need to use a reference to the fixture value
-///
-/// #[rstest]
-/// #[case(1)]
-/// #[case(2)]
-/// fn cases_tests(once_fixture: &i32, #[case] v: i32) {
-///     // Take care!!! You need to use a reference to the fixture value
-///     assert_eq!(&42, once_fixture)
-/// }
-///
-/// #[rstest]
-/// fn single(once_fixture: &i32) {
-///     assert_eq!(&42, once_fixture)
-/// }
-/// ```
-///
-/// There are some limitations when you use `#[once]` fixture. `rstest` forbid to use once fixture
-/// for:
-///
-/// - `async` function
-/// - Generic function (both with generic types or use `impl` trait)
-///
-/// Take care that the `#[once]` fixture value will **never be dropped**.
-///
-/// # Partial Injection
-///
-/// You can also partially inject fixture dependency using `#[with(v1, v2, ..)]` attribute:
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn base() -> i32 { 1 }
-///
-/// #[fixture]
-/// fn first(base: i32) -> i32 { 1 * base }
-///
-/// #[fixture]
-/// fn second(base: i32) -> i32 { 2 * base }
-///
-/// #[fixture]
-/// fn injected(first: i32, #[with(3)] second: i32) -> i32 { first * second }
-///
-/// #[rstest]
-/// fn the_test(injected: i32) {
-///     assert_eq!(-6, injected)
-/// }
-/// ```
-/// Note that injected value can be an arbitrary rust expression. `#[with(v1, ..., vn)]`
-/// attribute will inject `v1, ..., vn` expression as fixture arguments: all remaining arguments
-/// will be resolved as fixtures.
-///
-/// Sometimes the return type cannot be inferred so you must define it: For the few times you may
-/// need to do it, you can use the `#[default(type)]`, `#[partial_n(type)]` function attribute
-/// to define it:
-///
-/// ```
-/// use rstest::*;
-/// # use std::fmt::Debug;
-///
-/// #[fixture]
-/// pub fn i() -> u32 {
-///     42
-/// }
-///
-/// #[fixture]
-/// pub fn j() -> i32 {
-///     -42
-/// }
-///
-/// #[fixture]
-/// #[default(impl Iterator<Item=(u32, i32)>)]
-/// #[partial_1(impl Iterator<Item=(I,i32)>)]
-/// pub fn fx<I, J>(i: I, j: J) -> impl Iterator<Item=(I, J)> {
-///     std::iter::once((i, j))
-/// }
-///
-/// #[rstest]
-/// fn resolve_by_default(mut fx: impl Iterator<Item=(u32, i32)>) {
-///     assert_eq!((42, -42), fx.next().unwrap())
-/// }
-///
-/// #[rstest]
-/// fn resolve_partial(#[with(42.0)] mut fx: impl Iterator<Item=(f32, i32)>) {
-///     assert_eq!((42.0, -42), fx.next().unwrap())
-/// }
-/// ```
-/// `partial_i` is the fixture used when you inject the first `i` arguments in test call.
-///
-/// # Old _compact_ syntax
-///
-/// There is also a compact form for all previous features. This will maintained for a long time
-/// but for `fixture` I strongly recommend to migrate your code because you'll pay a little
-/// verbosity but get back a more readable code.
-///
-/// Follow the previous examples in old _compact_ syntax.
-///
-/// ## Default
-/// ```
-/// # use rstest::*;
-/// #[fixture(twenty_one=21, two=2)]
-/// fn injected(twenty_one: i32, two: i32) -> i32 { twenty_one * two }
-/// ```
-///
-/// ## Rename
-/// ```
-/// # use rstest::*;
-/// #[fixture]
-/// fn long_and_boring_descriptive_name() -> i32 { 42 }
-///
-/// #[rstest(long_and_boring_descriptive_name as short)]
-/// fn the_test(short: i32) {
-///     assert_eq!(42, short)
-/// }
-/// ```
-///
-/// ## Partial Injection
-/// ```
-/// # use rstest::*;
-/// # #[fixture]
-/// # fn base() -> i32 { 1 }
-/// #
-/// # #[fixture]
-/// # fn first(base: i32) -> i32 { 1 * base }
-/// #
-/// # #[fixture]
-/// # fn second(base: i32) -> i32 { 2 * base }
-/// #
-/// #[fixture(second(-3))]
-/// fn injected(first: i32, second: i32) -> i32 { first * second }
-/// ```
-/// ## Partial Type Injection
-/// ```
-/// # use rstest::*;
-/// # use std::fmt::Debug;
-/// #
-/// # #[fixture]
-/// # pub fn i() -> u32 {
-/// #     42
-/// # }
-/// #
-/// # #[fixture]
-/// # pub fn j() -> i32 {
-/// #     -42
-/// # }
-/// #
-/// #[fixture(::default<impl Iterator<Item=(u32, i32)>>::partial_1<impl Iterator<Item=(I,i32)>>)]
-/// pub fn fx<I, J>(i: I, j: J) -> impl Iterator<Item=(I, J)> {
-///     std::iter::once((i, j))
-/// }
-/// ```
-pub use rstest_macros::fixture;
-
-/// The attribute that you should use for your tests. Your
-/// annotated function's arguments can be
-/// [injected](attr.rstest.html#injecting-fixtures) with
-/// [`[fixture]`](macro@fixture)s, provided by
-/// [parametrized cases](attr.rstest.html#test-parametrized-cases)
-/// or by [value lists](attr.rstest.html#values-lists).
-///
-/// `rstest` attribute can be applied to _any_ function and you can customize its
-/// parameters by using function and arguments attributes.
-///
-/// Your test function can use generics, `impl` or `dyn` and like any kind of rust tests:
-///
-/// - return results
-/// - marked by `#[should_panic]` attribute
-///
-/// In the function signature, where you define your tests inputs, you can also destructuring
-/// the values like any other rust function.
-///
-/// If the test function is an [`async` function](#async) `rstest` will run all tests as `async`
-/// tests. You can use it just with `async-std` and you should include `attributes` in
-/// `async-std`'s features.
-///
-/// In your test function you can:
-///
-/// - [injecting fixtures](#injecting-fixtures)
-/// - Generate [parametrized test cases](#test-parametrized-cases)
-/// - Generate tests for each combination of [value lists](#values-lists)
-///
-/// ## Injecting Fixtures
-///
-/// The simplest case is write a test that can be injected with
-/// [`[fixture]`](macro@fixture)s. You can just declare all used fixtures by passing
-/// them as a function's arguments. This can help your test to be neat
-/// and make your dependency clear.
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn injected() -> i32 { 42 }
-///
-/// #[rstest]
-/// fn the_test(injected: i32) {
-///     assert_eq!(42, injected)
-/// }
-/// ```
-///
-/// [`[rstest]`](macro@rstest) procedural macro will desugar it to something that isn't
-/// so far from
-///
-/// ```
-/// #[test]
-/// fn the_test() {
-///     let injected=injected();
-///     assert_eq!(42, injected)
-/// }
-/// ```
-///
-/// If you want to use long and descriptive names for your fixture but prefer to use
-/// shorter names inside your tests you use rename feature described in
-/// [fixture rename](attr.fixture.html#rename):
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn long_and_boring_descriptive_name() -> i32 { 42 }
-///
-/// #[rstest]
-/// fn the_test(#[from(long_and_boring_descriptive_name)] short: i32) {
-///     assert_eq!(42, short)
-/// }
-/// ```
-///
-/// The use of `#[from(...)]` attribute is mandatory if you need to destructure the value:
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn tuple() -> (u32, f32) { (42, 42.0) }
-///
-/// #[rstest]
-/// fn the_test(#[from(tuple)] (u, _): (u32, f32)) {
-///     assert_eq!(42, u)
-/// }
-/// ```
-///
-/// Sometimes is useful to have some parameters in your fixtures but your test would
-/// override the fixture's default values in some cases. Like in
-/// [fixture partial injection](attr.fixture.html#partial-injection) you use `#[with]`
-/// attribute to indicate some fixture's arguments also in `rstest`.
-///
-/// ```
-/// # struct User(String, u8);
-/// # impl User { fn name(&self) -> &str {&self.0} }
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn user(
-///     #[default("Alice")] name: impl AsRef<str>,
-///     #[default(22)] age: u8
-/// ) -> User { User(name.as_ref().to_owned(), age) }
-///
-/// #[rstest]
-/// fn check_user(#[with("Bob")] user: User) {
-///     assert_eq("Bob", user.name())
-/// }
-/// ```
-///
-/// ## Test Parametrized Cases
-///
-/// If you would execute your test for a set of input data cases
-/// you can define the arguments to use and the cases list. Let see
-/// the classical Fibonacci example. In this case we would give the
-/// `input` value and the `expected` result for a set of cases to test.
-///
-/// ```
-/// use rstest::rstest;
-///
-/// #[rstest]
-/// #[case(0, 0)]
-/// #[case(1, 1)]
-/// #[case(2, 1)]
-/// #[case(3, 2)]
-/// #[case(4, 3)]
-/// fn fibonacci_test(#[case] input: u32,#[case] expected: u32) {
-///     assert_eq!(expected, fibonacci(input))
-/// }
-///
-/// fn fibonacci(input: u32) -> u32 {
-///     match input {
-///         0 => 0,
-///         1 => 1,
-///         n => fibonacci(n - 2) + fibonacci(n - 1)
-///     }
-/// }
-/// ```
-///
-/// `rstest` will produce 5 independent tests and not just one that
-/// check every case. Every test can fail independently and `cargo test`
-/// will give follow output:
-///
-/// ```text
-/// running 5 tests
-/// test fibonacci_test::case_1 ... ok
-/// test fibonacci_test::case_2 ... ok
-/// test fibonacci_test::case_3 ... ok
-/// test fibonacci_test::case_4 ... ok
-/// test fibonacci_test::case_5 ... ok
-///
-/// test result: ok. 5 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
-/// ```
-///
-/// The cases input values can be arbitrary Rust expressions that return the
-/// argument type.
-///
-/// ```
-/// use rstest::rstest;
-///  
-/// fn sum(a: usize, b: usize) -> usize { a + b }
-///
-/// #[rstest]
-/// #[case("foo", 3)]
-/// #[case(String::from("foo"), 2 + 1)]
-/// #[case(format!("foo"), sum(2, 1))]
-/// fn test_len(#[case] s: impl AsRef<str>,#[case] len: usize) {
-///     assert_eq!(s.as_ref().len(), len);
-/// }
-/// ```
-/// ### Feature flagged cases
-///
-/// In case you want certain test cases to only be present if a certain feature is
-/// enabled, use `#[cfg_attr(feature = …, case(…))]`:
-///
-/// ```
-/// use rstest::rstest;
-///
-/// #[rstest]
-/// #[case(2, 2)]
-/// #[cfg_attr(feature = "frac", case(4/2, 2))]
-/// #[case(4/2, 2)]
-/// fn it_works(#[case] a: u32, #[case] b: u32) {
-///     assert!(a == b);
-/// }
-/// ```
-///
-/// This also works with [`rstest_reuse`](https://crates.io/crates/rstest_reuse).
-///
-/// ### Magic Conversion
-///
-/// You can use the magic conversion feature every time you would define a variable
-/// where its type define `FromStr` trait: test will parse the string to build the value.
-///
-/// ```
-/// # use rstest::rstest;
-/// # use std::path::PathBuf;
-/// # fn count_words(path: PathBuf) -> usize {0}
-/// #[rstest]
-/// #[case("resources/empty", 0)]
-/// #[case("resources/divine_comedy", 101.698)]
-/// fn test_count_words(#[case] path: PathBuf, #[case] expected: usize) {
-///     assert_eq!(expected, count_words(path))
-/// }
-/// ```
-///
-/// ### Optional case description
-///
-/// Optionally you can give a _description_ to every case simple by follow `case`
-/// with `::my_case_description` where `my_case_description` should be a a valid
-/// Rust ident.
-///
-/// ```
-/// # use rstest::*;
-/// #[rstest]
-/// #[case::zero_base_case(0, 0)]
-/// #[case::one_base_case(1, 1)]
-/// #[case(2, 1)]
-/// #[case(3, 2)]
-/// fn fibonacci_test(#[case] input: u32,#[case] expected: u32) {
-///     assert_eq!(expected, fibonacci(input))
-/// }
-///
-/// # fn fibonacci(input: u32) -> u32 {
-/// #     match input {
-/// #         0 => 0,
-/// #         1 => 1,
-/// #         n => fibonacci(n - 2) + fibonacci(n - 1)
-/// #     }
-/// # }
-/// ```
-///
-/// Output will be
-/// ```text
-/// running 4 tests
-/// test fibonacci_test::case_1_zero_base_case ... ok
-/// test fibonacci_test::case_2_one_base_case ... ok
-/// test fibonacci_test::case_3 ... ok
-/// test fibonacci_test::case_4 ... ok
-///
-/// test result: ok. 4 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
-/// ```
-///
-/// ### Use specific `case` attributes
-///
-/// Every function's attributes that preceding a `#[case]` attribute will
-/// be used in this test case and all function's attributes that follow the
-/// last `#[case]` attribute will mark all test cases.
-///
-/// This feature can be use to mark just some cases as `should_panic`
-/// and choose to have a fine grain on expected panic messages.
-///
-/// In follow example we run 3 tests where the first pass without any
-/// panic, in the second we catch a panic but we don't care about the message
-/// and in the third one we also check the panic message.
-///
-/// ```
-/// use rstest::rstest;
-///
-/// #[rstest]
-/// #[case::no_panic(0)]
-/// #[should_panic]
-/// #[case::panic(1)]
-/// #[should_panic(expected="expected")]
-/// #[case::panic_with_message(2)]
-/// fn attribute_per_case(#[case] val: i32) {
-///     match val {
-///         0 => assert!(true),
-///         1 => panic!("No catch"),
-///         2 => panic!("expected"),
-///         _ => unreachable!(),
-///     }
-/// }
-/// ```
-///
-/// Output:
-///
-/// ```text
-/// running 3 tests
-/// test attribute_per_case::case_1_no_panic ... ok
-/// test attribute_per_case::case_3_panic_with_message ... ok
-/// test attribute_per_case::case_2_panic ... ok
-///
-/// test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
-/// ```
-///
-/// To mark all your tests as `#[should_panic]` use:
-///
-/// ```
-/// # use rstest::rstest;
-/// #[rstest]
-/// #[case(1)]
-/// #[case(2)]
-/// #[case(3)]
-/// #[should_panic]
-/// fn fail(#[case] v: u32) { assert_eq!(0, v) }
-/// ```
-///
-/// ## Values Lists
-///
-/// Another useful way to write a test and execute it for some values
-/// is to use the values list syntax. This syntax can be useful both
-/// for a plain list and for testing all combination of input arguments.
-///
-/// ```
-/// # use rstest::*;
-/// # fn is_valid(input: &str) -> bool { true }
-///
-/// #[rstest]
-/// fn should_be_valid(
-///     #[values("John", "alice", "My_Name", "Zigy_2001")]
-///     input: &str
-/// ) {
-///     assert!(is_valid(input))
-/// }
-/// ```
-///
-/// or
-///
-/// ```
-/// # use rstest::*;
-/// # fn valid_user(name: &str, age: u8) -> bool { true }
-///
-/// #[rstest]
-/// fn should_accept_all_corner_cases(
-///     #[values("J", "A", "A________________________________________21")]
-///     name: &str,
-///     #[values(14, 100)]
-///     age: u8
-/// ) {
-///     assert!(valid_user(name, age))
-/// }
-/// ```
-/// where `cargo test` output is
-///
-/// ```text
-/// test should_accept_all_corner_cases::name_1___J__::age_2_100 ... ok
-/// test should_accept_all_corner_cases::name_2___A__::age_1_14 ... ok
-/// test should_accept_all_corner_cases::name_2___A__::age_2_100 ... ok
-/// test should_accept_all_corner_cases::name_3___A________________________________________21__::age_2_100 ... ok
-/// test should_accept_all_corner_cases::name_3___A________________________________________21__::age_1_14 ... ok
-/// test should_accept_all_corner_cases::name_1___J__::age_1_14 ... ok
-///
-/// test result: ok. 6 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
-/// ```
-/// Note that the test names contains the given expression sanitized into
-/// a valid Rust identifier name. This should help to identify which case fails.
-///
-///
-/// Also value list implements the magic conversion feature: every time the value type
-/// implements `FromStr` trait you can use a literal string to define it.
-///
-/// ```
-/// # use rstest::rstest;
-/// # use std::net::SocketAddr;
-/// #[rstest]
-/// fn given_port(#[values("1.2.3.4:8000", "4.3.2.1:8000", "127.0.0.1:8000")] addr: SocketAddr) {
-///     assert_eq!(8000, addr.port())
-/// }
-/// ```
-///
-/// ## Destructuring inputs
-///
-/// Both paramtrized case and values can be destructured:
-///
-/// ```
-/// # use rstest::*;
-/// struct S {
-///     first: u32,
-///     second: u32,
-/// }
-///
-/// struct T(i32);
-///
-/// #[rstest]
-/// #[case(S{first: 21, second: 42})]
-/// fn some_test(#[case] S{first, second} : S, #[values(T(-1), T(1))] T(t): T) {
-///     assert_eq!(1, t * t);
-///     assert_eq!(2 * first, second);
-/// }
-/// ```
-///
-/// ## Files path as input arguments
-///
-/// If you need to create a test for each file in a given location you can use
-/// `#[files("glob path syntax")]` attribute to generate a test for each file that
-/// satisfy the given glob path.
-///
-/// ```
-/// # use rstest::rstest;
-/// # use std::path::{Path, PathBuf};
-/// # fn check_file(path: &Path) -> bool { true };
-/// #[rstest]
-/// fn for_each_file(#[files("src/**/*.rs")] #[exclude("test")] path: PathBuf) {
-///     assert!(check_file(&path))
-/// }
-/// ```
-/// The default behavior is to ignore the files that start with `"."`, but you can
-/// modify this by use `#[include_dot_files]` attribute. The `files` attribute can be
-/// used more than once on the same variable, and you can also create some custom
-/// exclusion rules with the `#[exclude("regex")]` attributes that filter out all
-/// paths that verify the regular expression.
-///
-/// Sometime is useful to have test files in a workspace folder to share them between the
-/// crates in your workspace. You can do that by use the usual parent folders `..` in
-/// the glob path. In this case the test names will be the relative path from the crate root
-/// where the parent folder components are replaced by `_UP`: for instance if you have a
-/// `valid_call.yaml` in the folder `../test_cases` (from your crate root) a test name could be
-/// `path_1__UP_test_cases_valid_call_yaml`.
-///
-/// ## Use Parametrize definition in more tests
-///
-/// If you need to use a test list for more than one test you can use
-/// [`rstest_reuse`](https://crates.io/crates/rstest_reuse) crate.
-/// With this helper crate you can define a template and use it everywhere.
-///
-/// ```rust,ignore
-/// use rstest::rstest;
-/// use rstest_reuse::{self, *};
-///
-/// #[template]
-/// #[rstest]
-/// #[case(2, 2)]
-/// #[case(4/2, 2)]
-/// fn two_simple_cases(#[case] a: u32, #[case] b: u32) {}
-///
-/// #[apply(two_simple_cases)]
-/// fn it_works(#[case] a: u32,#[case] b: u32) {
-///     assert_eq!(a, b);
-/// }
-/// ```
-///
-/// See [`rstest_reuse`](https://crates.io/crates/rstest_reuse) for more details.
-///
-/// ## Async
-///
-/// `rstest` provides out of the box `async` support. Just mark your
-/// test function as `async` and it'll use `#[async-std::test]` to
-/// annotate it. This feature can be really useful to build async
-/// parametric tests using a tidy syntax:
-///
-/// ```
-/// use rstest::*;
-/// # async fn async_sum(a: u32, b: u32) -> u32 { a + b }
-///
-/// #[rstest]
-/// #[case(5, 2, 3)]
-/// #[should_panic]
-/// #[case(42, 40, 1)]
-/// async fn my_async_test(#[case] expected: u32, #[case] a: u32, #[case] b: u32) {
-///     assert_eq!(expected, async_sum(a, b).await);
-/// }
-/// ```
-///
-/// Currently only `async-std` is supported out of the box. But if you need to use
-/// another runtime that provide it's own test attribute (i.e. `tokio::test` or
-/// `actix_rt::test`) you can use it in your `async` test like described in
-/// [Inject Test Attribute](attr.rstest.html#inject-test-attribute).
-///
-/// To use this feature, you need to enable `attributes` in the `async-std`
-/// features list in your `Cargo.toml`:
-///
-/// ```toml
-/// async-std = { version = "1.5", features = ["attributes"] }
-/// ```
-///
-/// If your test input is an async value (fixture or test parameter) you can use `#[future]`
-/// attribute to remove `impl Future<Output = T>` boilerplate and just use `T`:
-///
-/// ```
-/// use rstest::*;
-/// #[fixture]
-/// async fn base() -> u32 { 42 }
-///
-/// #[rstest]
-/// #[case(21, async { 2 })]
-/// #[case(6, async { 7 })]
-/// async fn my_async_test(#[future] base: u32, #[case] expected: u32, #[future] #[case] div: u32) {
-///     assert_eq!(expected, base.await / div.await);
-/// }
-/// ```
-///
-/// As you noted you should `.await` all _future_ values and this some times can be really boring.
-/// In this case you can use `#[future(awt)]` to _awaiting_ an input or annotating your function
-/// with `#[awt]` attributes to globally `.await` all your _future_ inputs. Previous code can be
-/// simplified like follow:
-///
-/// ```
-/// use rstest::*;
-/// # #[fixture]
-/// # async fn base() -> u32 { 42 }
-///
-/// #[rstest]
-/// #[case(21, async { 2 })]
-/// #[case(6, async { 7 })]
-/// #[awt]
-/// async fn global(#[future] base: u32, #[case] expected: u32, #[future] #[case] div: u32) {
-///     assert_eq!(expected, base / div);
-/// }
-///
-/// #[rstest]
-/// #[case(21, async { 2 })]
-/// #[case(6, async { 7 })]
-/// async fn single(#[future] base: u32, #[case] expected: u32, #[future(awt)] #[case] div: u32) {
-///     assert_eq!(expected, base.await / div);
-/// }
-/// ```
-///
-/// ### Default timeout
-///
-/// You can set a default timeout for test using the `RSTEST_TIMEOUT` environment variable.
-/// The value is in seconds and is evaluated on test compile time.///
-///
-/// ### Test `#[timeout()]`
-///
-/// You can define an execution timeout for your tests with `#[timeout(<duration>)]` attribute. Timeout
-/// works both for sync and async tests and is runtime agnostic. `#[timeout(<duration>)]` take an
-/// expression that should return a `std::time::Duration`. Follow a simple async example:
-///
-/// ```rust
-/// use rstest::*;
-/// use std::time::Duration;
-///
-/// async fn delayed_sum(a: u32, b: u32,delay: Duration) -> u32 {
-///     async_std::task::sleep(delay).await;
-///     a + b
-/// }
-///
-/// #[rstest]
-/// #[timeout(Duration::from_millis(80))]
-/// async fn single_pass() {
-///     assert_eq!(4, delayed_sum(2, 2, ms(10)).await);
-/// }
-/// ```
-/// In this case test pass because the delay is just 10 milliseconds and timeout is
-/// 80 milliseconds.
-///
-/// You can use `timeout` attribute like any other attribute in your tests, and you can
-/// override a group timeout with a test specific one. In the follow example we have
-/// 3 tests where first and third use 100 milliseconds but the second one use 10 milliseconds.
-/// Another valuable point in this example is to use an expression to compute the
-/// duration.
-///
-/// ```rust
-/// # use rstest::*;
-/// # use std::time::Duration;
-/// #
-/// # async fn delayed_sum(a: u32, b: u32,delay: Duration) -> u32 {
-/// #     async_std::task::sleep(delay).await;
-/// #     a + b
-/// # }
-/// fn ms(ms: u32) -> Duration {
-///     Duration::from_millis(ms.into())
-/// }
-///
-/// #[rstest]
-/// #[case::pass(ms(1), 4)]
-/// #[timeout(ms(10))]
-/// #[case::fail_timeout(ms(60), 4)]
-/// #[case::fail_value(ms(1), 5)]
-/// #[timeout(ms(100))]
-/// async fn group_one_timeout_override(#[case] delay: Duration, #[case] expected: u32) {
-///     assert_eq!(expected, delayed_sum(2, 2, delay).await);
-/// }
-/// ```
-///
-/// If you want to use `timeout` for `async` test you need to use `async-timeout`
-/// feature (enabled by default).
-///
-/// ## Inject Test Attribute
-///
-/// If you would like to use another `test` attribute for your test you can simply
-/// indicate it in your test function's attributes. For instance if you want
-/// to test some async function with use `actix_rt::test` attribute you can just write:
-///
-/// ```
-/// use rstest::*;
-/// use actix_rt;
-/// use std::future::Future;
-///
-/// #[rstest]
-/// #[case(2, async { 4 })]
-/// #[case(21, async { 42 })]
-/// #[actix_rt::test]
-/// async fn my_async_test(#[case] a: u32, #[case] #[future] result: u32) {
-///     assert_eq!(2 * a, result.await);
-/// }
-/// ```
-/// Just the attributes that ends with `test` (last path segment) can be injected:
-/// in this case the `#[actix_rt::test]` attribute will replace the standard `#[test]`
-/// attribute.
-///
-/// Some test attributes allow to inject arguments into the test function, in a similar way to rstest.
-/// This can lead to compile errors when rstest is not able to resolve the additional arguments.
-/// To avoid this, see [Ignoring Arguments](attr.rstest.html#ignoring-arguments).
-///
-/// ## Local lifetime and `#[by_ref]` attribute
-///
-/// In some cases you may want to use a local lifetime for some arguments of your test.
-/// In these cases you can use the `#[by_ref]` attribute then use the reference instead
-/// the value.
-///
-/// ```rust
-/// # use std::cell::Cell;
-/// # use rstest::*;
-/// # #[derive(Debug, Clone, Copy, PartialEq, Eq)]
-/// enum E<'a> {
-///     A(bool),
-///     B(&'a Cell<E<'a>>),
-/// }
-///
-/// fn make_e_from_bool<'a>(_bump: &'a (), b: bool) -> E<'a> {
-///     E::A(b)
-/// }
-///
-/// #[fixture]
-/// fn bump() -> () {}
-///  
-/// #[rstest]
-/// #[case(true, E::A(true))]
-/// fn it_works<'a>(#[by_ref] bump: &'a (), #[case] b: bool, #[case] expected: E<'a>) {
-///     let actual = make_e_from_bool(&bump, b);
-///     assert_eq!(actual, expected);
-/// }
-/// ```
-///
-/// You can use `#[by_ref]` attribute for all arguments of your test and not just for fixture
-/// but also for cases, values and files.
-///
-/// ## Putting all Together
-///
-/// All these features can be used together with a mixture of fixture variables,
-/// fixed cases and bunch of values. For instance, you might need two
-/// test cases which test for panics, one for a logged in user and one for a guest user.
-///
-/// ```rust
-/// # enum User { Guest, Logged, }
-/// # impl User { fn logged(_n: &str, _d: &str, _w: &str, _s: &str) -> Self { Self::Logged } }
-/// # struct Item {}
-/// # trait Repository { fn find_items(&self, user: &User, query: &str) -> Result<Vec<Item>, String> { Err("Invalid query error".to_owned()) } }
-/// # #[derive(Default)] struct InMemoryRepository {}
-/// # impl Repository for InMemoryRepository {}
-///
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn repository() -> InMemoryRepository {
-///     let mut r = InMemoryRepository::default();
-///     // fill repository with some data
-///     r
-/// }
-///
-/// #[fixture]
-/// fn alice() -> User {
-///     User::logged("Alice", "2001-10-04", "London", "UK")
-/// }
-///
-/// #[rstest]
-/// #[case::authorized_user(alice())] // We can use `fixture` also as standard function
-/// #[case::guest(User::Guest)]   // We can give a name to every case : `guest` in this case
-/// #[should_panic(expected = "Invalid query error")] // We would test a panic
-/// fn should_be_invalid_query_error(
-///     repository: impl Repository,
-///     #[case] user: User,
-///     #[values("     ", "^%$some#@invalid!chars", ".n.o.d.o.t.s.")] query: &str,
-///     query: &str
-/// ) {
-///     repository.find_items(&user, query).unwrap();
-/// }
-/// ```
-///
-/// ## Ignoring Arguments
-///
-/// Sometimes, you may want to inject and use fixtures not managed by rstest
-/// (e.g. db connection pools for sqlx tests).
-///
-/// In these cases, you can use the `#[ignore]` attribute to ignore the additional
-/// parameter and let another crate take care of it:
-///
-/// ```rust, ignore
-/// use rstest::*;
-/// use sqlx::*;
-///
-/// #[fixture]
-/// fn my_fixture() -> i32 { 42 }
-///
-/// #[rstest]
-/// #[sqlx::test]
-/// async fn test_db(my_fixture: i32, #[ignore] pool: PgPool) {
-///     assert_eq!(42, injected);
-///     // do stuff with the connection pool
-/// }
-/// ```
-///
-///
-/// ## Trace Input Arguments
-///
-/// Sometimes can be very helpful to print all test's input arguments. To
-/// do it you can use the `#[trace]` function attribute that you can apply
-/// to all cases or just to some of them.
-///
-/// ```
-/// use rstest::*;
-///
-/// #[fixture]
-/// fn injected() -> i32 { 42 }
-///
-/// #[rstest]
-/// #[trace]
-/// fn the_test(injected: i32) {
-///     assert_eq!(42, injected)
-/// }
-/// ```
-///
-/// Will print an output like
-///
-/// ```bash
-/// Testing started at 14.12 ...
-/// ------------ TEST ARGUMENTS ------------
-/// injected = 42
-/// -------------- TEST START --------------
-///
-///
-/// Expected :42
-/// Actual   :43
-/// ```
-/// But
-/// ```
-/// # use rstest::*;
-/// #[rstest]
-/// #[case(1)]
-/// #[trace]
-/// #[case(2)]
-/// fn the_test(#[case] v: i32) {
-///     assert_eq!(0, v)
-/// }
-/// ```
-/// will trace just `case_2` input arguments.
-///
-/// If you want to trace input arguments but skip some of them that don't
-/// implement the `Debug` trait, you can also use the
-/// `#[notrace]` argument attribute to skip them:
-///
-/// ```
-/// # use rstest::*;
-/// # struct Xyz;
-/// # struct NoSense;
-/// #[rstest]
-/// #[trace]
-/// fn the_test(injected: i32, #[notrace] xyz: Xyz, #[notrace] have_no_sense: NoSense) {
-///     assert_eq!(42, injected)
-/// }
-/// ```
-/// # Old _compact_ syntax
-///
-/// `rstest` support also a syntax where all options and configuration can be write as
-/// `rstest` attribute arguments. This syntax is a little less verbose but make
-/// composition harder: for instance try to add some cases to a `rstest_reuse` template
-/// is really hard.
-///
-/// So we'll continue to maintain the old syntax for a long time but we strongly encourage
-/// to switch your test in the new form.
-///
-/// Anyway, here we recall this syntax and rewrite the previous example in the _compact_ form.
-///
-/// ```text
-/// rstest(
-///     arg_1,
-///     ...,
-///     arg_n[,]
-///     [::attribute_1[:: ... [::attribute_k]]]
-/// )
-/// ```
-/// Where:
-///
-/// - `arg_i` could be one of the follow
-///   - `ident` that match to one of function arguments for parametrized cases
-///   - `case[::description](v1, ..., vl)` a test case
-///   - `fixture(v1, ..., vl) [as argument_name]` where fixture is the injected
-///     fixture and argument_name (default use fixture) is one of function arguments
-///     that and `v1, ..., vl` is a partial list of fixture's arguments
-///   - `ident => [v1, ..., vl]` where `ident` is one of function arguments and
-///     `v1, ..., vl` is a list of values for ident
-/// - `attribute_j` a test attribute like `trace` or `notrace`
-///
-/// ## Fixture Arguments
-///
-/// ```
-/// # struct User(String, u8);
-/// # impl User { fn name(&self) -> &str {&self.0} }
-/// # use rstest::*;
-/// #
-/// # #[fixture]
-/// # fn user(
-/// #     #[default("Alice")] name: impl AsRef<str>,
-/// #     #[default(22)] age: u8
-/// # ) -> User { User(name.as_ref().to_owned(), age) }
-/// #
-/// #[rstest(user("Bob"))]
-/// fn check_user(user: User) {
-///     assert_eq("Bob", user.name())
-/// }
-/// ```
-///
-/// ## Fixture Rename
-/// ```
-/// # use rstest::*;
-/// #[fixture]
-/// fn long_and_boring_descriptive_name() -> i32 { 42 }
-///
-/// #[rstest(long_and_boring_descriptive_name as short)]
-/// fn the_test(short: i32) {
-///     assert_eq!(42, short)
-/// }
-/// ```
-///
-/// ## Parametrized
-///
-/// ```
-/// # use rstest::*;
-/// #[rstest(input, expected,
-///     case::zero_base_case(0, 0),
-///     case::one_base_case(1, 1),
-///     case(2, 1),
-///     case(3, 2),
-///     #[should_panic]
-///     case(4, 42)
-/// )]
-/// fn fibonacci_test(input: u32, expected: u32) {
-///     assert_eq!(expected, fibonacci(input))
-/// }
-///
-/// # fn fibonacci(input: u32) -> u32 {
-/// #     match input {
-/// #         0 => 0,
-/// #         1 => 1,
-/// #         n => fibonacci(n - 2) + fibonacci(n - 1)
-/// #     }
-/// # }
-/// ```
-///
-/// ## Values Lists
-///
-/// ```
-/// # use rstest::*;
-/// # fn is_valid(input: &str) -> bool { true }
-///
-/// #[rstest(
-///     input => ["John", "alice", "My_Name", "Zigy_2001"]
-/// )]
-/// fn should_be_valid(input: &str) {
-///     assert!(is_valid(input))
-/// }
-/// ```
-///
-/// ## `trace` and `notrace`
-///
-/// ```
-/// # use rstest::*;
-/// # struct Xyz;
-/// # struct NoSense;
-/// #[rstest(::trace::notrace(xzy, have_no_sense))]
-/// fn the_test(injected: i32, xyz: Xyz, have_no_sense: NoSense) {
-///     assert_eq!(42, injected)
-/// }
-/// ```
-///
-pub use rstest_macros::rstest;
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/magic_conversion.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/magic_conversion.rs
deleted file mode 100644
index 4df2301..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/magic_conversion.rs
+++ /dev/null
@@ -1,104 +0,0 @@
-pub struct Magic<T>(pub std::marker::PhantomData<T>);
-
-pub trait ViaParseDebug<'a, T> {
-    fn magic_conversion(&self, input: &'a str) -> T;
-}
-
-impl<'a, T> ViaParseDebug<'a, T> for &&Magic<T>
-where
-    T: std::str::FromStr,
-    T::Err: std::fmt::Debug,
-{
-    fn magic_conversion(&self, input: &'a str) -> T {
-        T::from_str(input).unwrap()
-    }
-}
-
-pub trait ViaParse<'a, T> {
-    fn magic_conversion(&self, input: &'a str) -> T;
-}
-
-impl<'a, T> ViaParse<'a, T> for &Magic<T>
-where
-    T: std::str::FromStr,
-{
-    fn magic_conversion(&self, input: &'a str) -> T {
-        match T::from_str(input) {
-            Ok(v) => v,
-            Err(_) => {
-                panic!(
-                    "Cannot parse '{}' to get {}",
-                    input,
-                    std::any::type_name::<T>()
-                );
-            }
-        }
-    }
-}
-
-pub trait ViaIdent<'a, T> {
-    fn magic_conversion(&self, input: &'a str) -> T;
-}
-
-impl<'a> ViaIdent<'a, &'a str> for &&Magic<&'a str> {
-    fn magic_conversion(&self, input: &'a str) -> &'a str {
-        input
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use std::str::FromStr;
-
-    #[test]
-    fn should_return_the_same_slice_string() {
-        assert_eq!(
-            "something",
-            (&&&Magic::<&str>(std::marker::PhantomData)).magic_conversion("something")
-        );
-    }
-
-    #[test]
-    fn should_parse_via_parse_debug() {
-        assert_eq!(
-            42u32,
-            (&&&Magic::<u32>(std::marker::PhantomData)).magic_conversion("42")
-        );
-    }
-
-    #[test]
-    fn should_parse_via_parse_no_error_debug() {
-        struct S(String);
-        struct E;
-        impl FromStr for S {
-            type Err = E;
-
-            fn from_str(s: &str) -> Result<Self, Self::Err> {
-                Ok(S(s.to_owned()))
-            }
-        }
-
-        assert_eq!(
-            "some",
-            (&&&Magic::<S>(std::marker::PhantomData))
-                .magic_conversion("some")
-                .0
-        );
-    }
-
-    #[test]
-    #[should_panic(expected = "MyTypeName")]
-    fn should_show_error() {
-        struct MyTypeName;
-        struct E;
-        impl FromStr for MyTypeName {
-            type Err = E;
-
-            fn from_str(_s: &str) -> Result<Self, Self::Err> {
-                Err(E)
-            }
-        }
-        (&&&Magic::<MyTypeName>(std::marker::PhantomData)).magic_conversion("");
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/timeout.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/timeout.rs
deleted file mode 100644
index 011e3a4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/timeout.rs
+++ /dev/null
@@ -1,199 +0,0 @@
-use std::{sync::mpsc, thread, time::Duration};
-
-#[cfg(feature = "async-timeout")]
-use futures::{select, Future, FutureExt};
-#[cfg(feature = "async-timeout")]
-use futures_timer::Delay;
-
-pub fn execute_with_timeout_sync<T: 'static + Send, F: FnOnce() -> T + Send + 'static>(
-    code: F,
-    timeout: Duration,
-) -> T {
-    let (sender, receiver) = mpsc::channel();
-    let thread = if let Some(name) = thread::current().name() {
-        thread::Builder::new().name(name.to_string())
-    } else {
-        thread::Builder::new()
-    };
-    let handle = thread.spawn(move || sender.send(code())).unwrap();
-    match receiver.recv_timeout(timeout) {
-        Ok(result) => {
-            // Unwraps are safe because we got a result from the thread, which is not a `SendError`,
-            // and there was no panic within the thread which caused a disconnect.
-            handle.join().unwrap().unwrap();
-            result
-        }
-        Err(mpsc::RecvTimeoutError::Timeout) => panic!("Timeout {:?} expired", timeout),
-        Err(mpsc::RecvTimeoutError::Disconnected) => match handle.join() {
-            Err(any) => std::panic::resume_unwind(any),
-            Ok(_) => unreachable!(),
-        },
-    }
-}
-
-#[cfg(feature = "async-timeout")]
-pub async fn execute_with_timeout_async<T, Fut: Future<Output = T>, F: FnOnce() -> Fut>(
-    code: F,
-    timeout: Duration,
-) -> T {
-    select! {
-        () = async {
-            Delay::new(timeout).await;
-        }.fuse() => panic!("Timeout {:?} expired", timeout),
-        out = code().fuse() => out,
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    #[cfg(feature = "async-timeout")]
-    mod async_version {
-
-        use super::*;
-        use std::time::Duration;
-
-        async fn delayed_sum(a: u32, b: u32, delay: Duration) -> u32 {
-            async_std::task::sleep(delay).await;
-            a + b
-        }
-
-        async fn test(delay: Duration) {
-            let result = delayed_sum(2, 2, delay).await;
-            assert_eq!(result, 4);
-        }
-
-        mod use_async_std_runtime {
-            use super::*;
-
-            #[async_std::test]
-            #[should_panic]
-            async fn should_fail() {
-                execute_with_timeout_async(
-                    || test(Duration::from_millis(40)),
-                    Duration::from_millis(10),
-                )
-                .await
-            }
-
-            #[async_std::test]
-            async fn should_pass() {
-                execute_with_timeout_async(
-                    || test(Duration::from_millis(10)),
-                    Duration::from_millis(40),
-                )
-                .await
-            }
-
-            #[async_std::test]
-            #[should_panic = "inner message"]
-            async fn should_fail_for_panic_with_right_panic_message() {
-                execute_with_timeout_async(
-                    || async {
-                        panic!("inner message");
-                    },
-                    Duration::from_millis(30),
-                )
-                .await
-            }
-
-            #[async_std::test]
-            async fn should_compile_also_with_no_copy_move() {
-                struct S {}
-                async fn test(_s: S) {
-                    assert!(true);
-                }
-                let s = S {};
-
-                execute_with_timeout_async(move || test(s), Duration::from_millis(20)).await
-            }
-        }
-
-        mod use_tokio_runtime {
-            use super::*;
-
-            #[tokio::test]
-            #[should_panic]
-            async fn should_fail() {
-                execute_with_timeout_async(
-                    || test(Duration::from_millis(40)),
-                    Duration::from_millis(10),
-                )
-                .await
-            }
-
-            #[async_std::test]
-            #[should_panic = "inner message"]
-            async fn should_fail_for_panic_with_right_panic_message() {
-                execute_with_timeout_async(
-                    || async {
-                        panic!("inner message");
-                    },
-                    Duration::from_millis(30),
-                )
-                .await
-            }
-
-            #[tokio::test]
-            async fn should_pass() {
-                execute_with_timeout_async(
-                    || test(Duration::from_millis(10)),
-                    Duration::from_millis(40),
-                )
-                .await
-            }
-        }
-    }
-
-    mod thread_version {
-        use super::*;
-
-        pub fn delayed_sum(a: u32, b: u32, delay: Duration) -> u32 {
-            std::thread::sleep(delay);
-            a + b
-        }
-
-        fn test(delay: Duration) {
-            let result = delayed_sum(2, 2, delay);
-            assert_eq!(result, 4);
-        }
-
-        #[test]
-        fn should_pass() {
-            execute_with_timeout_sync(
-                || test(Duration::from_millis(30)),
-                Duration::from_millis(70),
-            )
-        }
-
-        #[test]
-        #[should_panic = "inner message"]
-        fn should_fail_for_panic_with_right_panic_message() {
-            execute_with_timeout_sync(
-                || {
-                    panic!("inner message");
-                },
-                Duration::from_millis(100),
-            )
-        }
-
-        #[test]
-        #[should_panic]
-        fn should_fail() {
-            execute_with_timeout_sync(
-                || test(Duration::from_millis(70)),
-                Duration::from_millis(30),
-            )
-        }
-        #[test]
-        fn should_compile_also_with_no_copy_move() {
-            struct S {}
-            fn test(_s: S) {
-                assert!(true);
-            }
-            let s = S {};
-
-            execute_with_timeout_sync(move || test(s), Duration::from_millis(20))
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/fixture/mod.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/fixture/mod.rs
deleted file mode 100644
index f331ca9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/fixture/mod.rs
+++ /dev/null
@@ -1,455 +0,0 @@
-use std::path::Path;
-pub use unindent::Unindent;
-
-use super::resources;
-use mytest::*;
-use rstest_test::{assert_in, assert_not_in, Project, Stringable, TestResults};
-
-fn prj(res: &str) -> Project {
-    let path = Path::new("fixture").join(res);
-    crate::prj().set_code_file(resources(path))
-}
-
-fn run_test(res: &str) -> (std::process::Output, String) {
-    let prj = prj(res);
-    (
-        prj.run_tests().unwrap(),
-        prj.get_name().to_owned().to_string(),
-    )
-}
-
-mod should {
-    use rstest_test::{assert_regex, CountMessageOccurrence};
-
-    use super::*;
-
-    #[test]
-    fn use_input_fixtures() {
-        let (output, _) = run_test("simple_injection.rs");
-
-        TestResults::new().ok("success").fail("fail").assert(output);
-    }
-
-    #[test]
-    fn create_a_struct_that_return_the_fixture() {
-        let (output, _) = run_test("fixture_struct.rs");
-
-        TestResults::new()
-            .ok("resolve_new")
-            .ok("resolve_default")
-            .ok("injected_new")
-            .ok("injected_default")
-            .assert(output);
-    }
-
-    #[test]
-    fn be_accessible_from_other_module() {
-        let (output, _) = run_test("from_other_module.rs");
-
-        TestResults::new().ok("struct_access").assert(output);
-    }
-
-    #[test]
-    fn not_show_any_warning() {
-        let (output, _) = run_test("no_warning.rs");
-
-        assert_not_in!(output.stderr.str(), "warning:");
-    }
-
-    #[test]
-    fn rename() {
-        let (output, _) = run_test("rename.rs");
-
-        TestResults::new().ok("test").assert(output);
-    }
-
-    mod accept_and_return {
-        use super::*;
-
-        #[test]
-        fn impl_traits() {
-            let (output, _) = run_test("impl.rs");
-
-            TestResults::new()
-                .ok("base_impl_return")
-                .ok("nested_impl_return")
-                .ok("nested_multiple_impl_return")
-                .ok("base_impl_input")
-                .ok("nested_impl_input")
-                .ok("nested_multiple_impl_input")
-                .assert(output);
-        }
-
-        #[test]
-        fn dyn_traits() {
-            let (output, _) = run_test("dyn.rs");
-
-            TestResults::new()
-                .ok("test_dyn_box")
-                .ok("test_dyn_ref")
-                .ok("test_dyn_box_resolve")
-                .ok("test_dyn_ref_resolve")
-                .assert(output);
-        }
-    }
-
-    #[rstest]
-    #[case::base("async_fixture.rs")]
-    #[case::use_global("await_complete_fixture.rs")]
-    #[case::use_selective("await_partial_fixture.rs")]
-    fn resolve_async_fixture(#[case] code: &str) {
-        let prj = prj(code);
-        prj.add_dependency("async-std", r#"{version="*", features=["attributes"]}"#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .ok("default_is_async")
-            .ok("use_async_fixture")
-            .ok("use_async_impl_output")
-            .ok("use_async_nest_fixture_default")
-            .ok("use_async_nest_fixture_injected")
-            .ok("use_async_nest_fixture_with_default")
-            .ok("use_two_args_mix_fixture")
-            .ok("use_two_args_mix_fixture_inject_first")
-            .ok("use_two_args_mix_fixture_inject_both")
-            .assert(output);
-    }
-
-    #[test]
-    fn resolve_fixture_generics_by_fixture_input() {
-        let (output, _) = run_test("resolve.rs");
-
-        TestResults::new()
-            .ok("test_u32")
-            .ok("test_i32")
-            .assert(output);
-    }
-
-    #[test]
-    fn use_defined_return_type_if_any() {
-        let (output, _) = run_test("defined_return_type.rs");
-
-        TestResults::new()
-            .ok("resolve")
-            .ok("resolve_partial")
-            .ok("resolve_attrs")
-            .ok("resolve_partial_attrs")
-            .assert(output);
-    }
-
-    #[test]
-    fn clean_up_default_from_unused_generics() {
-        let (output, _) = run_test("clean_up_default_generics.rs");
-
-        TestResults::new()
-            .ok("resolve")
-            .ok("resolve_partial")
-            .assert(output);
-    }
-
-    #[test]
-    fn apply_partial_fixture() {
-        let (output, _) = run_test("partial.rs");
-
-        TestResults::new()
-            .ok("default")
-            .ok("t_partial_1")
-            .ok("t_partial_2")
-            .ok("t_complete")
-            .assert(output);
-    }
-
-    #[test]
-    fn apply_partial_fixture_from_value_attribute() {
-        let (output, _) = run_test("partial_in_attr.rs");
-
-        TestResults::new()
-            .ok("default")
-            .ok("t_partial_1")
-            .ok("t_partial_2")
-            .ok("t_complete")
-            .assert(output);
-    }
-
-    #[rstest]
-    #[case::compact_form("default.rs")]
-    #[case::attrs_form("default_in_attrs.rs")]
-    fn use_input_values_if_any(#[case] file: &str) {
-        let (output, _) = run_test(file);
-
-        TestResults::new()
-            .ok("test_simple")
-            .ok("test_simple_changed")
-            .ok("test_double")
-            .ok("test_double_changed")
-            .ok("test_mixed")
-            .assert(output);
-    }
-
-    #[test]
-    fn convert_literal_string_for_default_values() {
-        let (output, _) = run_test("default_conversion.rs");
-
-        assert_regex!(
-            "Cannot parse 'error' to get [a-z:_0-9]*MyType",
-            output.stdout.str()
-        );
-
-        TestResults::new()
-            .ok("test_base")
-            .ok("test_byte_array")
-            .ok("test_convert_custom")
-            .fail("test_fail_conversion")
-            .assert(output);
-    }
-
-    #[rstest]
-    #[case("once.rs")]
-    #[case::no_return("once_no_return.rs")]
-    #[case::defined_type("once_defined_type.rs")]
-    fn accept_once_attribute_and_call_fixture_just_once(#[case] fname: &str) {
-        let project = prj(fname).with_nocapture();
-
-        let output = project.run_tests().unwrap();
-
-        // Just to see the errors if fixture doesn't compile
-        assert_in!(output.stderr.str(), "Exec fixture() just once");
-
-        let occurrences = output.stderr.str().count("Exec fixture() just once");
-
-        assert_eq!(1, occurrences);
-    }
-
-    mod show_correct_errors {
-        use super::*;
-        use std::process::Output;
-
-        use rstest::{fixture, rstest};
-
-        #[fixture]
-        #[once]
-        fn errors_rs() -> (Output, String) {
-            run_test("errors.rs")
-        }
-
-        #[rstest]
-        fn when_cannot_resolve_fixture(errors_rs: &(Output, String)) {
-            let (output, name) = errors_rs.clone();
-
-            assert_in!(output.stderr.str(), "error[E0433]: ");
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                      --> {name}/src/lib.rs:14:33
-                       |
-                    14 | fn error_cannot_resolve_fixture(no_fixture: u32) {{"#
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn on_mismatched_types_inner(errors_rs: &(Output, String)) {
-            let (output, name) = errors_rs.clone();
-
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error[E0308]: mismatched types
-                      --> {name}/src/lib.rs:10:18
-                       |
-                    10 |     let a: u32 = "";
-                    "#
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn on_mismatched_types_argument(errors_rs: &(Output, String)) {
-            let (output, name) = errors_rs.clone();
-
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error[E0308]: mismatched types
-                      --> {name}/src/lib.rs:17:29
-                    "#
-                )
-                .unindent()
-            );
-
-            assert_in!(
-                output.stderr.str(),
-                r#"
-                17 | fn error_fixture_wrong_type(fixture: String) {}
-                   |                             ^^^^^^"#
-                    .unindent()
-            );
-        }
-
-        #[rstest]
-        fn on_invalid_fixture(errors_rs: &(Output, String)) {
-            let (output, name) = errors_rs.clone();
-
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    "
-                    error: Missed argument: 'not_a_fixture' should be a test function argument.
-                      --> {name}/src/lib.rs:19:11
-                       |
-                    19 | #[fixture(not_a_fixture(24))]
-                       |           ^^^^^^^^^^^^^
-                    "
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn on_duplicate_fixture_argument(errors_rs: &(Output, String)) {
-            let (output, name) = errors_rs.clone();
-
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error: Duplicate argument: 'f' is already defined.
-                      --> {name}/src/lib.rs:32:23
-                       |
-                    32 | #[fixture(f("first"), f("second"))]
-                       |                       ^
-                    "#
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn on_destruct_implicit_fixture(errors_rs: &(Output, String)) {
-            let (output, name) = errors_rs.clone();
-
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error: To destruct a fixture you should provide a path to resolve it by '#[from(...)]' attribute.
-                      --> {name}/src/lib.rs:48:35
-                       |
-                    48 | fn error_destruct_without_resolve(T(a): T) {{}}
-                       |                                   ^^^^^^^
-                    "#
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn on_destruct_explicit_fixture_without_from(errors_rs: &(Output, String)) {
-            let (output, name) = errors_rs.clone();
-
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error: To destruct a fixture you should provide a path to resolve it by '#[from(...)]' attribute.
-                      --> {name}/src/lib.rs:51:57
-                       |
-                    51 | fn error_destruct_without_resolve_also_with(#[with(21)] T(a): T) {{}}
-                       |                                                         ^^^^^^^
-                    "#
-                )
-                .unindent()
-            );
-            assert_eq!(
-                1,
-                output.stderr.str().count("51 | fn error_destruct_without")
-            )
-        }
-
-        #[fixture]
-        #[once]
-        fn errors_once_rs() -> (Output, String) {
-            run_test("errors_once.rs")
-        }
-
-        #[rstest]
-        fn once_async(errors_once_rs: &(Output, String)) {
-            let (output, name) = errors_once_rs.clone();
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error: Cannot apply #[once] to async fixture.
-                     --> {}/src/lib.rs:4:1
-                      |
-                    4 | #[once]
-                    "#,
-                    name
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn once_generic_type(errors_once_rs: &(Output, String)) {
-            let (output, name) = errors_once_rs.clone();
-
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error: Cannot apply #[once] on generic fixture.
-                     --> {}/src/lib.rs:9:1
-                      |
-                    9 | #[once]
-                    "#,
-                    name
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn once_generic_impl(errors_once_rs: &(Output, String)) {
-            let (output, name) = errors_once_rs.clone();
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                error: Cannot apply #[once] on generic fixture.
-                  --> {}/src/lib.rs:15:1
-                   |
-                15 | #[once]
-                "#,
-                    name
-                )
-                .unindent()
-            );
-        }
-
-        #[rstest]
-        fn once_on_not_sync_type(errors_once_rs: &(Output, String)) {
-            let (output, name) = errors_once_rs.clone();
-            assert_in!(
-                output.stderr.str(),
-                format!(
-                    r#"
-                    error[E0277]: `Cell<u32>` cannot be shared between threads safely
-                      --> {}/src/lib.rs:20:1
-                       |
-                    20 | #[fixture]
-                       | ^^^^^^^^^^ `Cell<u32>` cannot be shared between threads safely
-                    "#,
-                    name,
-                )
-                .unindent(),
-            );
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/integration.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/integration.rs
deleted file mode 100644
index 7858dcf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/integration.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-use rstest_test::{sanitize_name, testname, Project};
-
-/// Rstest integration tests
-mod rstest;
-
-/// Fixture's integration tests
-mod fixture;
-
-use lazy_static::lazy_static;
-
-use std::path::{Path, PathBuf};
-use temp_testdir::TempDir;
-
-lazy_static! {
-    static ref ROOT_DIR: TempDir = TempDir::default().permanent();
-    static ref ROOT_PROJECT: Project = Project::new(ROOT_DIR.as_ref());
-}
-
-pub fn base_prj() -> Project {
-    let prj_name = sanitize_name(testname());
-
-    ROOT_PROJECT.subproject(&prj_name)
-}
-
-pub fn prj() -> Project {
-    let prj_name = sanitize_name(testname());
-
-    let prj = ROOT_PROJECT.subproject(&prj_name);
-    prj.add_local_dependency("rstest");
-    prj
-}
-
-pub fn resources<O: AsRef<Path>>(name: O) -> PathBuf {
-    Path::new("tests").join("resources").join(name)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/async_fixture.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/async_fixture.rs
deleted file mode 100644
index f9fc96c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/async_fixture.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-use std::io::prelude::*;
-
-use rstest::*;
-
-#[fixture]
-async fn async_u32() -> u32 {
-    42
-}
-
-#[fixture]
-async fn nest_fixture(#[future] async_u32: u32) -> u32 {
-    async_u32.await
-}
-
-#[fixture(fortytwo = async { 42 })]
-async fn nest_fixture_with_default(#[future] fortytwo: u32) -> u32 {
-    fortytwo.await
-}
-
-#[rstest]
-async fn default_is_async() {
-    assert_eq!(42, async_u32::default().await);
-}
-
-#[rstest]
-async fn use_async_nest_fixture_default(#[future] nest_fixture: u32) {
-    assert_eq!(42, nest_fixture.await);
-}
-
-#[rstest(nest_fixture(async { 24 }))]
-async fn use_async_nest_fixture_injected(#[future] nest_fixture: u32) {
-    assert_eq!(24, nest_fixture.await);
-}
-
-#[rstest]
-async fn use_async_nest_fixture_with_default(#[future] nest_fixture_with_default: u32) {
-    assert_eq!(42, nest_fixture_with_default.await);
-}
-
-#[rstest]
-async fn use_async_fixture(#[future] async_u32: u32) {
-    assert_eq!(42, async_u32.await);
-}
-
-#[fixture]
-async fn async_impl_output() -> impl Read {
-    std::io::Cursor::new(vec![1, 2, 3, 4, 5])
-}
-
-#[rstest]
-async fn use_async_impl_output<T: Read>(#[future] async_impl_output: T) {
-    let reader = async_impl_output.await;
-}
-
-#[fixture(four = async { 4 }, two = 2)]
-async fn two_args_mix_fixture(#[future] four: u32, two: u32) -> u32 {
-    four.await * 10 + two
-}
-
-#[rstest]
-async fn use_two_args_mix_fixture(#[future] two_args_mix_fixture: u32) {
-    assert_eq!(42, two_args_mix_fixture.await);
-}
-
-#[rstest(two_args_mix_fixture(async { 5 }))]
-async fn use_two_args_mix_fixture_inject_first(#[future] two_args_mix_fixture: u32) {
-    assert_eq!(52, two_args_mix_fixture.await);
-}
-
-#[rstest(two_args_mix_fixture(async { 3 }, 1))]
-async fn use_two_args_mix_fixture_inject_both(#[future] two_args_mix_fixture: u32) {
-    assert_eq!(31, two_args_mix_fixture.await);
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/await_complete_fixture.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/await_complete_fixture.rs
deleted file mode 100644
index 8962d8e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/await_complete_fixture.rs
+++ /dev/null
@@ -1,105 +0,0 @@
-use std::io::prelude::*;
-
-use rstest::*;
-
-#[fixture]
-async fn async_u32() -> u32 {
-    42
-}
-
-#[fixture]
-#[awt]
-async fn nest_fixture(#[future] async_u32: u32) -> u32 {
-    async_u32
-}
-
-#[fixture]
-#[awt]
-async fn nest_fixture_with_default(
-    #[future]
-    #[default(async { 42 })]
-    fortytwo: u32,
-) -> u32 {
-    fortytwo
-}
-
-#[rstest]
-async fn default_is_async() {
-    assert_eq!(42, async_u32::default().await);
-}
-
-#[rstest]
-#[awt]
-async fn use_async_nest_fixture_default(#[future] nest_fixture: u32) {
-    assert_eq!(42, nest_fixture);
-}
-
-#[rstest]
-#[awt]
-async fn use_async_nest_fixture_injected(
-    #[future]
-    #[with(async { 24 })]
-    nest_fixture: u32,
-) {
-    assert_eq!(24, nest_fixture);
-}
-
-#[rstest]
-#[awt]
-async fn use_async_nest_fixture_with_default(#[future] nest_fixture_with_default: u32) {
-    assert_eq!(42, nest_fixture_with_default);
-}
-
-#[rstest]
-#[awt]
-async fn use_async_fixture(#[future] async_u32: u32) {
-    assert_eq!(42, async_u32);
-}
-
-#[fixture]
-async fn async_impl_output() -> impl Read {
-    std::io::Cursor::new(vec![1, 2, 3, 4, 5])
-}
-
-#[rstest]
-#[awt]
-async fn use_async_impl_output<T: Read>(#[future] async_impl_output: T) {
-    let reader = async_impl_output;
-}
-
-#[fixture]
-#[awt]
-async fn two_args_mix_fixture(
-    #[future]
-    #[default(async { 4 })]
-    four: u32,
-    #[default(2)] two: u32,
-) -> u32 {
-    four * 10 + two
-}
-
-#[rstest]
-#[awt]
-async fn use_two_args_mix_fixture(#[future] two_args_mix_fixture: u32) {
-    assert_eq!(42, two_args_mix_fixture);
-}
-
-#[rstest]
-#[awt]
-async fn use_two_args_mix_fixture_inject_first(
-    #[future]
-    #[with(async { 5 })]
-    two_args_mix_fixture: u32,
-) {
-    assert_eq!(52, two_args_mix_fixture);
-}
-
-#[rstest]
-#[awt]
-async fn use_two_args_mix_fixture_inject_both(
-    #[future]
-    #[with(async { 3 }, 1)]
-    two_args_mix_fixture: u32,
-) {
-    assert_eq!(31, two_args_mix_fixture);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/await_partial_fixture.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/await_partial_fixture.rs
deleted file mode 100644
index 9e26f4a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/await_partial_fixture.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-use std::io::prelude::*;
-
-use rstest::*;
-
-#[fixture]
-async fn async_u32() -> u32 {
-    42
-}
-
-#[fixture]
-async fn nest_fixture(#[future(awt)] async_u32: u32) -> u32 {
-    async_u32
-}
-
-#[fixture]
-async fn nest_fixture_with_default(
-    #[future(awt)]
-    #[default(async { 42 })]
-    fortytwo: u32,
-) -> u32 {
-    fortytwo
-}
-
-#[rstest]
-async fn default_is_async() {
-    assert_eq!(42, async_u32::default().await);
-}
-
-#[rstest]
-async fn use_async_nest_fixture_default(#[future(awt)] nest_fixture: u32) {
-    assert_eq!(42, nest_fixture);
-}
-
-#[rstest]
-async fn use_async_nest_fixture_injected(
-    #[future(awt)]
-    #[with(async { 24 })]
-    nest_fixture: u32,
-) {
-    assert_eq!(24, nest_fixture);
-}
-
-#[rstest]
-async fn use_async_nest_fixture_with_default(#[future(awt)] nest_fixture_with_default: u32) {
-    assert_eq!(42, nest_fixture_with_default);
-}
-
-#[rstest]
-async fn use_async_fixture(#[future(awt)] async_u32: u32) {
-    assert_eq!(42, async_u32);
-}
-
-#[fixture]
-async fn async_impl_output() -> impl Read {
-    std::io::Cursor::new(vec![1, 2, 3, 4, 5])
-}
-
-#[rstest]
-async fn use_async_impl_output<T: Read>(#[future(awt)] async_impl_output: T) {
-    let reader = async_impl_output;
-}
-
-#[fixture]
-async fn two_args_mix_fixture(
-    #[future(awt)]
-    #[default(async { 4 })]
-    four: u32,
-    #[default(2)] two: u32,
-) -> u32 {
-    four * 10 + two
-}
-
-#[rstest]
-async fn use_two_args_mix_fixture(#[future(awt)] two_args_mix_fixture: u32) {
-    assert_eq!(42, two_args_mix_fixture);
-}
-
-#[rstest]
-async fn use_two_args_mix_fixture_inject_first(
-    #[future(awt)]
-    #[with(async { 5 })]
-    two_args_mix_fixture: u32,
-) {
-    assert_eq!(52, two_args_mix_fixture);
-}
-
-#[rstest]
-async fn use_two_args_mix_fixture_inject_both(
-    #[future(awt)]
-    #[with(async { 3 }, 1)]
-    two_args_mix_fixture: u32,
-) {
-    assert_eq!(31, two_args_mix_fixture);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/clean_up_default_generics.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/clean_up_default_generics.rs
deleted file mode 100644
index 11fd276..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/clean_up_default_generics.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn s() -> &'static str {
-    "42"
-}
-
-#[fixture]
-fn fx<S: ToString>(s: S) -> usize {
-    s.to_string().len()
-}
-
-#[fixture]
-fn sum() -> usize {
-    42
-}
-
-#[fixture]
-fn fx_double<S: ToString>(sum: usize, s: S) -> usize {
-    s.to_string().len() + sum
-}
-
-#[test]
-fn resolve() {
-    assert_eq!(2, fx::default())
-}
-
-#[test]
-fn resolve_partial() {
-    assert_eq!(12, fx_double::partial_1(10))
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default.rs
deleted file mode 100644
index 1bdd080e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-use rstest::{fixture, rstest};
-
-#[fixture(value = 42)]
-pub fn simple(value: u32) -> u32 {
-    value
-}
-
-#[fixture(value = 21, mult = 2)]
-pub fn double(value: u32, mult: u32) -> u32 {
-    value * mult
-}
-
-#[fixture]
-pub fn middle() -> u32 {
-    2
-}
-
-#[fixture(value = 21, mult = 4)]
-pub fn mixed(value: u32, middle: u32, mult: u32) -> u32 {
-    value * mult / middle
-}
-
-#[rstest]
-fn test_simple(simple: u32) {
-    assert_eq!(simple, 42)
-}
-
-#[rstest(simple(21))]
-fn test_simple_changed(simple: u32) {
-    assert_eq!(simple, 21)
-}
-
-#[rstest]
-fn test_double(double: u32) {
-    assert_eq!(double, 42)
-}
-
-#[rstest(double(20, 3))]
-fn test_double_changed(double: u32) {
-    assert_eq!(double, 60)
-}
-
-#[rstest]
-fn test_mixed(mixed: u32) {
-    assert_eq!(mixed, 42)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default_conversion.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default_conversion.rs
deleted file mode 100644
index aee1ecf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default_conversion.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-use rstest::{fixture, rstest};
-use std::net::{Ipv4Addr, SocketAddr};
-
-struct MyType(String);
-struct E;
-impl core::str::FromStr for MyType {
-    type Err = E;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "error" => Err(E),
-            inner => Ok(MyType(inner.to_owned())),
-        }
-    }
-}
-
-#[fixture]
-fn base(#[default("1.2.3.4")] ip: Ipv4Addr, #[default(r#"8080"#)] port: u16) -> SocketAddr {
-    SocketAddr::new(ip.into(), port)
-}
-
-#[fixture]
-fn fail(#[default("error")] t: MyType) -> MyType {
-    t
-}
-
-#[fixture]
-fn valid(#[default("some")] t: MyType) -> MyType {
-    t
-}
-
-#[rstest]
-fn test_base(base: SocketAddr) {
-    assert_eq!(base, "1.2.3.4:8080".parse().unwrap());
-}
-
-#[fixture]
-fn byte_array(#[default(b"1234")] some: &[u8]) -> usize {
-    some.len()
-}
-
-#[rstest]
-fn test_byte_array(byte_array: usize) {
-    assert_eq!(4, byte_array);
-}
-
-#[rstest]
-fn test_convert_custom(valid: MyType) {}
-
-#[rstest]
-fn test_fail_conversion(fail: MyType) {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default_in_attrs.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default_in_attrs.rs
deleted file mode 100644
index 20a10a7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/default_in_attrs.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-use rstest::{fixture, rstest};
-
-#[fixture]
-pub fn simple(#[default(42)] value: u32) -> u32 {
-    value
-}
-
-#[fixture]
-pub fn double(#[default(20 + 1)] value: u32, #[default(1 + 1)] mult: u32) -> u32 {
-    value * mult
-}
-
-#[fixture]
-pub fn middle() -> u32 {
-    2
-}
-
-#[fixture]
-pub fn mixed(#[default(21)] value: u32, middle: u32, #[default(2 + 2)] mult: u32) -> u32 {
-    value * mult / middle
-}
-
-#[rstest]
-fn test_simple(simple: u32) {
-    assert_eq!(simple, 42)
-}
-
-#[rstest(simple(21))]
-fn test_simple_changed(simple: u32) {
-    assert_eq!(simple, 21)
-}
-
-#[rstest]
-fn test_double(double: u32) {
-    assert_eq!(double, 42)
-}
-
-#[rstest(double(20, 3))]
-fn test_double_changed(double: u32) {
-    assert_eq!(double, 60)
-}
-
-#[rstest]
-fn test_mixed(mixed: u32) {
-    assert_eq!(mixed, 42)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/defined_return_type.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/defined_return_type.rs
deleted file mode 100644
index db0315d7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/defined_return_type.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-use rstest::*;
-
-#[fixture]
-pub fn i() -> u32 {
-    42
-}
-
-#[fixture]
-pub fn j() -> i32 {
-    -42
-}
-
-#[fixture(::default<impl Iterator<Item=(u32, i32)>>::partial_1<impl Iterator<Item=(I,i32)>>)]
-pub fn fx<I, J>(i: I, j: J) -> impl Iterator<Item=(I, J)> {
-    std::iter::once((i, j))
-}
-
-#[test]
-fn resolve() {
-    assert_eq!((42, -42), fx::default().next().unwrap())
-}
-
-#[test]
-fn resolve_partial() {
-    assert_eq!((42.0, -42), fx::partial_1(42.0).next().unwrap())
-}
-
-#[fixture]
-#[default(impl Iterator<Item=(u32, i32)>)]
-#[partial_1(impl Iterator<Item=(I,i32)>)]
-pub fn fx_attrs<I, J>(i: I, j: J) -> impl Iterator<Item=(I, J)> {
-    std::iter::once((i, j))
-}
-
-#[test]
-fn resolve_attrs() {
-    assert_eq!((42, -42), fx_attrs::default().next().unwrap())
-}
-
-#[test]
-fn resolve_partial_attrs() {
-    assert_eq!((42.0, -42), fx_attrs::partial_1(42.0).next().unwrap())
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/dyn.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/dyn.rs
deleted file mode 100644
index 27ea0265..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/dyn.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn dyn_box() -> Box<dyn Iterator<Item=i32>> {
-    Box::new(std::iter::once(42))
-}
-
-#[fixture]
-fn dyn_ref() -> &'static dyn ToString {
-    &42
-}
-
-#[fixture]
-fn dyn_box_resolve(mut dyn_box: Box<dyn Iterator<Item=i32>>) -> i32 {
-    dyn_box.next().unwrap()
-}
-
-#[fixture]
-fn dyn_ref_resolve(dyn_ref: &dyn ToString) -> String {
-    dyn_ref.to_string()
-}
-
-#[rstest]
-fn test_dyn_box(mut dyn_box: Box<dyn Iterator<Item=i32>>) {
-    assert_eq!(42, dyn_box.next().unwrap())
-}
-
-#[rstest]
-fn test_dyn_ref(dyn_ref: &dyn ToString) {
-    assert_eq!("42", dyn_ref.to_string())
-}
-
-#[rstest]
-fn test_dyn_box_resolve(dyn_box_resolve: i32) {
-    assert_eq!(42, dyn_box_resolve)
-}
-
-#[rstest]
-fn test_dyn_ref_resolve(dyn_ref_resolve: String) {
-    assert_eq!("42", dyn_ref_resolve)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/errors.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/errors.rs
deleted file mode 100644
index f00ff38..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/errors.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-use rstest::*;
-
-#[fixture]
-pub fn fixture() -> u32 {
-    42
-}
-
-#[fixture]
-fn error_inner(fixture: u32) {
-    let a: u32 = "";
-}
-
-#[fixture]
-fn error_cannot_resolve_fixture(no_fixture: u32) {}
-
-#[fixture]
-fn error_fixture_wrong_type(fixture: String) {}
-
-#[fixture(not_a_fixture(24))]
-fn error_inject_an_invalid_fixture(fixture: String) {}
-
-#[fixture]
-fn name() -> &'static str {
-    "name"
-}
-
-#[fixture]
-fn f(name: &str) -> String {
-    name.to_owned()
-}
-
-#[fixture(f("first"), f("second"))]
-fn error_inject_a_fixture_more_than_once(f: String) {}
-
-struct T(u32);
-
-#[fixture]
-fn structed() -> T {
-    T(42)
-}
-
-#[fixture]
-fn structed_injectd(fixture: u32) -> T {
-    T(fixture)
-}
-
-#[fixture]
-fn error_destruct_without_resolve(T(a): T) {}
-
-#[fixture]
-fn error_destruct_without_resolve_also_with(#[with(21)] T(a): T) {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/errors_once.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/errors_once.rs
deleted file mode 100644
index 034af63..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/errors_once.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use rstest::*;
-
-#[fixture]
-#[once]
-async fn error_async_once_fixture() {
-}
-
-#[fixture]
-#[once]
-fn error_generics_once_fixture<T: std::fmt::Debug>() -> T {
-    42
-}
-
-#[fixture]
-#[once]
-fn error_generics_once_fixture() -> impl Iterator<Item = u32> {
-    std::iter::once(42)
-}
-
-#[fixture]
-#[once]
-fn error_once_fixture_not_sync() -> std::cell::Cell<u32> {
-    std::cell::Cell::new(42)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/fixture_struct.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/fixture_struct.rs
deleted file mode 100644
index a090d39..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/fixture_struct.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-use rstest::fixture;
-
-trait Mult {
-    fn mult(&self, n: u32) -> u32;
-}
-
-struct M(u32);
-
-impl Mult for M {
-    fn mult(&self, n: u32) -> u32 {
-        n * self.0
-    }
-}
-
-#[fixture]
-fn my_fixture() -> u32 { 42 }
-
-#[fixture]
-fn multiplier() -> M {
-    M(2)
-}
-
-#[fixture]
-fn my_fixture_injected(my_fixture: u32, multiplier: impl Mult) -> u32 { multiplier.mult(my_fixture) }
-
-#[test]
-fn resolve_new() {
-    assert_eq!(42, my_fixture::get());
-}
-
-#[test]
-fn resolve_default() {
-    assert_eq!(42, my_fixture::default());
-}
-
-#[test]
-fn injected_new() {
-    assert_eq!(63, my_fixture_injected::get(21, M(3)));
-}
-
-#[test]
-fn injected_default() {
-    assert_eq!(84, my_fixture_injected::default());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/from_other_module.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/from_other_module.rs
deleted file mode 100644
index 5eeffab..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/from_other_module.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-
-mod my_mod {
-    use rstest::{fixture};
-
-    #[fixture]
-    pub fn mod_fixture() -> u32 { 42 }
-}
-
-use my_mod::mod_fixture;
-
-#[test]
-fn struct_access() {
-    assert_eq!(42, mod_fixture::default());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/impl.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/impl.rs
deleted file mode 100644
index 0e84827..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/impl.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn fx_base_impl_return() -> impl Iterator<Item=u32> { std::iter::once(42) }
-
-#[fixture]
-fn fx_base_impl_input(mut fx_base_impl_return: impl Iterator<Item=u32>) -> u32 {
-    fx_base_impl_return.next().unwrap()
-}
-
-#[rstest]
-fn base_impl_return(mut fx_base_impl_return: impl Iterator<Item=u32>) {
-    assert_eq!(42, fx_base_impl_return.next().unwrap());
-}
-
-#[rstest]
-fn base_impl_input(mut fx_base_impl_input: u32) {
-    assert_eq!(42, fx_base_impl_input);
-}
-
-#[fixture]
-fn fx_nested_impl_return() -> impl Iterator<Item=impl ToString> { std::iter::once(42) }
-
-#[fixture]
-fn fx_nested_impl_input(mut fx_nested_impl_return: impl Iterator<Item=impl ToString>) -> String {
-    fx_nested_impl_return.next().unwrap().to_string()
-}
-
-#[rstest]
-fn nested_impl_return(mut fx_nested_impl_return: impl Iterator<Item=impl ToString>) {
-    assert_eq!("42", fx_nested_impl_return.next().unwrap().to_string());
-}
-
-#[rstest]
-fn nested_impl_input(mut fx_nested_impl_input: String) {
-    assert_eq!("42", &fx_nested_impl_input);
-}
-
-#[fixture]
-fn fx_nested_multiple_impl_return() -> (impl Iterator<Item=impl ToString>, impl ToString) {
-    (std::iter::once(42), 42i32)
-}
-
-#[fixture]
-fn fx_nested_multiple_impl_input(mut fx_nested_multiple_impl_return: (impl Iterator<Item=impl ToString>, impl ToString)) -> bool {
-    fx_nested_multiple_impl_return.0.next().unwrap().to_string() == fx_nested_multiple_impl_return.1.to_string()
-}
-
-#[rstest]
-fn nested_multiple_impl_return(mut fx_nested_multiple_impl_return: (impl Iterator<Item=impl ToString>, impl ToString)) {
-    assert_eq!(fx_nested_multiple_impl_return.0.next().unwrap().to_string(), fx_nested_multiple_impl_return.1.to_string());
-}
-
-#[rstest]
-fn nested_multiple_impl_input(fx_nested_multiple_impl_input: bool) {
-    assert!(fx_nested_multiple_impl_input);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/no_warning.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/no_warning.rs
deleted file mode 100644
index b6aa924..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/no_warning.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn val() -> i32 {
-    21
-}
-
-#[fixture]
-fn fortytwo(mut val: i32) -> i32 {
-    val *= 2;
-    val
-}
-
-#[rstest]
-fn the_test(fortytwo: i32) {
-    assert_eq!(fortytwo, 42);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once.rs
deleted file mode 100644
index 31f637c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use rstest::{fixture, rstest};
-
-#[fixture]
-#[once]
-fn once_fixture() -> u32 {
-    eprintln!("Exec fixture() just once");
-    42
-}
-
-#[rstest]
-fn base(once_fixture: &u32) {
-    assert_eq!(&42, once_fixture);
-}
-
-#[rstest]
-#[case(2)]
-#[case(3)]
-#[case(7)]
-fn cases(once_fixture: &u32, #[case] divisor: u32) {
-    assert_eq!(0, *once_fixture % divisor);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once_defined_type.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once_defined_type.rs
deleted file mode 100644
index 1ac439c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once_defined_type.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-use rstest::{fixture, rstest};
-
-#[fixture]
-#[default(u32)]
-#[partial_1(u32)]
-#[once]
-fn once_fixture(#[default(())] a: (), #[default(())] b: ()) -> u32 {
-    eprintln!("Exec fixture() just once");
-    42
-}
-
-#[rstest]
-fn base(once_fixture: &u32) {
-    assert_eq!(&42, once_fixture);
-}
-
-#[rstest]
-fn base_partial(#[with(())] once_fixture: &u32) {
-    assert_eq!(&42, once_fixture);
-}
-
-#[rstest]
-fn base_complete(#[with((), ())] once_fixture: &u32) {
-    assert_eq!(&42, once_fixture);
-}
-
-#[rstest]
-#[case(2)]
-#[case(3)]
-#[case(7)]
-fn cases(once_fixture: &u32, #[case] divisor: u32) {
-    assert_eq!(0, *once_fixture % divisor);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once_no_return.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once_no_return.rs
deleted file mode 100644
index 26674c9196..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/once_no_return.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-use rstest::*;
-
-#[fixture]
-#[once]
-fn once_fixture() {
-    eprintln!("Exec fixture() just once");
-}
-
-#[rstest]
-fn base(_once_fixture: ()) {
-    assert!(true);
-}
-
-#[rstest]
-#[case()]
-#[case()]
-#[case()]
-fn cases(_once_fixture: ()) {
-    assert!(true);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/partial.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/partial.rs
deleted file mode 100644
index 5afbe02..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/partial.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn f1() -> u32 { 0 }
-#[fixture]
-fn f2() -> u32 { 0 }
-#[fixture]
-fn f3() -> u32 { 0 }
-
-#[fixture]
-fn fixture(f1: u32, f2: u32, f3: u32) -> u32 { f1 + 10 * f2 + 100 * f3 }
-
-#[fixture(fixture(7))]
-fn partial_1(fixture: u32) -> u32 { fixture }
-
-#[fixture(fixture(2, 4))]
-fn partial_2(fixture: u32) -> u32 { fixture }
-
-#[fixture(fixture(2, 4, 5))]
-fn complete(fixture: u32) -> u32 { fixture }
-
-#[rstest]
-fn default(fixture: u32) {
-    assert_eq!(fixture, 0);
-}
-
-#[rstest]
-fn t_partial_1(partial_1: u32) {
-    assert_eq!(partial_1, 7);
-}
-
-#[rstest]
-fn t_partial_2(partial_2: u32) {
-    assert_eq!(partial_2, 42);
-}
-
-#[rstest]
-fn t_complete(complete: u32) {
-    assert_eq!(complete, 542);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/partial_in_attr.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/partial_in_attr.rs
deleted file mode 100644
index 3564e8956..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/partial_in_attr.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn f1() -> u32 {
-    0
-}
-#[fixture]
-fn f2() -> u32 {
-    0
-}
-#[fixture]
-fn f3() -> u32 {
-    0
-}
-
-#[fixture]
-fn fixture(f1: u32, f2: u32, f3: u32) -> u32 {
-    f1 + 10 * f2 + 100 * f3
-}
-
-#[fixture]
-fn partial_1(#[with(7)] fixture: u32) -> u32 {
-    fixture
-}
-
-#[fixture]
-fn partial_2(#[with(2, 4)] fixture: u32) -> u32 {
-    fixture
-}
-
-#[fixture]
-fn complete(#[with(2, 4, 5)] fixture: u32) -> u32 {
-    fixture
-}
-
-#[rstest]
-fn default(fixture: u32) {
-    assert_eq!(fixture, 0);
-}
-
-#[rstest]
-fn t_partial_1(partial_1: u32) {
-    assert_eq!(partial_1, 7);
-}
-
-#[rstest]
-fn t_partial_2(partial_2: u32) {
-    assert_eq!(partial_2, 42);
-}
-
-#[rstest]
-fn t_complete(complete: u32) {
-    assert_eq!(complete, 542);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/rename.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/rename.rs
deleted file mode 100644
index ec77809..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/rename.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn very_long_and_boring_name(#[default(42)] inject: u32) -> u32 {
-    inject
-}
-
-mod sub_module {
-    use super::*;
-
-    #[fixture]
-    pub fn mod_fixture() -> u32 {
-        42
-    }
-}
-
-#[fixture(very_long_and_boring_name as foo)]
-fn compact(foo: u32) -> u32 {
-    foo
-}
-
-#[fixture(very_long_and_boring_name(21) as foo)]
-fn compact_injected(foo: u32) -> u32 {
-    foo
-}
-
-#[fixture(sub_module::mod_fixture as foo)]
-fn compact_from_mod(foo: u32) -> u32 {
-    foo
-}
-
-#[fixture]
-fn attribute(#[from(very_long_and_boring_name)] foo: u32) -> u32 {
-    foo
-}
-
-#[fixture]
-fn attribute_mod(#[from(sub_module::mod_fixture)] foo: u32) -> u32 {
-    foo
-}
-
-#[fixture]
-fn attribute_injected(
-    #[from(very_long_and_boring_name)]
-    #[with(21)]
-    foo: u32,
-) -> u32 {
-    foo
-}
-
-#[rstest]
-fn test(
-    compact: u32,
-    attribute: u32,
-    attribute_mod: u32,
-    compact_from_mod: u32,
-    compact_injected: u32,
-    attribute_injected: u32,
-) {
-    assert_eq!(compact, attribute);
-    assert_eq!(attribute, attribute_mod);
-    assert_eq!(attribute_mod, compact_from_mod);
-    assert_eq!(compact_injected, attribute_injected);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/resolve.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/resolve.rs
deleted file mode 100644
index 696ea62..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/resolve.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use rstest::{rstest, fixture};
-
-pub trait Tr {
-    fn get() -> Self;
-}
-
-impl Tr for i32 {
-    fn get() -> Self {
-        42
-    }
-}
-
-impl Tr for u32 {
-    fn get() -> Self {
-        42
-    }
-}
-
-#[fixture]
-pub fn f<T: Tr>() -> T {
-    T::get()
-}
-
-#[fixture]
-pub fn fu32(f: u32) -> u32 {
-    f
-}
-
-#[fixture]
-pub fn fi32(f: i32) -> i32 {
-    f
-}
-
-#[rstest]
-fn test_u32(fu32: u32) {
-    assert_eq!(fu32, 42)
-}
-
-#[rstest]
-fn test_i32(fi32: i32) {
-    assert_eq!(fi32, 42)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/simple_injection.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/simple_injection.rs
deleted file mode 100644
index fc83cba..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/fixture/simple_injection.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-use rstest::{rstest, fixture};
-
-#[fixture]
-fn root() -> u32 { 21 }
-
-#[fixture]
-fn injection(root: u32) -> u32 { 2 * root }
-
-#[rstest]
-fn success(injection: u32) {
-    assert_eq!(42, injection);
-}
-
-#[rstest]
-fn fail(injection: u32) {
-    assert_eq!(41, injection);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/by_ref.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/by_ref.rs
deleted file mode 100644
index c1976fd..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/by_ref.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-use rstest::*;
-use std::fs::File;
-use std::io::Read;
-use std::path::PathBuf;
-
-#[rstest]
-fn start_with_name(
-    #[files("files/**/*.txt")]
-    #[by_ref]
-    path: &PathBuf,
-) {
-    let name = path.file_name().unwrap();
-    let mut f = File::open(&path).unwrap();
-    let mut contents = String::new();
-    f.read_to_string(&mut contents).unwrap();
-
-    assert!(contents.starts_with(name.to_str().unwrap()))
-}
-
-#[fixture]
-fn f() -> u32 {
-    42
-}
-
-#[rstest]
-#[case(42)]
-fn test(
-    #[by_ref] f: &u32,
-    #[case]
-    #[by_ref]
-    c: &u32,
-    #[values(42, 142)]
-    #[by_ref]
-    v: &u32,
-) {
-    assert_eq!(f, c);
-    assert_eq!(*c, *v % 100);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/args_with_no_cases.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/args_with_no_cases.rs
deleted file mode 100644
index 9b857ae5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/args_with_no_cases.rs
+++ /dev/null
@@ -1,4 +0,0 @@
-use rstest::rstest;
-
-#[rstest(one, two, three)]
-fn should_show_error_for_no_case(one: u32, two: u32, three: u32) {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async.rs
deleted file mode 100644
index abc7deb2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use rstest::*;
-
-#[rstest]
-#[case::pass(42, async { 42 })]
-#[case::fail(42, async { 41 })]
-#[should_panic]
-#[case::pass_panic(42, async { 41 })]
-#[should_panic]
-#[case::fail_panic(42, async { 42 })]
-async fn my_async_test(#[case] expected: u32, #[case] #[future] value: u32) {
-    assert_eq!(expected, value.await);
-}
-
-#[rstest]
-#[case::pass(42, async { 42 })]
-async fn my_async_test_revert(#[case] expected: u32, #[future] #[case] value: u32) {
-    assert_eq!(expected, value.await);
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt.rs
deleted file mode 100644
index fcd9de4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use rstest::*;
-
-#[rstest]
-#[case::pass(42, async { 42 })]
-#[case::fail(42, async { 41 })]
-#[should_panic]
-#[case::pass_panic(42, async { 41 })]
-#[should_panic]
-#[case::fail_panic(42, async { 42 })]
-async fn my_async_test(
-    #[case] expected: u32,
-    #[case]
-    #[future(awt)]
-    value: u32,
-) {
-    assert_eq!(expected, value);
-}
-
-#[rstest]
-#[case::pass(42, async { 42 })]
-async fn my_async_test_revert(
-    #[case] expected: u32,
-    #[future(awt)]
-    #[case]
-    value: u32,
-) {
-    assert_eq!(expected, value);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt_global.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt_global.rs
deleted file mode 100644
index ce88204d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt_global.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-use rstest::*;
-
-#[rstest]
-#[case::pass(42, async { 42 })]
-#[case::fail(42, async { 41 })]
-#[should_panic]
-#[case::pass_panic(42, async { 41 })]
-#[should_panic]
-#[case::fail_panic(42, async { 42 })]
-#[awt]
-async fn my_async_test(
-    #[case] expected: u32,
-    #[case]
-    #[future]
-    value: u32,
-) {
-    assert_eq!(expected, value);
-}
-
-#[rstest]
-#[case::pass(42, async { 42 })]
-#[awt]
-async fn my_async_test_revert(
-    #[case] expected: u32,
-    #[future]
-    #[case]
-    value: u32,
-) {
-    assert_eq!(expected, value);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt_mut.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt_mut.rs
deleted file mode 100644
index 2ff4376..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/async_awt_mut.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use rstest::*;
-
-#[rstest]
-#[case::pass(async { 3 })]
-#[awt]
-async fn my_mut_test_global_awt(
-    #[future]
-    #[case]
-    mut a: i32,
-) {
-    a = 4;
-    assert_eq!(a, 4);
-}
-
-#[rstest]
-#[case::pass(async { 3 })]
-async fn my_mut_test_local_awt(
-    #[future(awt)]
-    #[case]
-    mut a: i32,
-) {
-    a = 4;
-    assert_eq!(a, 4);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/case_attributes.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/case_attributes.rs
deleted file mode 100644
index 7aa5a0fb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/case_attributes.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use rstest::rstest;
-
-#[rstest(
-    val,
-    case::no_panic(0),
-    #[should_panic]
-    case::panic(2),
-    #[should_panic(expected="expected")]
-    case::panic_with_message(3),
-    case::no_panic_but_fail(1),
-    #[should_panic]
-    case::panic_but_fail(0),
-    #[should_panic(expected="other")]
-    case::panic_with_wrong_message(3),
-)]
-fn attribute_per_case(val: i32) {
-    match val {
-        0 => assert!(true),
-        1 => assert!(false),
-        2 => panic!("No catch"),
-        3 => panic!("expected"),
-        _ => panic!("Not defined"),
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/case_with_wrong_args.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/case_with_wrong_args.rs
deleted file mode 100644
index d6efadf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/case_with_wrong_args.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-use rstest::rstest;
-
-#[cfg(test)]
-#[rstest(a, b, case(42), case(1, 2), case(43))]
-fn error_less_arguments(a: u32, b: u32) {}
-
-#[cfg(test)]
-#[rstest(a, case(42, 43), case(12), case(24, 34))]
-fn error_too_much_arguments(a: u32) {}
-
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/description.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/description.rs
deleted file mode 100644
index c57ae4b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/description.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-use rstest::rstest;
-
-#[rstest(
-    expected,
-    case::user_test_description(true),
-    case(true),
-    case::user_test_description_fail(false)
-)]
-fn description(expected: bool) {
-    assert!(expected);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/dump_just_one_case.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/dump_just_one_case.rs
deleted file mode 100644
index f8e80d4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/dump_just_one_case.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-use rstest::*;
-
-#[rstest]
-#[case::first_no_dump("Please don't trace me")]
-#[trace]
-#[case::dump_me("Trace it!")]
-#[case::last_no_dump("Please don't trace me")]
-fn cases(#[case] s: &str) {
-    assert!(false);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/inject.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/inject.rs
deleted file mode 100644
index eb2dc75..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/inject.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use rstest::*;
-use actix_rt;
-use std::future::Future;
-
-#[rstest(expected, value,
-    case::pass(42, 42),
-    #[should_panic]
-    case::panic(41, 42),
-    case::fail(1, 42)
-)]
-#[test]
-fn sync(expected: u32, value: u32) { assert_eq!(expected, value); }
-
-#[rstest(expected, value,
-    case::pass(42, async { 42 }),
-    #[should_panic]
-    case::panic(41, async { 42 }),
-    case::fail(1, async { 42 })
-)]
-#[actix_rt::test]
-async fn fn_async(expected: u32, value: impl Future<Output=u32>) { assert_eq!(expected, value.await); }
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/missed_argument.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/missed_argument.rs
deleted file mode 100644
index e11e2d9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/missed_argument.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use rstest::rstest;
-
-#[cfg(test)]
-#[rstest(f, case(42), case(24))]
-fn error_param_not_exist() {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/missed_some_arguments.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/missed_some_arguments.rs
deleted file mode 100644
index 7251997..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/missed_some_arguments.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use rstest::rstest;
-
-#[cfg(test)]
-#[rstest(a,b,c, case(1,2,3), case(3,2,1))]
-fn error_param_not_exist(b: u32) {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/partial.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/partial.rs
deleted file mode 100644
index bb4d863e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/partial.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn f1() -> u32 {
-    0
-}
-#[fixture]
-fn f2() -> u32 {
-    0
-}
-#[fixture]
-fn f3() -> u32 {
-    0
-}
-
-#[fixture]
-fn fixture(f1: u32, f2: u32, f3: u32) -> u32 {
-    f1 + 10 * f2 + 100 * f3
-}
-
-#[rstest(expected, case(0), case(1000))]
-fn default(fixture: u32, expected: u32) {
-    assert_eq!(fixture, expected);
-}
-
-#[rstest(fixture(7), expected, case(7), case(1000))]
-fn partial_1(fixture: u32, expected: u32) {
-    assert_eq!(fixture, expected);
-}
-
-#[rstest(expected, case(7), case(1000))]
-fn partial_attr_1(#[with(7)] fixture: u32, expected: u32) {
-    assert_eq!(fixture, expected);
-}
-
-#[rstest(fixture(2, 4), expected, case(42), case(1000))]
-fn partial_2(fixture: u32, expected: u32) {
-    assert_eq!(fixture, expected);
-}
-
-#[rstest(expected, case(42), case(1000))]
-fn partial_attr_2(#[with(2, 4)] fixture: u32, expected: u32) {
-    assert_eq!(fixture, expected);
-}
-
-#[rstest(fixture(2, 4, 5), expected, case(542), case(1000))]
-fn complete(fixture: u32, expected: u32) {
-    assert_eq!(fixture, expected);
-}
-
-#[rstest(expected, case(542), case(1000))]
-fn complete_attr(#[with(2, 4, 5)] fixture: u32, expected: u32) {
-    assert_eq!(fixture, expected);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/simple.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/simple.rs
deleted file mode 100644
index d699cf7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/simple.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-use rstest::rstest;
-
-#[rstest(
-    expected, input,
-    case(4, "ciao"),
-    case(3, "Foo")
-)]
-fn strlen_test(expected: usize, input: &str) {
-    assert_eq!(expected, input.len());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/use_attr.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/use_attr.rs
deleted file mode 100644
index dd45df36..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/cases/use_attr.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-use rstest::rstest;
-
-#[rstest]
-#[case::ciao(4, "ciao")]
-#[should_panic]
-#[case::panic(42, "Foo")]
-#[case::foo(3, "Foo")]
-fn all(#[case] expected: usize, #[case] input: &str) {
-    assert_eq!(expected, input.len());
-}
-
-#[rstest(expected, input)]
-#[case::ciao(4, "ciao")]
-#[case::foo(3, "Foo")]
-#[should_panic]
-#[case::panic(42, "Foo")]
-fn just_cases(expected: usize, input: &str) {
-    assert_eq!(expected, input.len());
-}
-
-#[rstest(
-    case::ciao(4, "ciao"),
-    case::foo(3, "Foo"),
-    #[should_panic]
-    case::panic(42, "Foo"),
-)]
-fn just_args(#[case] expected: usize, #[case] input: &str) {
-    assert_eq!(expected, input.len());
-}
-
-#[rstest]
-#[case(0, "ciao")]
-#[case(0, "Foo")]
-#[should_panic]
-fn all_panic(#[case] expected: usize, #[case] input: &str) {
-    assert_eq!(expected, input.len());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/convert_string_literal.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/convert_string_literal.rs
deleted file mode 100644
index 3e7989f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/convert_string_literal.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-use rstest::*;
-use std::net::SocketAddr;
-
-#[rstest]
-#[case(true, "1.2.3.4:42")]
-#[case(true, r#"4.3.2.1:24"#)]
-#[case(false, "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443")]
-#[case(false, r#"[2aa1:db8:85a3:8af:1319:8a2e:375:4873]:344"#)]
-#[case(false, "this.is.not.a.socket.address")]
-#[case(false, r#"this.is.not.a.socket.address"#)]
-fn cases(#[case] expected: bool, #[case] addr: SocketAddr) {
-    assert_eq!(expected, addr.is_ipv4());
-}
-
-#[rstest]
-fn values(
-    #[values(
-        "1.2.3.4:42",
-        r#"4.3.2.1:24"#,
-        "this.is.not.a.socket.address",
-        r#"this.is.not.a.socket.address"#
-    )]
-    addr: SocketAddr,
-) {
-    assert!(addr.is_ipv4())
-}
-
-#[rstest]
-#[case(b"12345")]
-fn not_convert_byte_array(#[case] cases: &[u8], #[values(b"abc")] values: &[u8]) {
-    assert_eq!(5, cases.len());
-    assert_eq!(3, values.len());
-}
-
-trait MyTrait {
-    fn my_trait(&self) -> u32 {
-        42
-    }
-}
-
-impl MyTrait for &str {}
-
-#[rstest]
-#[case("impl", "nothing")]
-fn not_convert_impl(#[case] that_impl: impl MyTrait, #[case] s: &str) {
-    assert_eq!(42, that_impl.my_trait());
-    assert_eq!(42, s.my_trait());
-}
-
-#[rstest]
-#[case("1.2.3.4", "1.2.3.4:42")]
-#[case("1.2.3.4".to_owned(), "1.2.3.4:42")]
-fn not_convert_generics<S: AsRef<str>>(#[case] ip: S, #[case] addr: SocketAddr) {
-    assert_eq!(addr.ip().to_string(), ip.as_ref());
-}
-
-struct MyType(String);
-struct E;
-impl core::str::FromStr for MyType {
-    type Err = E;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "error" => Err(E),
-            inner => Ok(MyType(inner.to_owned())),
-        }
-    }
-}
-
-#[rstest]
-#[case("hello", "hello")]
-#[case("doesn't mater", "error")]
-fn convert_without_debug(#[case] expected: &str, #[case] converted: MyType) {
-    assert_eq!(expected, converted.0);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/convert_string_literal_other_name.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/convert_string_literal_other_name.rs
deleted file mode 100644
index b5a0f887..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/convert_string_literal_other_name.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-use other_name::*;
-use std::net::SocketAddr;
-
-#[rstest]
-#[case(true, "1.2.3.4:42")]
-#[case(true, r#"4.3.2.1:24"#)]
-#[case(false, "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443")]
-#[case(false, r#"[2aa1:db8:85a3:8af:1319:8a2e:375:4873]:344"#)]
-#[case(false, "this.is.not.a.socket.address")]
-#[case(false, r#"this.is.not.a.socket.address"#)]
-fn cases(#[case] expected: bool, #[case] addr: SocketAddr) {
-    assert_eq!(expected, addr.is_ipv4());
-}
-
-#[rstest]
-fn values(
-    #[values(
-        "1.2.3.4:42",
-        r#"4.3.2.1:24"#,
-        "this.is.not.a.socket.address",
-        r#"this.is.not.a.socket.address"#
-    )]
-    addr: SocketAddr,
-) {
-    assert!(addr.is_ipv4())
-}
-
-#[rstest]
-#[case(b"12345")]
-fn not_convert_byte_array(#[case] cases: &[u8], #[values(b"abc")] values: &[u8]) {
-    assert_eq!(5, cases.len());
-    assert_eq!(3, values.len());
-}
-
-trait MyTrait {
-    fn my_trait(&self) -> u32 {
-        42
-    }
-}
-
-impl MyTrait for &str {}
-
-#[rstest]
-#[case("impl", "nothing")]
-fn not_convert_impl(#[case] that_impl: impl MyTrait, #[case] s: &str) {
-    assert_eq!(42, that_impl.my_trait());
-    assert_eq!(42, s.my_trait());
-}
-
-#[rstest]
-#[case("1.2.3.4", "1.2.3.4:42")]
-#[case("1.2.3.4".to_owned(), "1.2.3.4:42")]
-fn not_convert_generics<S: AsRef<str>>(#[case] ip: S, #[case] addr: SocketAddr) {
-    assert_eq!(addr.ip().to_string(), ip.as_ref());
-}
-
-struct MyType(String);
-struct E;
-impl core::str::FromStr for MyType {
-    type Err = E;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "error" => Err(E),
-            inner => Ok(MyType(inner.to_owned())),
-        }
-    }
-}
-
-#[rstest]
-#[case("hello", "hello")]
-#[case("doesn't mater", "error")]
-fn convert_without_debug(#[case] expected: &str, #[case] converted: MyType) {
-    assert_eq!(expected, converted.0);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/destruct.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/destruct.rs
deleted file mode 100644
index 302978f5d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/destruct.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-use rstest::*;
-
-struct T {
-    a: u32,
-    b: u32,
-}
-
-impl T {
-    fn new(a: u32, b: u32) -> Self {
-        Self { a, b }
-    }
-}
-
-struct S(u32, u32);
-
-#[fixture]
-fn fix() -> T {
-    T::new(1, 42)
-}
-
-#[fixture]
-fn named() -> S {
-    S(1, 42)
-}
-
-#[fixture]
-fn tuple() -> (u32, u32) {
-    (1, 42)
-}
-
-#[fixture]
-fn swap(#[from(fix)] T { a, b }: T) -> T {
-    T::new(b, a)
-}
-
-#[rstest]
-fn swapped(#[from(swap)] T { a, b }: T) {
-    assert_eq!(a, 42);
-    assert_eq!(b, 1);
-}
-
-#[rstest]
-#[case::two_times_twenty_one(T::new(2, 21))]
-#[case::six_times_seven(T{ a: 6, b: 7 })]
-fn cases_destruct(
-    #[from(fix)] T { a, b }: T,
-    #[case] T { a: c, b: d }: T,
-    #[values(T::new(42, 1), T{ a: 3, b: 14})] T { a: e, b: f }: T,
-) {
-    assert_eq!(a * b, 42);
-    assert_eq!(c * d, 42);
-    assert_eq!(e * f, 42);
-}
-
-#[rstest]
-#[case::two_times_twenty_one(S(2, 21))]
-#[case::six_times_seven(S(6, 7))]
-fn cases_destruct_named_tuple(
-    #[from(named)] S(a, b): S,
-    #[case] S(c, d): S,
-    #[values(S(42, 1), S(3, 14))] S(e, f): S,
-) {
-    assert_eq!(a * b, 42);
-    assert_eq!(c * d, 42);
-    assert_eq!(e * f, 42);
-}
-
-#[rstest]
-#[case::two_times_twenty_one((2, 21))]
-#[case::six_times_seven((6, 7))]
-fn cases_destruct_tuple(
-    #[from(tuple)] (a, b): (u32, u32),
-    #[case] (c, d): (u32, u32),
-    #[values((42, 1), (3, 14))] (e, f): (u32, u32),
-) {
-    assert_eq!(a * b, 42);
-    assert_eq!(c * d, 42);
-    assert_eq!(e * f, 42);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_debug.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_debug.rs
deleted file mode 100644
index 421be11..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_debug.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-use rstest::*;
-
-#[derive(Debug)]
-struct A {}
-
-#[fixture]
-fn fu32() -> u32 {
-    42
-}
-#[fixture]
-fn fstring() -> String {
-    "A String".to_string()
-}
-#[fixture]
-fn ftuple() -> (A, String, i32) {
-    (A {}, "A String".to_string(), -12)
-}
-
-#[rstest]
-#[trace]
-fn single_fail(fu32: u32, fstring: String, ftuple: (A, String, i32)) {
-    assert!(false);
-}
-
-#[rstest]
-fn no_trace_single_fail(fu32: u32, fstring: String, ftuple: (A, String, i32)) {
-    assert!(false);
-}
-
-#[rstest]
-#[case(42, "str", ("ss", -12))]
-#[case(24, "trs", ("tt", -24))]
-#[trace]
-fn cases_fail(#[case] u: u32, #[case] s: &str, #[case] t: (&str, i32)) {
-    assert!(false);
-}
-
-#[rstest]
-#[case(42, "str", ("ss", -12))]
-#[case(24, "trs", ("tt", -24))]
-fn no_trace_cases_fail(#[case] u: u32, #[case] s: &str, #[case] t: (&str, i32)) {
-    assert!(false);
-}
-
-#[rstest]
-#[trace]
-fn matrix_fail(
-    #[values(1, 3)] u: u32,
-    #[values("rst", "srt")] s: &str,
-    #[values(("SS", -12), ("TT", -24))] t: (&str, i32),
-) {
-    assert!(false);
-}
-
-#[rstest]
-fn no_trace_matrix_fail(
-    #[values(1, 3)] u: u32,
-    #[values("rst", "srt")] s: &str,
-    #[values(("SS", -12), ("TT", -24))] t: (&str, i32),
-) {
-    assert!(false);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_debug_compact.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_debug_compact.rs
deleted file mode 100644
index 7b68510..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_debug_compact.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-use rstest::*;
-
-#[derive(Debug)]
-struct A {}
-
-#[fixture]
-fn fu32() -> u32 {
-    42
-}
-#[fixture]
-fn fstring() -> String {
-    "A String".to_string()
-}
-#[fixture]
-fn ftuple() -> (A, String, i32) {
-    (A {}, "A String".to_string(), -12)
-}
-
-#[rstest(::trace)]
-fn single_fail(fu32: u32, fstring: String, ftuple: (A, String, i32)) {
-    assert!(false);
-}
-
-#[rstest]
-fn no_trace_single_fail(fu32: u32, fstring: String, ftuple: (A, String, i32)) {
-    assert!(false);
-}
-
-#[rstest(u, s, t,
-    case(42, "str", ("ss", -12)),
-    case(24, "trs", ("tt", -24))
-    ::trace
-)]
-fn cases_fail(u: u32, s: &str, t: (&str, i32)) {
-    assert!(false);
-}
-
-#[rstest(u, s, t,
-    case(42, "str", ("ss", -12)),
-    case(24, "trs", ("tt", -24))
-)]
-fn no_trace_cases_fail(u: u32, s: &str, t: (&str, i32)) {
-    assert!(false);
-}
-
-#[rstest(
-    u => [1, 2],
-    s => ["rst", "srt"],
-    t => [("SS", -12), ("TT", -24)]
-    ::trace
-)]
-fn matrix_fail(u: u32, s: &str, t: (&str, i32)) {
-    assert!(false);
-}
-
-#[rstest(
-    u => [1, 2],
-    s => ["rst", "srt"],
-    t => [("SS", -12), ("TT", -24)]
-)]
-fn no_trace_matrix_fail(u: u32, s: &str, t: (&str, i32)) {
-    assert!(false);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_exclude_some_inputs.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_exclude_some_inputs.rs
deleted file mode 100644
index 4775cd2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_exclude_some_inputs.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-use rstest::*;
-
-struct A;
-struct B;
-#[derive(Debug)]
-struct D;
-
-#[fixture]
-fn fu32() -> u32 {
-    42
-}
-#[fixture]
-fn fb() -> B {
-    B {}
-}
-#[fixture]
-fn fd() -> D {
-    D {}
-}
-#[fixture]
-fn fa() -> A {
-    A {}
-}
-
-#[rstest]
-#[trace]
-fn simple(fu32: u32, #[notrace] fa: A, #[notrace] fb: B, fd: D) {
-    assert!(false);
-}
-
-#[rstest]
-#[trace]
-#[case(A{}, B{}, D{})]
-fn cases(fu32: u32, #[case] #[notrace] a: A, #[case] #[notrace] b: B, #[case] d: D) {
-    assert!(false);
-}
-
-#[rstest]
-#[trace]
-fn matrix(
-    fu32: u32,
-    #[notrace]
-    #[values(A{})]
-    a: A,
-    #[notrace]
-    #[values(B{})]
-    b: B,
-    #[values(D{}) ] dd: D,
-) {
-    assert!(false);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_exclude_some_inputs_compact.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_exclude_some_inputs_compact.rs
deleted file mode 100644
index d8de32f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_exclude_some_inputs_compact.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use rstest::*;
-
-struct A;
-struct B;
-#[derive(Debug)]
-struct D;
-
-#[fixture]
-fn fu32() -> u32 { 42 }
-#[fixture]
-fn fb() -> B { B {} }
-#[fixture]
-fn fd() -> D { D {} }
-#[fixture]
-fn fa() -> A { A {} }
-
-
-#[rstest(
-::trace::notrace(fa,fb))
-]
-fn simple(fu32: u32, fa: A, fb: B, fd: D) {
-    assert!(false);
-}
-
-#[rstest(a,b,d,
-    case(A{}, B{}, D{})
-    ::trace::notrace(a,b))
-]
-fn cases(fu32: u32, a: A, b: B, d: D) {
-    assert!(false);
-}
-
-#[rstest(
-    a => [A{}],
-    b => [B{}],
-    dd => [D{}],
-    ::trace::notrace(a,b))
-]
-fn matrix(fu32: u32, a: A, b: B, dd: D) {
-    assert!(false);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_not_debug.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_not_debug.rs
deleted file mode 100644
index 10a97db6a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_not_debug.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-struct S;
-#[rustfmt::skip] mod _skip_format {
-use rstest::*; use super::*;
-
-#[fixture]
-fn fixture() -> S { S {} }
-
-#[rstest]
-#[trace]
-fn single(fixture: S) {}
-
-#[rstest(s)]
-#[trace]
-#[case(S{})]
-fn cases(s: S) {}
-
-#[rstest(
-    s => [S{}])]
-#[trace]
-fn matrix(s: S) {}
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_not_debug_compact.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_not_debug_compact.rs
deleted file mode 100644
index fc640cb4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/dump_not_debug_compact.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-struct S;
-#[rustfmt::skip] mod _skip_format {
-use rstest::*; use super::*;
-
-#[fixture]
-fn fixture() -> S { S {} }
-
-#[rstest(
-    ::trace)]
-fn single(fixture: S) {}
-
-#[rstest(s,
-    case(S{})
-    ::trace)]
-fn cases(s: S) {}
-
-#[rstest(
-    s => [S{}]
-    ::trace)]
-fn matrix(s: S) {}
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/errors.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/errors.rs
deleted file mode 100644
index a13fbaf5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/errors.rs
+++ /dev/null
@@ -1,126 +0,0 @@
-use rstest::*;
-#[fixture]
-pub fn fixture() -> u32 {
-    42
-}
-
-#[rstest(f, case(42))]
-fn error_inner(f: i32) {
-    let a: u32 = "";
-}
-
-#[rstest(f, case(42))]
-fn error_cannot_resolve_fixture(no_fixture: u32, f: u32) {}
-
-#[rstest(f, case(42))]
-fn error_fixture_wrong_type(fixture: String, f: u32) {}
-
-#[rstest(f, case(42))]
-fn error_case_wrong_type(f: &str) {}
-
-#[rstest(condition,
-    case(vec![1,2,3].contains(2)))
-]
-fn error_in_arbitrary_rust_code_cases(condition: bool) {
-    assert!(condition)
-}
-
-#[rstest(f, case(42), not_a_fixture(24))]
-fn error_inject_an_invalid_fixture(f: u32) {}
-
-#[fixture]
-fn n() -> u32 {
-    24
-}
-
-#[fixture]
-fn f(n: u32) -> u32 {
-    2 * n
-}
-
-#[rstest(f, f(42), case(12))]
-fn error_inject_a_fixture_that_is_already_a_case(f: u32) {}
-
-#[rstest(f(42), f, case(12))]
-fn error_define_case_that_is_already_an_injected_fixture(f: u32) {}
-
-#[rstest(v, f(42), f(42), case(12))]
-fn error_inject_a_fixture_more_than_once(v: u32, f: u32) {}
-
-#[rstest(f => [42])]
-fn error_matrix_wrong_type(f: &str) {}
-
-#[rstest(condition => [vec![1,2,3].contains(2)] )]
-fn error_arbitrary_rust_code_matrix(condition: bool) {
-    assert!(condition)
-}
-
-#[rstest(empty => [])]
-fn error_empty_list(empty: &str) {}
-
-#[rstest(not_exist_1 => [42],
-         not_exist_2 => [42])]
-fn error_no_match_args() {}
-
-#[rstest(f => [41, 42], f(42))]
-fn error_inject_a_fixture_that_is_already_a_value_list(f: u32) {}
-
-#[rstest(f(42), f => [41, 42])]
-fn error_define_a_value_list_that_is_already_an_injected_fixture(f: u32) {}
-
-#[rstest(a, case(42), a => [42])]
-fn error_define_a_value_list_that_is_already_a_case_arg(a: u32) {}
-
-#[rstest(a => [42], a, case(42))]
-fn error_define_a_case_arg_that_is_already_a_value_list(a: u32) {}
-
-#[rstest(a => [42, 24], a => [24, 42])]
-fn error_define_a_value_list_that_is_already_a_value_list(f: u32) {}
-
-#[rstest(a, a, case(42))]
-fn error_define_a_case_arg_that_is_already_a_case_arg(a: u32) {}
-
-struct S;
-#[rstest]
-#[case("donald duck")]
-fn error_convert_to_type_that_not_implement_from_str(#[case] s: S) {}
-
-#[rstest]
-#[case(async { "hello" } )]
-async fn error_future_on_impl_type(
-    #[case]
-    #[future]
-    s: impl AsRef<str>,
-) {
-}
-
-#[rstest]
-#[case(async { 42 } )]
-async fn error_future_more_than_once(
-    #[case]
-    #[future]
-    #[future]
-    a: i32,
-) {
-}
-
-#[rstest]
-#[timeout]
-fn error_timeout_without_arg() {}
-
-#[rstest]
-#[timeout(some -> strange -> invalid -> expression)]
-fn error_timeout_without_expression_arg() {}
-
-#[rstest]
-#[timeout(42)]
-fn error_timeout_without_duration() {}
-
-#[rstest]
-fn error_absolute_path_files(#[files("/tmp/tmp.Q81idVZYAV/*.txt")] path: std::path::PathBuf) {}
-
-struct T(u32, u32);
-
-#[rstest]
-#[case(T(3, 4))]
-fn wrong_destruct_fixture(T(a, b): T, #[with(42)] T(c, d): T) {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/files.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/files.rs
deleted file mode 100644
index b90712c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/files.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-use rstest::*;
-use std::fs::File;
-use std::io::Read;
-use std::path::PathBuf;
-
-#[rstest]
-fn start_with_name(
-    #[files("files/**/*.txt")]
-    #[exclude("exclude")]
-    #[files("../files_test_sub_folder/**/*.txt")]
-    path: PathBuf,
-) {
-    let name = path.file_name().unwrap();
-    let mut f = File::open(&path).unwrap();
-    let mut contents = String::new();
-    f.read_to_string(&mut contents).unwrap();
-
-    assert!(contents.starts_with(name.to_str().unwrap()))
-}
-
-#[rstest]
-fn start_with_name_with_include(
-    #[files("files/**/*.txt")]
-    #[exclude("exclude")]
-    #[include_dot_files]
-    path: PathBuf,
-) {
-    let name = path.file_name().unwrap();
-    let mut f = File::open(&path).unwrap();
-    let mut contents = String::new();
-    f.read_to_string(&mut contents).unwrap();
-
-    assert!(contents.starts_with(name.to_str().unwrap()))
-}
-
-mod module {
-    #[rstest::rstest]
-    fn pathbuf_need_not_be_in_scope(
-        #[files("files/**/*.txt")]
-        #[exclude("exclude")]
-        #[include_dot_files]
-        path: std::path::PathBuf,
-    ) {
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/generic.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/generic.rs
deleted file mode 100644
index 4da8de7a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/generic.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn fixture() -> String { "str".to_owned() }
-
-#[rstest]
-fn simple<S: AsRef<str>>(fixture: S) {
-    assert_eq!(3, fixture.as_ref().len());
-}
-
-#[rstest(
-    expected, input,
-    case(4, String::from("ciao")),
-    case(3, "Foo")
-)]
-fn strlen_test<S: AsRef<str>>(expected: usize, input: S) {
-    assert_eq!(expected, input.as_ref().len());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/happy_path.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/happy_path.rs
deleted file mode 100644
index 1a4172f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/happy_path.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn inject() -> u32 {
-    0
-}
-
-#[fixture]
-fn ex() -> u32 {
-    42
-}
-
-#[fixture]
-fn fix(inject: u32, ex: u32) -> bool {
-    (inject * 2) == ex
-}
-
-#[rstest(
-    fix(21),
-    a,
-    case(21, 2),
-    expected => [4, 2*3-2],
-)]
-#[case::second(14, 3)]
-fn happy(
-    fix: bool,
-    a: u32,
-    #[case] b: u32,
-    expected: usize,
-    #[values("ciao", "buzz")] input: &str,
-) {
-    assert!(fix);
-    assert_eq!(a * b, 42);
-    assert_eq!(expected, input.len());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/ignore_args.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/ignore_args.rs
deleted file mode 100644
index fd5a8aa5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/ignore_args.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-use rstest::*;
-
-#[rstest]
-#[case(42, 2)]
-#[case(43, 3)]
-fn test(#[case] _ignore1: u32, #[case] _ignore2: u32, #[values(1, 2, 3, 4)] _ignore3: u32) {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/ignore_not_fixture_arg.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/ignore_not_fixture_arg.rs
deleted file mode 100644
index d16fc652..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/ignore_not_fixture_arg.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-use rstest::*;
-
-use sqlx::SqlitePool;
-
-struct FixtureStruct {}
-
-#[fixture]
-fn my_fixture() -> FixtureStruct {
-    FixtureStruct {}
-}
-
-#[rstest]
-#[sqlx::test]
-async fn test_db(my_fixture: FixtureStruct, #[ignore] pool: SqlitePool) {
-    assert!(true);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/impl_param.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/impl_param.rs
deleted file mode 100644
index f366401..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/impl_param.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn fixture() -> String { "str".to_owned() }
-
-#[rstest]
-fn simple(fixture: impl AsRef<str>) {
-    assert_eq!(3, fixture.as_ref().len());
-}
-
-#[rstest(
-    expected, input,
-    case(4, String::from("ciao")),
-    case(3, "Foo")
-)]
-fn strlen_test(expected: usize, input: impl AsRef<str>) {
-    assert_eq!(expected, input.as_ref().len());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/lifetimes.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/lifetimes.rs
deleted file mode 100644
index 440751b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/lifetimes.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use rstest::*;
-
-enum E<'a> {
-    A(bool),
-    B(&'a std::cell::Cell<E<'a>>),
-}
-
-#[rstest]
-#[case(E::A(true))]
-fn case<'a>(#[case] e: E<'a>) {}
-
-#[rstest]
-fn values<'a>(#[values(E::A(true))] e: E<'a>) {}
-
-#[fixture]
-fn e<'a>() -> E<'a> {
-    E::A(true)
-}
-
-#[rstest]
-fn fixture<'a>(e: E<'a>) {}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/local_lifetime.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/local_lifetime.rs
deleted file mode 100644
index 61d9368..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/local_lifetime.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use std::cell::Cell;
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-enum E<'a> {
-    A(bool),
-    B(&'a Cell<E<'a>>),
-}
-
-fn make_e_from_bool<'a>(_bump: &'a (), b: bool) -> E<'a> {
-    E::A(b)
-}
-
-#[cfg(test)]
-mod tests {
-    use rstest::*;
-
-    use super::*;
-
-    #[fixture]
-    fn bump() -> () {}
-
-    #[rstest]
-    #[case(true, E::A(true))]
-    fn it_works<'a>(#[by_ref] bump: &'a (), #[case] b: bool, #[case] expected: E<'a>) {
-        let actual = make_e_from_bool(&bump, b);
-        assert_eq!(actual, expected);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async.rs
deleted file mode 100644
index d36f01e9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-use rstest::*;
-
-#[rstest]
-async fn my_async_test(
-    #[future] 
-    #[values(async { 1 }, async { 2 })] 
-    first: u32, 
-    #[values(42, 21)] 
-    second: u32
-) {
-    assert_eq!(42, first.await * second);
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async_awt.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async_awt.rs
deleted file mode 100644
index f9e8819..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async_awt.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-use rstest::*;
-
-#[rstest]
-async fn my_async_test(
-    #[future(awt)] 
-    #[values(async { 1 }, async { 2 })] 
-    first: u32, 
-    #[values(42, 21)] 
-    second: u32
-) {
-    assert_eq!(42, first * second);
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async_awt_global.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async_awt_global.rs
deleted file mode 100644
index 780d8fa..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/async_awt_global.rs
+++ /dev/null
@@ -1,13 +0,0 @@
-use rstest::*;
-
-#[rstest]
-#[awt]
-async fn my_async_test(
-    #[future] 
-    #[values(async { 1 }, async { 2 })] 
-    first: u32, 
-    #[values(42, 21)] 
-    second: u32
-) {
-    assert_eq!(42, first * second);
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/inject.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/inject.rs
deleted file mode 100644
index 6547f09b0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/inject.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-use rstest::*;
-use actix_rt;
-use std::future::Future;
-
-#[rstest(
-    first => [1, 2], 
-    second => [2, 1],
-)]
-#[test]
-fn sync(first: u32, second: u32) { assert_eq!(2, first * second); }
-
-#[rstest(
-    first => [async { 1 }, async { 2 }], 
-    second => [2, 1],
-)]
-#[actix_rt::test]
-async fn fn_async(first: impl Future<Output=u32>, second: u32) { assert_eq!(2, first.await * second); }
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/partial.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/partial.rs
deleted file mode 100644
index 40317b5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/partial.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn f1() -> u32 {
-    0
-}
-#[fixture]
-fn f2() -> u32 {
-    0
-}
-#[fixture]
-fn f3() -> u32 {
-    0
-}
-
-#[fixture]
-fn fixture(f1: u32, f2: u32, f3: u32) -> u32 {
-    f1 + f2 + 2 * f3
-}
-
-#[rstest(a => [0, 1], b => [0, 2])]
-fn default(fixture: u32, a: u32, b: u32) {
-    assert_eq!(fixture, a * b);
-}
-
-#[rstest(a => [0, 1], b => [0, 2], fixture(1))]
-fn partial_1(fixture: u32, a: u32, b: u32) {
-    assert_eq!(fixture, a * b);
-}
-
-#[rstest(a => [0, 1], b => [0, 2])]
-fn partial_attr_1(#[with(1)] fixture: u32, a: u32, b: u32) {
-    assert_eq!(fixture, a * b);
-}
-
-#[rstest(a => [0, 1], b => [0, 2], fixture(0, 2))]
-fn partial_2(fixture: u32, a: u32, b: u32) {
-    assert_eq!(fixture, a * b);
-}
-
-#[rstest(a => [0, 1], b => [0, 2])]
-fn partial_attr_2(#[with(0, 2)] fixture: u32, a: u32, b: u32) {
-    assert_eq!(fixture, a * b);
-}
-
-#[rstest(a => [0, 1], b => [0, 2], fixture(0, 0, 1))]
-fn complete(fixture: u32, a: u32, b: u32) {
-    assert_eq!(fixture, a * b);
-}
-
-#[rstest(a => [0, 1], b => [0, 2])]
-fn complete_attr(#[with(0, 0, 1)] fixture: u32, a: u32, b: u32) {
-    assert_eq!(fixture, a * b);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/simple.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/simple.rs
deleted file mode 100644
index d5ef2d08..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/simple.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-use rstest::rstest;
-
-#[rstest(
-    expected => [4, 2*3-2],
-    input => ["ciao", "buzz"],
-)]
-fn strlen_test(expected: usize, input: &str) {
-    assert_eq!(expected, input.len());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/use_attr.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/use_attr.rs
deleted file mode 100644
index 7b6e6f7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/matrix/use_attr.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-use rstest::rstest;
-
-#[rstest]
-fn both(#[values(4, 2*3-2)] expected: usize, #[values("ciao", "buzz")] input: &str) {
-    assert_eq!(expected, input.len());
-}
-
-#[rstest(
-    input => ["ciao", "buzz"]
-)]
-fn first(#[values(4, 2*3-2)] expected: usize, input: &str) {
-    assert_eq!(expected, input.len());
-}
-
-#[rstest(
-    expected => [4, 2*3-2]
-)]
-fn second(expected: usize, #[values("ciao", "buzz")] input: &str) {
-    assert_eq!(expected, input.len());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/mut.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/mut.rs
deleted file mode 100644
index 1f45844..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/mut.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use rstest::*;
-
-#[fixture]
-pub fn fixture() -> u32 { 42 }
-
-#[rstest]
-fn should_success(mut fixture: u32) {
-    fixture += 1;
-    assert_eq!(fixture, 43);
-}
-
-#[rstest]
-fn should_fail(mut fixture: u32) {
-    fixture += 1;
-    assert_ne!(fixture, 43);
-}
-
-#[rstest(
-    expected, val,
-    case(45, 1),
-    case(46, 2),
-    case(47, 2)
-)]
-fn add_test(mut fixture: u32, expected: u32, mut val: u32) {
-    fixture += 1;
-    val += fixture + 1;
-
-    assert_eq!(expected, val);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/panic.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/panic.rs
deleted file mode 100644
index a86c97a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/panic.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-use rstest::*;
-
-#[fixture]
-pub fn fixture() -> u32 { 42 }
-
-#[rstest]
-#[should_panic]
-fn should_success(fixture: u32) {
-    assert_ne!(fixture, 42);
-}
-
-#[rstest]
-#[should_panic]
-fn should_fail(fixture: u32) {
-    assert_eq!(fixture, 42);
-}
-
-#[rstest(
-    expected, input,
-    case(4, 5),
-    case(3, 2),
-    case(3, 3)
-)]
-#[should_panic]
-fn fail(expected: i32, input: i32) {
-    assert_eq!(expected, input);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/reject_no_item_function.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/reject_no_item_function.rs
deleted file mode 100644
index d120bdc..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/reject_no_item_function.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-use rstest::rstest;
-
-#[rstest]
-struct Foo;
-
-#[rstest]
-impl Foo {}
-
-#[rstest]
-mod mod_baz {}
-
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/remove_underscore.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/remove_underscore.rs
deleted file mode 100644
index 50930a5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/remove_underscore.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn can_be_ignored() {}
-
-#[rstest]
-fn ignore_input(_can_be_ignored: ()) {
-    assert!(true);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/rename.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/rename.rs
deleted file mode 100644
index 77e7d9e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/rename.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn very_long_and_boring_name(#[default(42)] inject: u32) -> u32 {
-    inject
-}
-
-mod sub_module {
-    use super::*;
-
-    #[fixture]
-    pub fn mod_fixture() -> u32 {
-        42
-    }
-}
-
-#[rstest(very_long_and_boring_name as foo)]
-fn compact(foo: u32) {
-    assert!(42 == foo);
-}
-
-#[rstest(sub_module::mod_fixture as foo)]
-fn compact_mod(foo: u32) {
-    assert!(42 == foo);
-}
-
-#[rstest(very_long_and_boring_name(21) as foo)]
-fn compact_injected(foo: u32) {
-    assert!(21 == foo);
-}
-
-#[rstest]
-fn attribute(#[from(very_long_and_boring_name)] foo: u32) {
-    assert!(42 == foo);
-}
-
-#[rstest]
-fn attribute_mod(#[from(sub_module::mod_fixture)] foo: u32) {
-    assert!(42 == foo);
-}
-
-#[rstest]
-fn attribute_injected(
-    #[from(very_long_and_boring_name)]
-    #[with(21)]
-    foo: u32,
-) {
-    assert!(21 == foo);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/return_result.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/return_result.rs
deleted file mode 100644
index bcfb6f6b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/return_result.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-use rstest::rstest;
-
-#[rstest]
-fn should_success() -> Result<(), &'static str> {
-    Ok(())
-}
-
-#[rstest]
-fn should_fail() -> Result<(), &'static str> {
-    Err("Return Error")
-}
-
-#[rstest(ret,
-    case::should_success(Ok(())),
-    case::should_fail(Err("Return Error"))
-)]
-fn return_type(ret: Result<(), &'static str>) -> Result<(), &'static str> {
-    ret
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async.rs
deleted file mode 100644
index 2a4f6a6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-use rstest::*;
-
-#[fixture]
-async fn fixture() -> u32 { 42 }
-
-#[rstest]
-async fn should_pass(#[future] fixture: u32) {
-    assert_eq!(fixture.await, 42);
-}
-
-#[rstest]
-async fn should_fail(#[future] fixture: u32) {
-    assert_ne!(fixture.await, 42);
-}
-
-#[rstest]
-#[should_panic]
-async fn should_panic_pass(#[future] fixture: u32) {
-    panic!(format!("My panic -> fixture = {}", fixture.await));
-}
-
-#[rstest]
-#[should_panic]
-async fn should_panic_fail(#[future] fixture: u32) {
-    assert_eq!(fixture.await, 42);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async_awt.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async_awt.rs
deleted file mode 100644
index 9d39236..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async_awt.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use rstest::*;
-
-#[fixture]
-async fn fixture() -> u32 {
-    42
-}
-
-#[rstest]
-async fn should_pass(#[future(awt)] fixture: u32) {
-    assert_eq!(fixture, 42);
-}
-
-#[rstest]
-async fn should_fail(#[future(awt)] fixture: u32) {
-    assert_ne!(fixture, 42);
-}
-
-#[rstest]
-#[should_panic]
-async fn should_panic_pass(#[future(awt)] fixture: u32) {
-    panic!(format!("My panic -> fixture = {}", fixture));
-}
-
-#[rstest]
-#[should_panic]
-async fn should_panic_fail(#[future(awt)] fixture: u32) {
-    assert_eq!(fixture, 42);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async_awt_global.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async_awt_global.rs
deleted file mode 100644
index 42794612..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/async_awt_global.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-use rstest::*;
-
-#[fixture]
-async fn fixture() -> u32 {
-    42
-}
-
-#[rstest]
-#[awt]
-async fn should_pass(#[future] fixture: u32) {
-    assert_eq!(fixture, 42);
-}
-
-#[rstest]
-#[awt]
-async fn should_fail(#[future] fixture: u32) {
-    assert_ne!(fixture, 42);
-}
-
-#[rstest]
-#[awt]
-#[should_panic]
-async fn should_panic_pass(#[future] fixture: u32) {
-    panic!(format!("My panic -> fixture = {}", fixture));
-}
-
-#[rstest]
-#[awt]
-#[should_panic]
-async fn should_panic_fail(#[future] fixture: u32) {
-    assert_eq!(fixture, 42);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/dump_debug.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/dump_debug.rs
deleted file mode 100644
index a287932f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/dump_debug.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-use rstest::*;
-
-#[derive(Debug)]
-struct A {}
-
-#[fixture]
-fn fu32() -> u32 { 42 }
-#[fixture]
-fn fstring() -> String { "A String".to_string() }
-#[fixture]
-fn ftuple() -> (A, String, i32) { (A{}, "A String".to_string(), -12) }
-
-#[rstest(::trace)]
-fn should_fail(fu32: u32, fstring: String, ftuple: (A, String, i32)) {
-    assert!(false);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/inject.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/inject.rs
deleted file mode 100644
index c835097b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/inject.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use rstest::*;
-use actix_rt;
-
-#[fixture]
-fn a() -> u32 {
-    42
-}
-
-#[rstest]
-#[test]
-fn sync_case(a: u32) {}
-
-#[rstest]
-#[test]
-#[should_panic]
-fn sync_case_panic(a: u32) { panic!("panic") }
-
-#[rstest]
-#[test]
-fn sync_case_fail(a: u32) { assert_eq!(2, a); }
-
-#[rstest]
-#[test]
-fn sync_case_panic_fail(a: u32) { panic!("panic") }
-
-#[rstest]
-#[actix_rt::test]
-async fn async_case(a: u32) {}
-
-#[rstest]
-#[actix_rt::test]
-async fn async_case_fail(a: u32) { assert_eq!(2, a); }
-
-#[rstest]
-#[actix_rt::test]
-#[should_panic]
-async fn async_case_panic(a: u32) { panic!("panic") }
-
-#[rstest]
-#[actix_rt::test]
-async fn async_case_panic_fail(a: u32) { panic!("panic") }
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/partial.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/partial.rs
deleted file mode 100644
index e653ab2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/partial.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn f1() -> u32 {
-    0
-}
-#[fixture]
-fn f2() -> u32 {
-    0
-}
-#[fixture]
-fn f3() -> u32 {
-    0
-}
-
-#[fixture]
-fn fixture(f1: u32, f2: u32, f3: u32) -> u32 {
-    f1 + 10 * f2 + 100 * f3
-}
-
-#[rstest]
-fn default(fixture: u32) {
-    assert_eq!(fixture, 0);
-}
-
-#[rstest(fixture(7))]
-fn partial_1(fixture: u32) {
-    assert_eq!(fixture, 7);
-}
-
-#[rstest]
-fn partial_attr_1(#[with(7)] fixture: u32) {
-    assert_eq!(fixture, 7);
-}
-
-#[rstest(fixture(2, 4))]
-fn partial_2(fixture: u32) {
-    assert_eq!(fixture, 42);
-}
-
-#[rstest]
-fn partial_attr_2(#[with(2, 4)] fixture: u32) {
-    assert_eq!(fixture, 42);
-}
-
-#[rstest(fixture(2, 4, 5))]
-fn complete(fixture: u32) {
-    assert_eq!(fixture, 542);
-}
-
-#[rstest]
-fn complete_attr(#[with(2, 4, 5)] fixture: u32) {
-    assert_eq!(fixture, 542);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/resolve.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/resolve.rs
deleted file mode 100644
index 607caa3..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/resolve.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-use rstest::*;
-
-pub trait Tr {
-    fn get() -> Self;
-}
-
-impl Tr for i32 {
-    fn get() -> Self {
-        42
-    }
-}
-
-impl Tr for u32 {
-    fn get() -> Self {
-        42
-    }
-}
-
-#[fixture]
-pub fn fgen<T: Tr>() -> T {
-    T::get()
-}
-
-#[rstest]
-fn generics_u32(fgen: u32) {
-    assert_eq!(fgen, 42u32);
-}
-
-#[rstest]
-fn generics_i32(fgen: i32) {
-    assert_eq!(fgen, 42i32);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/simple.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/simple.rs
deleted file mode 100644
index b99b5f0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/single/simple.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-use rstest::*;
-
-#[fixture]
-pub fn fixture() -> u32 { 42 }
-
-#[rstest]
-fn should_success(fixture: u32) {
-    assert_eq!(fixture, 42);
-}
-
-#[rstest]
-fn should_fail(fixture: u32) {
-    assert_ne!(fixture, 42);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout.rs
deleted file mode 100644
index 88df752..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout.rs
+++ /dev/null
@@ -1,232 +0,0 @@
-use rstest::*;
-use std::time::Duration;
-
-fn ms(ms: u32) -> Duration {
-    Duration::from_millis(ms.into())
-}
-
-mod thread {
-    use super::*;
-
-    fn delayed_sum(a: u32, b: u32, delay: Duration) -> u32 {
-        std::thread::sleep(delay);
-        a + b
-    }
-
-    #[rstest]
-    #[timeout(ms(80))]
-    fn single_pass() {
-        assert_eq!(4, delayed_sum(2, 2, ms(10)));
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    fn single_fail_value() {
-        assert_eq!(5, delayed_sum(2, 2, ms(1)));
-    }
-    
-    #[rstest]
-    #[timeout(ms(1000))]
-    #[should_panic = "user message"]
-    fn fail_with_user_message() {
-        panic!("user message");
-    }
-
-    #[rstest]
-    #[timeout(ms(10))]
-    fn single_fail_timeout() {
-        assert_eq!(4, delayed_sum(2, 2, ms(80)));
-    }
-
-    #[rstest]
-    #[timeout(ms(80))]
-    #[case(ms(10))]
-    fn one_pass(#[case] delay: Duration) {
-        assert_eq!(4, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[timeout(ms(10))]
-    #[case(ms(80))]
-    fn one_fail_timeout(#[case] delay: Duration) {
-        assert_eq!(4, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    #[case(ms(1))]
-    fn one_fail_value(#[case] delay: Duration) {
-        assert_eq!(5, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[case::pass(ms(1), 4)]
-    #[case::fail_timeout(ms(80), 4)]
-    #[case::fail_value(ms(1), 5)]
-    #[timeout(ms(40))]
-    fn group_same_timeout(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    #[case::pass(ms(1), 4)]
-    #[timeout(ms(30))]
-    #[case::fail_timeout(ms(70), 4)]
-    #[timeout(ms(100))]
-    #[case::fail_value(ms(1), 5)]
-    fn group_single_timeout(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[case::pass(ms(1), 4)]
-    #[timeout(ms(10))]
-    #[case::fail_timeout(ms(60), 4)]
-    #[case::fail_value(ms(1), 5)]
-    #[timeout(ms(100))]
-    fn group_one_timeout_override(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay));
-    }
-
-    struct S {}
-
-    #[rstest]
-    #[case(S{})]
-    fn compile_with_no_copy_arg(#[case] _s: S) {
-        assert!(true);
-    }
-
-    #[fixture]
-    fn no_copy() -> S {
-        S {}
-    }
-
-    #[rstest]
-    fn compile_with_no_copy_fixture(no_copy: S) {
-        assert!(true);
-    }
-
-    #[rstest]
-    fn default_timeout_failure() {
-        assert_eq!(4, delayed_sum(2, 2, ms(1100)));
-    }
-}
-
-mod async_std_cases {
-    use super::*;
-
-    async fn delayed_sum(a: u32, b: u32, delay: Duration) -> u32 {
-        async_std::task::sleep(delay).await;
-        a + b
-    }
-
-    #[rstest]
-    #[timeout(ms(80))]
-    async fn single_pass() {
-        assert_eq!(4, delayed_sum(2, 2, ms(10)).await);
-    }
-
-    #[rstest]
-    #[timeout(ms(10))]
-    async fn single_fail_timeout() {
-        assert_eq!(4, delayed_sum(2, 2, ms(80)).await);
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    async fn single_fail_value() {
-        assert_eq!(5, delayed_sum(2, 2, ms(1)).await);
-    }
-
-    #[rstest]
-    #[timeout(ms(1000))]
-    #[should_panic = "user message"]
-    async fn fail_with_user_message() {
-        panic! {"user message"};
-    }
-
-    #[rstest]
-    #[timeout(ms(80))]
-    #[case(ms(10))]
-    async fn one_pass(#[case] delay: Duration) {
-        assert_eq!(4, delayed_sum(2, 2, delay).await);
-    }
-
-    #[rstest]
-    #[timeout(ms(10))]
-    #[case(ms(80))]
-    async fn one_fail_timeout(#[case] delay: Duration) {
-        assert_eq!(4, delayed_sum(2, 2, delay).await);
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    #[case(ms(1))]
-    async fn one_fail_value(#[case] delay: Duration) {
-        assert_eq!(5, delayed_sum(2, 2, delay).await);
-    }
-
-    #[rstest]
-    #[case::pass(ms(1), 4)]
-    #[case::fail_timeout(ms(80), 4)]
-    #[case::fail_value(ms(1), 5)]
-    #[timeout(ms(40))]
-    async fn group_same_timeout(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay).await);
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    #[case::pass(ms(1), 4)]
-    #[timeout(ms(30))]
-    #[case::fail_timeout(ms(70), 4)]
-    #[timeout(ms(100))]
-    #[case::fail_value(ms(1), 5)]
-    async fn group_single_timeout(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay).await);
-    }
-
-    #[rstest]
-    #[case::pass(ms(1), 4)]
-    #[timeout(ms(10))]
-    #[case::fail_timeout(ms(60), 4)]
-    #[case::fail_value(ms(1), 5)]
-    #[timeout(ms(100))]
-    async fn group_one_timeout_override(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay).await);
-    }
-
-    struct S {}
-
-    #[rstest]
-    #[case(S{})]
-    async fn compile_with_no_copy_arg(#[case] _s: S) {
-        assert!(true);
-    }
-
-    #[fixture]
-    fn no_copy() -> S {
-        S {}
-    }
-
-    #[rstest]
-    fn compile_with_no_copy_fixture(_no_copy: S) {
-        assert!(true);
-    }
-
-    #[fixture]
-    async fn a_fix() -> S {
-        S {}
-    }
-
-    #[rstest]
-    fn compile_with_async_fixture(#[future] a_fix: S) {
-        assert!(true);
-    }
-
-    #[rstest]
-    async fn compile_with_async_awt_fixture(#[future(awt)] a_fix: S) {
-        assert!(true);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout_async.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout_async.rs
deleted file mode 100644
index 1d12cf52..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout_async.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-use rstest::*;
-use std::time::Duration;
-
-fn ms(ms: u32) -> Duration {
-    Duration::from_millis(ms.into())
-}
-
-async fn delayed_sum(a: u32, b: u32,delay: Duration) -> u32 {
-    async_std::task::sleep(delay).await;
-    a + b
-}
-
-#[rstest]
-#[timeout(ms(80))]
-async fn single_pass() {
-    assert_eq!(4, delayed_sum(2, 2, ms(10)).await);
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout_other_name.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout_other_name.rs
deleted file mode 100644
index b360b313..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/timeout_other_name.rs
+++ /dev/null
@@ -1,114 +0,0 @@
-use other_name::*;
-use std::time::Duration;
-
-fn ms(ms: u32) -> Duration {
-    Duration::from_millis(ms.into())
-}
-
-mod thread {
-    use super::*;
-
-    fn delayed_sum(a: u32, b: u32, delay: Duration) -> u32 {
-        std::thread::sleep(delay);
-        a + b
-    }
-
-    #[rstest]
-    #[timeout(ms(80))]
-    fn single_pass() {
-        assert_eq!(4, delayed_sum(2, 2, ms(10)));
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    fn single_fail_value() {
-        assert_eq!(5, delayed_sum(2, 2, ms(1)));
-    }
-
-    #[rstest]
-    #[timeout(ms(1000))]
-    #[should_panic = "user message"]
-    fn fail_with_user_message() {
-        panic!("user message");
-    }
-
-    #[rstest]
-    #[timeout(ms(10))]
-    fn single_fail_timeout() {
-        assert_eq!(4, delayed_sum(2, 2, ms(80)));
-    }
-
-    #[rstest]
-    #[timeout(ms(80))]
-    #[case(ms(10))]
-    fn one_pass(#[case] delay: Duration) {
-        assert_eq!(4, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[timeout(ms(10))]
-    #[case(ms(80))]
-    fn one_fail_timeout(#[case] delay: Duration) {
-        assert_eq!(4, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    #[case(ms(1))]
-    fn one_fail_value(#[case] delay: Duration) {
-        assert_eq!(5, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[case::pass(ms(1), 4)]
-    #[case::fail_timeout(ms(80), 4)]
-    #[case::fail_value(ms(1), 5)]
-    #[timeout(ms(40))]
-    fn group_same_timeout(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[timeout(ms(100))]
-    #[case::pass(ms(1), 4)]
-    #[timeout(ms(30))]
-    #[case::fail_timeout(ms(70), 4)]
-    #[timeout(ms(100))]
-    #[case::fail_value(ms(1), 5)]
-    fn group_single_timeout(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay));
-    }
-
-    #[rstest]
-    #[case::pass(ms(1), 4)]
-    #[timeout(ms(10))]
-    #[case::fail_timeout(ms(60), 4)]
-    #[case::fail_value(ms(1), 5)]
-    #[timeout(ms(100))]
-    fn group_one_timeout_override(#[case] delay: Duration, #[case] expected: u32) {
-        assert_eq!(expected, delayed_sum(2, 2, delay));
-    }
-
-    struct S {}
-
-    #[rstest]
-    #[case(S{})]
-    fn compile_with_no_copy_arg(#[case] _s: S) {
-        assert!(true);
-    }
-
-    #[fixture]
-    fn no_copy() -> S {
-        S {}
-    }
-
-    #[rstest]
-    fn compile_with_no_copy_fixture(no_copy: S) {
-        assert!(true);
-    }
-
-    #[rstest]
-    fn default_timeout_failure() {
-        assert_eq!(4, delayed_sum(2, 2, ms(1100)));
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/use_mutable_fixture_in_parametric_arguments.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/use_mutable_fixture_in_parametric_arguments.rs
deleted file mode 100644
index 97a6b0f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/use_mutable_fixture_in_parametric_arguments.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-use rstest::*;
-
-#[fixture]
-fn f() -> String {
-    "f".to_owned()
-}
-
-fn append(s: &mut String, a: &str) -> String {
-    s.push_str("-");
-    s.push_str(a);
-    s.clone()
-}
-
-#[rstest]
-#[case(append(&mut f, "a"), "f-a", "f-a-b")]
-fn use_mutate_fixture(
-    mut f: String,
-    #[case] a: String,
-    #[values(append(&mut f, "b"))] b: String,
-    #[case] expected_a: &str,
-    #[case] expected_b: &str,
-) {
-    assert_eq!(expected_a, a);
-    assert_eq!(expected_b, b);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/values_tests_name.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/values_tests_name.rs
deleted file mode 100644
index eec208d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/resources/rstest/values_tests_name.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use rstest::*;
-
-enum Application {
-    Python,
-    Node,
-    Go,
-}
-
-enum Method {
-    GET,
-    POST,
-    PUT,
-    HEAD,
-}
-
-#[rstest]
-fn name_values(
-    #[values(Application::Python, Application::Node, Application::Go)] _val: Application,
-    #[values(Method::GET, Method::POST, Method::PUT, Method::HEAD)] _method: Method,
-) {
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/rstest/mod.rs b/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/rstest/mod.rs
deleted file mode 100644
index e4b44c3..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/tests/rstest/mod.rs
+++ /dev/null
@@ -1,1785 +0,0 @@
-use std::{fs::File, io::Write, path::Path};
-
-use mytest::*;
-use rstest_test::*;
-use unindent::Unindent;
-
-pub fn resources(res: impl AsRef<Path>) -> std::path::PathBuf {
-    let path = Path::new("rstest").join(res.as_ref());
-    super::resources(path)
-}
-
-fn prj(res: impl AsRef<Path>) -> Project {
-    crate::prj().set_code_file(resources(res))
-}
-
-fn run_test(res: impl AsRef<Path>) -> (std::process::Output, String) {
-    let prj = prj(res);
-    (
-        prj.run_tests().unwrap(),
-        prj.get_name().to_owned().to_string(),
-    )
-}
-
-#[test]
-fn files() {
-    let prj = prj("files.rs");
-    let files_path = prj.path().join("files");
-    let sub_folder = files_path.join("sub");
-    let up_sub_folder = prj.path().join("../files_test_sub_folder");
-    std::fs::create_dir(&files_path).unwrap();
-    std::fs::create_dir(&sub_folder).unwrap();
-    std::fs::create_dir(&up_sub_folder).unwrap();
-
-    for n in 0..4 {
-        let name = format!("element_{}.txt", n);
-        let path = files_path.join(&name);
-        let mut out = File::create(path).unwrap();
-        out.write_all(name.as_bytes()).unwrap();
-        out.write_all(b"--\n").unwrap();
-        out.write_all(b"something else\n").unwrap();
-    }
-    let dot = files_path.join(".ignore_me.txt");
-    File::create(dot)
-        .unwrap()
-        .write_all(b".ignore_me.txt--\n")
-        .unwrap();
-    let exclude = files_path.join("exclude.txt");
-    File::create(exclude)
-        .unwrap()
-        .write_all(b"excluded\n")
-        .unwrap();
-    let sub = sub_folder.join("sub_dir_file.txt");
-    File::create(sub)
-        .unwrap()
-        .write_all(b"sub_dir_file.txt--\nmore")
-        .unwrap();
-    let down_from_parent_folder = up_sub_folder.join("from_parent_folder.txt");
-    File::create(down_from_parent_folder)
-        .unwrap()
-        .write_all(b"from_parent_folder.txt--\nmore")
-        .unwrap();
-    let output = prj.run_tests().unwrap();
-
-    TestResults::new()
-        .ok("start_with_name::path_1__UP_files_test_sub_folder_from_parent_folder_txt")
-        .ok("start_with_name::path_2_files_element_0_txt")
-        .ok("start_with_name::path_3_files_element_1_txt")
-        .ok("start_with_name::path_4_files_element_2_txt")
-        .ok("start_with_name::path_5_files_element_3_txt")
-        .ok("start_with_name::path_6_files_sub_sub_dir_file_txt")
-        .ok("start_with_name_with_include::path_1_files__ignore_me_txt")
-        .ok("start_with_name_with_include::path_2_files_element_0_txt")
-        .ok("start_with_name_with_include::path_3_files_element_1_txt")
-        .ok("start_with_name_with_include::path_4_files_element_2_txt")
-        .ok("start_with_name_with_include::path_5_files_element_3_txt")
-        .ok("start_with_name_with_include::path_6_files_sub_sub_dir_file_txt")
-        .ok("module::pathbuf_need_not_be_in_scope::path_1_files__ignore_me_txt")
-        .ok("module::pathbuf_need_not_be_in_scope::path_2_files_element_0_txt")
-        .ok("module::pathbuf_need_not_be_in_scope::path_3_files_element_1_txt")
-        .ok("module::pathbuf_need_not_be_in_scope::path_4_files_element_2_txt")
-        .ok("module::pathbuf_need_not_be_in_scope::path_5_files_element_3_txt")
-        .ok("module::pathbuf_need_not_be_in_scope::path_6_files_sub_sub_dir_file_txt")
-        .assert(output);
-}
-
-#[test]
-fn mutable_input() {
-    let (output, _) = run_test("mut.rs");
-
-    TestResults::new()
-        .ok("should_success")
-        .fail("should_fail")
-        .ok("add_test::case_1")
-        .ok("add_test::case_2")
-        .fail("add_test::case_3")
-        .assert(output);
-}
-
-#[test]
-fn test_with_return_type() {
-    let (output, _) = run_test("return_result.rs");
-
-    TestResults::new()
-        .ok("should_success")
-        .fail("should_fail")
-        .ok("return_type::case_1_should_success")
-        .fail("return_type::case_2_should_fail")
-        .assert(output);
-}
-
-#[test]
-fn should_panic() {
-    let (output, _) = run_test("panic.rs");
-
-    TestResults::new()
-        .ok("should_success")
-        .fail("should_fail")
-        .ok("fail::case_1")
-        .ok("fail::case_2")
-        .fail("fail::case_3")
-        .assert(output);
-}
-
-#[test]
-fn should_not_show_a_warning_for_should_panic_attribute() {
-    let (output, _) = run_test("panic.rs");
-
-    assert!(!output.stderr.str().contains("unused attribute"));
-}
-
-#[test]
-fn should_not_show_a_warning_for_values_test_names() {
-    let (output, _) = run_test("values_tests_name.rs");
-
-    assert_not_in!(output.stderr.str(), "warning:");
-}
-
-#[test]
-fn should_map_fixture_by_remove_first_underscore_if_any() {
-    let (output, _) = run_test("remove_underscore.rs");
-
-    TestResults::new().ok("ignore_input").assert(output);
-}
-
-#[test]
-fn generic_input() {
-    let (output, _) = run_test("generic.rs");
-
-    TestResults::new()
-        .ok("simple")
-        .ok("strlen_test::case_1")
-        .ok("strlen_test::case_2")
-        .assert(output);
-}
-
-#[test]
-fn impl_input() {
-    let (output, _) = run_test("impl_param.rs");
-
-    TestResults::new()
-        .ok("simple")
-        .ok("strlen_test::case_1")
-        .ok("strlen_test::case_2")
-        .assert(output);
-}
-
-#[test]
-fn use_mutable_fixture_in_parametric_arguments() {
-    let (output, _) = run_test("use_mutable_fixture_in_parametric_arguments.rs");
-
-    TestResults::new()
-        .with_contains(true)
-        .ok("use_mutate_fixture::case_1::b_1")
-        .assert(output);
-}
-
-#[test]
-fn should_not_remove_lifetimes() {
-    let (output, _) = run_test("lifetimes.rs");
-
-    TestResults::new()
-        .with_contains(true)
-        .ok("case")
-        .ok("values")
-        .ok("fixture")
-        .assert(output);
-}
-
-#[test]
-fn should_reject_no_item_function() {
-    let (output, name) = run_test("reject_no_item_function.rs");
-
-    assert_in!(
-        output.stderr.str(),
-        format!(
-            "
-        error: expected `fn`
-         --> {}/src/lib.rs:4:1
-          |
-        4 | struct Foo;
-          | ^^^^^^
-        ",
-            name
-        )
-        .unindent()
-    );
-
-    assert_in!(
-        output.stderr.str(),
-        format!(
-            "
-        error: expected `fn`
-         --> {}/src/lib.rs:7:1
-          |
-        7 | impl Foo {{}}
-          | ^^^^
-        ",
-            name
-        )
-        .unindent()
-    );
-
-    assert_in!(
-        output.stderr.str(),
-        format!(
-            "
-        error: expected `fn`
-          --> {}/src/lib.rs:10:1
-           |
-        10 | mod mod_baz {{}}
-           | ^^^
-        ",
-            name
-        )
-        .unindent()
-    );
-}
-
-mod dump_input_values {
-    use super::*;
-
-    #[rstest]
-    #[case::compact_syntax("dump_debug_compact.rs")]
-    #[case::attr_syntax("dump_debug.rs")]
-    fn if_implements_debug(#[case] source: &str) {
-        let (output, _) = run_test(source);
-        let out = output.stdout.str().to_string();
-
-        TestResults::new()
-            .fail("single_fail")
-            .fail("no_trace_single_fail")
-            .fail("cases_fail::case_1")
-            .fail("cases_fail::case_2")
-            .fail("no_trace_cases_fail::case_1")
-            .fail("no_trace_cases_fail::case_2")
-            .fail_with("matrix_fail::u_1", false, 8)
-            .fail_with("matrix_fail::u_2", false, 8)
-            .assert(output);
-
-        assert_in!(out, "fu32 = 42");
-        assert_in!(out, r#"fstring = "A String""#);
-        assert_in!(out, r#"ftuple = (A, "A String", -12"#);
-
-        assert_in!(out, "u = 42");
-        assert_in!(out, r#"s = "str""#);
-        assert_in!(out, r#"t = ("ss", -12)"#);
-
-        assert_in!(out, "u = 24");
-        assert_in!(out, r#"s = "trs""#);
-        assert_in!(out, r#"t = ("tt", -24)"#);
-
-        assert_in!(out, "u = 1");
-        assert_in!(out, r#"s = "rst""#);
-        assert_in!(out, r#"t = ("SS", -12)"#);
-
-        assert_in!(out, "u = 2");
-        assert_in!(out, r#"s = "srt""#);
-        assert_in!(out, r#"t = ("TT", -24)"#);
-
-        let expected = 11;
-        for marker in ["TEST START", "TEST ARGUMENTS"] {
-            let n_found = out.lines().filter(|l| l.contains(marker)).count();
-            assert_eq!(
-                n_found, expected,
-                "Should contain {expected} '{marker}' but found {n_found}. [Should not enclose output if no trace]"
-            );
-        }
-    }
-
-    #[rstest]
-    #[case::compact_syntax("dump_not_debug_compact.rs")]
-    #[case::attr_syntax("dump_not_debug.rs")]
-    fn should_not_compile_if_not_implement_debug(#[case] source: &str) {
-        let (output, name) = run_test(source);
-
-        assert_all_in!(
-            output.stderr.str(),
-            format!("--> {}/src/lib.rs:10:11", name),
-            "fn single(fixture: S) {}",
-            "^^^^^^^ `S` cannot be formatted using `{:?}`"
-        );
-
-        assert_in!(
-            output.stderr.str(),
-            format!("--> {}/src/lib.rs:15:10", name),
-            "fn cases(s: S) {}",
-            "^ `S` cannot be formatted using `{:?}`"
-        );
-
-        assert_in!(
-            output.stderr.str(),
-            format!("--> {}/src/lib.rs:20:11", name),
-            "fn matrix(s: S) {}",
-            "^ `S` cannot be formatted using `{:?}`"
-        );
-    }
-
-    #[rstest]
-    #[case::compact_syntax("dump_exclude_some_inputs_compact.rs")]
-    #[case::attr_syntax("dump_exclude_some_inputs.rs")]
-    fn can_exclude_some_inputs(#[case] source: &str) {
-        let (output, _) = run_test(source);
-        let out = output.stdout.str().to_string();
-
-        TestResults::new()
-            .fail("simple")
-            .fail("cases::case_1")
-            .fail_in("matrix::a_1")
-            .assert(output);
-
-        assert_in!(out, "fu32 = 42");
-        assert_in!(out, "d = D");
-        assert_in!(out, "fd = D");
-        assert_in!(out, "dd = D");
-    }
-
-    #[test]
-    fn should_be_enclosed_in_an_explicit_session() {
-        let (output, _) = run_test(Path::new("single").join("dump_debug.rs"));
-        let out = output.stdout.str().to_string();
-
-        TestResults::new().fail("should_fail").assert(output);
-
-        let lines = out
-            .lines()
-            .skip_while(|l| !l.contains("TEST ARGUMENTS"))
-            .take_while(|l| !l.contains("TEST START"))
-            .collect::<Vec<_>>();
-
-        let expected = 4;
-        assert_eq!(
-            expected,
-            lines.len(),
-            "Not contains {expected} lines but {}: '{}'",
-            lines.len(),
-            lines.join("\n")
-        );
-    }
-}
-
-mod single {
-    use super::*;
-
-    fn res(name: impl AsRef<Path>) -> impl AsRef<Path> {
-        Path::new("single").join(name.as_ref())
-    }
-
-    #[test]
-    fn one_success_and_one_fail() {
-        let (output, _) = run_test(res("simple.rs"));
-
-        TestResults::new()
-            .ok("should_success")
-            .fail("should_fail")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_resolve_generics_fixture_outputs() {
-        let (output, _) = run_test(res("resolve.rs"));
-
-        TestResults::new()
-            .ok("generics_u32")
-            .ok("generics_i32")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_apply_partial_fixture() {
-        let (output, _) = run_test(res("partial.rs"));
-
-        TestResults::new()
-            .ok("default")
-            .ok("partial_1")
-            .ok("partial_attr_1")
-            .ok("partial_2")
-            .ok("partial_attr_2")
-            .ok("complete")
-            .ok("complete_attr")
-            .assert(output);
-    }
-
-    #[rstest]
-    #[case("async.rs")]
-    #[case("async_awt.rs")]
-    #[case("async_awt_global.rs")]
-    fn should_run_async_function(#[case] name: &str) {
-        let prj = prj(res(name));
-        prj.add_dependency("async-std", r#"{version="*", features=["attributes"]}"#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .ok("should_pass")
-            .fail("should_fail")
-            .ok("should_panic_pass")
-            .fail("should_panic_fail")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_use_injected_test_attr() {
-        let prj = prj(res("inject.rs"));
-        prj.add_dependency("actix-rt", r#""1.1.0""#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .ok("sync_case")
-            .ok("sync_case_panic")
-            .fail("sync_case_fail")
-            .fail("sync_case_panic_fail")
-            .ok("async_case")
-            .ok("async_case_panic")
-            .fail("async_case_fail")
-            .fail("async_case_panic_fail")
-            .assert(output);
-    }
-}
-
-mod cases {
-    use super::*;
-
-    fn res(name: impl AsRef<Path>) -> impl AsRef<Path> {
-        Path::new("cases").join(name.as_ref())
-    }
-
-    #[test]
-    fn should_compile() {
-        let output = prj(res("simple.rs")).compile().unwrap();
-
-        assert_eq!(
-            Some(0),
-            output.status.code(),
-            "Compile error due: {}",
-            output.stderr.str()
-        )
-    }
-
-    #[test]
-    fn happy_path() {
-        let (output, _) = run_test(res("simple.rs"));
-
-        TestResults::new()
-            .ok("strlen_test::case_1")
-            .ok("strlen_test::case_2")
-            .assert(output);
-    }
-
-    #[test]
-    fn use_attr() {
-        let (output, _) = run_test(res("use_attr.rs"));
-
-        TestResults::new()
-            .ok("all::case_1_ciao")
-            .ok("all::case_2_panic")
-            .ok("all::case_3_foo")
-            .ok("just_cases::case_1_ciao")
-            .ok("just_cases::case_2_foo")
-            .ok("just_cases::case_3_panic")
-            .ok("just_args::case_1_ciao")
-            .ok("just_args::case_2_foo")
-            .ok("just_args::case_3_panic")
-            .ok("all_panic::case_1")
-            .ok("all_panic::case_2")
-            .assert(output);
-    }
-
-    #[test]
-    fn case_description() {
-        let (output, _) = run_test(res("description.rs"));
-
-        TestResults::new()
-            .ok("description::case_1_user_test_description")
-            .ok("description::case_2")
-            .fail("description::case_3_user_test_description_fail")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_apply_partial_fixture() {
-        let (output, _) = run_test(res("partial.rs"));
-
-        TestResults::new()
-            .ok("default::case_1")
-            .ok("partial_1::case_1")
-            .ok("partial_2::case_1")
-            .ok("complete::case_1")
-            .ok("partial_attr_1::case_1")
-            .ok("partial_attr_2::case_1")
-            .ok("complete_attr::case_1")
-            .fail("default::case_2")
-            .fail("partial_1::case_2")
-            .fail("partial_2::case_2")
-            .fail("complete::case_2")
-            .fail("partial_attr_1::case_2")
-            .fail("partial_attr_2::case_2")
-            .fail("complete_attr::case_2")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_use_case_attributes() {
-        let (output, _) = run_test(res("case_attributes.rs"));
-
-        TestResults::new()
-            .ok("attribute_per_case::case_1_no_panic")
-            .ok("attribute_per_case::case_2_panic")
-            .ok("attribute_per_case::case_3_panic_with_message")
-            .fail("attribute_per_case::case_4_no_panic_but_fail")
-            .fail("attribute_per_case::case_5_panic_but_fail")
-            .fail("attribute_per_case::case_6_panic_with_wrong_message")
-            .assert(output);
-    }
-
-    #[rstest]
-    #[case("async.rs")]
-    #[case("async_awt.rs")]
-    #[case("async_awt_global.rs")]
-    fn should_run_async_function(#[case] name: &str) {
-        let prj = prj(res(name));
-        prj.add_dependency("async-std", r#"{version="*", features=["attributes"]}"#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .ok("my_async_test::case_1_pass")
-            .fail("my_async_test::case_2_fail")
-            .ok("my_async_test::case_3_pass_panic")
-            .fail("my_async_test::case_4_fail_panic")
-            .ok("my_async_test_revert::case_1_pass")
-            .assert(output);
-    }
-
-    #[rstest]
-    fn should_run_async_mut() {
-        let prj = prj(res("async_awt_mut.rs"));
-        prj.add_dependency("async-std", r#"{version="*", features=["attributes"]}"#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .ok("my_mut_test_global_awt::case_1_pass")
-            .ok("my_mut_test_local_awt::case_1_pass")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_use_injected_test_attr() {
-        let prj = prj(res("inject.rs"));
-        prj.add_dependency("actix-rt", r#""1.1.0""#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .ok("sync::case_1_pass")
-            .ok("sync::case_2_panic")
-            .fail("sync::case_3_fail")
-            .ok("fn_async::case_1_pass")
-            .ok("fn_async::case_2_panic")
-            .fail("fn_async::case_3_fail")
-            .assert(output);
-    }
-
-    #[test]
-    fn trace_just_one_test() {
-        let (output, _) = run_test(res("dump_just_one_case.rs"));
-        let out = output.stdout.str().to_string();
-
-        TestResults::new()
-            .fail("cases::case_1_first_no_dump")
-            .fail("cases::case_2_dump_me")
-            .fail("cases::case_3_last_no_dump")
-            .assert(output);
-
-        assert_in!(out, r#"s = "Trace it!""#);
-        assert_not_in!(out, r#"s = "Please don't trace me""#);
-    }
-
-    mod not_compile_if_missed_arguments {
-        use super::*;
-
-        #[test]
-        fn happy_path() {
-            let (output, _) = run_test(res("missed_argument.rs"));
-            let stderr = output.stderr.str();
-
-            assert_ne!(Some(0), output.status.code());
-            assert_in!(stderr, "Missed argument");
-            assert_in!(
-                stderr,
-                "
-                  |
-                4 | #[rstest(f, case(42), case(24))]
-                  |          ^
-                "
-                .unindent()
-            );
-        }
-
-        #[test]
-        fn should_reports_all() {
-            let (output, _) = run_test(res("missed_some_arguments.rs"));
-            let stderr = output.stderr.str();
-
-            assert_in!(
-                stderr,
-                "
-                  |
-                4 | #[rstest(a,b,c, case(1,2,3), case(3,2,1))]
-                  |          ^
-                "
-                .unindent()
-            );
-            assert_in!(
-                stderr,
-                "
-                  |
-                4 | #[rstest(a,b,c, case(1,2,3), case(3,2,1))]
-                  |              ^
-                "
-                .unindent()
-            );
-
-            assert_eq!(
-                2,
-                stderr.count("Missed argument"),
-                "Should contain message exactly 2 occurrences in error message:\n{}",
-                stderr
-            )
-        }
-
-        #[test]
-        fn should_report_just_one_error_message_for_all_test_cases() {
-            let (output, _) = run_test(res("missed_argument.rs"));
-            let stderr = output.stderr.str();
-
-            assert_eq!(
-                1,
-                stderr.count("Missed argument"),
-                "More than one message occurrence in error message:\n{}",
-                stderr
-            )
-        }
-
-        #[test]
-        fn should_not_report_error_in_macro_syntax() {
-            let (output, _) = run_test(res("missed_argument.rs"));
-            let stderr = output.stderr.str();
-
-            assert!(!stderr.contains("macros that expand to items"));
-        }
-    }
-
-    mod not_compile_if_a_case_has_a_wrong_signature {
-        use std::process::Output;
-
-        use lazy_static::lazy_static;
-
-        use super::*;
-
-        //noinspection RsTypeCheck
-        fn execute() -> &'static (Output, String) {
-            lazy_static! {
-                static ref OUTPUT: (Output, String) = run_test(res("case_with_wrong_args.rs"));
-            }
-            assert_ne!(Some(0), OUTPUT.0.status.code(), "Should not compile");
-            &OUTPUT
-        }
-
-        #[test]
-        fn with_too_much_arguments() {
-            let (output, _) = execute();
-            let stderr = output.stderr.str();
-
-            assert_in!(
-                stderr,
-                "
-                  |
-                8 | #[rstest(a, case(42, 43), case(12), case(24, 34))]
-                  |                  ^^^^^^
-                "
-                .unindent()
-            );
-
-            assert_in!(
-                stderr,
-                "
-                  |
-                8 | #[rstest(a, case(42, 43), case(12), case(24, 34))]
-                  |                                          ^^^^^^
-                "
-                .unindent()
-            );
-        }
-
-        #[test]
-        fn with_less_arguments() {
-            let (output, _) = execute();
-            let stderr = output.stderr.str();
-
-            assert_in!(
-                stderr,
-                "
-                  |
-                4 | #[rstest(a, b, case(42), case(1, 2), case(43))]
-                  |                     ^^
-                "
-                .unindent()
-            );
-
-            assert_in!(
-                stderr,
-                "
-                  |
-                4 | #[rstest(a, b, case(42), case(1, 2), case(43))]
-                  |                                           ^^
-                "
-                .unindent()
-            );
-        }
-
-        #[test]
-        fn and_reports_all_errors() {
-            let (output, _) = execute();
-            let stderr = output.stderr.str();
-
-            // Exactly 4 cases are wrong
-            assert_eq!(
-                4,
-                stderr.count("Wrong case signature: should match the given parameters list."),
-                "Should contain message exactly 4 occurrences in error message:\n{}",
-                stderr
-            );
-        }
-    }
-
-    mod not_compile_if_args_but_no_cases {
-        use std::process::Output;
-
-        use lazy_static::lazy_static;
-
-        use super::*;
-
-        //noinspection RsTypeCheck
-        fn execute() -> &'static (Output, String) {
-            lazy_static! {
-                static ref OUTPUT: (Output, String) = run_test(res("args_with_no_cases.rs"));
-            }
-            assert_ne!(Some(0), OUTPUT.0.status.code(), "Should not compile");
-            &OUTPUT
-        }
-
-        #[test]
-        fn report_error() {
-            let (output, name) = execute();
-            let stderr = output.stderr.str();
-
-            assert_in!(
-                stderr,
-                format!(
-                    "
-                error: No cases for this argument.
-                 --> {}/src/lib.rs:3:10
-                  |
-                3 | #[rstest(one, two, three)]
-                  |          ^^^
-                ",
-                    name
-                )
-                .unindent()
-            );
-        }
-
-        #[test]
-        fn and_reports_all_errors() {
-            let (output, _) = execute();
-            let stderr = output.stderr.str();
-
-            // Exactly 3 cases are wrong
-            assert_eq!(
-                3,
-                stderr.count("No cases for this argument."),
-                "Should contain message exactly 3 occurrences in error message:\n{}",
-                stderr
-            );
-        }
-    }
-}
-
-mod matrix {
-    use super::*;
-
-    fn res(name: impl AsRef<Path>) -> impl AsRef<Path> {
-        Path::new("matrix").join(name.as_ref())
-    }
-
-    #[test]
-    fn should_compile() {
-        let output = prj(res("simple.rs")).compile().unwrap();
-
-        assert_eq!(
-            Some(0),
-            output.status.code(),
-            "Compile error due: {}",
-            output.stderr.str()
-        )
-    }
-
-    #[test]
-    fn happy_path() {
-        let (output, _) = run_test(res("simple.rs"));
-
-        TestResults::new()
-            .with_contains(true)
-            .ok("strlen_test::expected_1_4::input_1___ciao__")
-            .ok("strlen_test::expected_1_4::input_2___buzz__")
-            .ok("strlen_test::expected_2_2_3_2::input_1___ciao__")
-            .ok("strlen_test::expected_2_2_3_2::input_2___buzz__")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_apply_partial_fixture() {
-        let (output, _) = run_test(res("partial.rs"));
-
-        TestResults::new()
-            .with_contains(true)
-            .ok_times("default::a_1", 2)
-            .ok("default::a_2")
-            .ok("partial_2::a_2")
-            .ok("partial_attr_2::a_2")
-            .ok("complete::a_2")
-            .ok("complete_attr::a_2")
-            .fail("default::a_2")
-            .fail_times("partial_1::a_1", 2)
-            .fail_times("partial_1::a_2", 2)
-            .fail_times("partial_2::a_1", 2)
-            .fail("partial_2::a_2")
-            .fail_times("complete::a_1", 2)
-            .fail("complete::a_2")
-            .fail_times("partial_attr_1::a_1", 2)
-            .fail_times("partial_attr_1::a_2", 2)
-            .fail_times("partial_attr_2::a_1", 2)
-            .fail("partial_attr_2::a_2")
-            .fail_times("complete_attr::a_1", 2)
-            .fail("complete_attr::a_2")
-            .assert(output);
-    }
-
-    #[rstest]
-    #[case("async.rs")]
-    #[case("async_awt.rs")]
-    #[case("async_awt_global.rs")]
-    fn should_run_async_function(#[case] name: &str) {
-        let prj = prj(res(name));
-        prj.add_dependency("async-std", r#"{version="*", features=["attributes"]}"#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .with_contains(true)
-            .ok("my_async_test::first_1")
-            .fail("my_async_test::first_1")
-            .fail("my_async_test::first_2")
-            .ok("my_async_test::first_2")
-            .assert(output);
-    }
-
-    #[test]
-    fn should_use_injected_test_attr() {
-        let prj = prj(res("inject.rs"));
-        prj.add_dependency("actix-rt", r#""1.1.0""#);
-
-        let output = prj.run_tests().unwrap();
-
-        TestResults::new()
-            .with_contains(true)
-            .ok("sync::first_1")
-            .fail("sync::first_1")
-            .fail("sync::first_2")
-            .ok("sync::first_2")
-            .ok("fn_async::first_1")
-            .fail("fn_async::first_1")
-            .fail("fn_async::first_2")
-            .ok("fn_async::first_2")
-            .assert(output);
-    }
-
-    #[test]
-    fn use_args_attributes() {
-        let (output, _) = run_test(res("use_attr.rs"));
-
-        TestResults::new()
-            .ok("both::expected_1_4::input_1___ciao__")
-            .ok("both::expected_1_4::input_2___buzz__")
-            .ok("both::expected_2_2_3_2::input_1___ciao__")
-            .ok("both::expected_2_2_3_2::input_2___buzz__")
-            .ok("first::input_1___ciao__::expected_1_4")
-            .ok("first::input_2___buzz__::expected_1_4")
-            .ok("first::input_1___ciao__::expected_2_2_3_2")
-            .ok("first::input_2___buzz__::expected_2_2_3_2")
-            .ok("second::expected_1_4::input_1___ciao__")
-            .ok("second::expected_1_4::input_2___buzz__")
-            .ok("second::expected_2_2_3_2::input_1___ciao__")
-            .ok("second::expected_2_2_3_2::input_2___buzz__")
-            .assert(output);
-    }
-}
-
-#[test]
-fn convert_string_literal() {
-    let (output, _) = run_test("convert_string_literal.rs");
-
-    assert_regex!(
-        "Cannot parse 'error' to get [a-z:_0-9]*MyType",
-        output.stdout.str()
-    );
-
-    TestResults::new()
-        .ok("cases::case_1")
-        .ok("cases::case_2")
-        .ok("cases::case_3")
-        .ok("cases::case_4")
-        .fail("cases::case_5")
-        .fail("cases::case_6")
-        .ok_in("values::addr_1")
-        .ok_in("values::addr_2")
-        .fail_in("values::addr_3")
-        .fail_in("values::addr_4")
-        .ok_in("not_convert_byte_array::case_1::values_1")
-        .ok("not_convert_impl::case_1")
-        .ok("not_convert_generics::case_1")
-        .ok("not_convert_generics::case_2")
-        .ok("convert_without_debug::case_1")
-        .fail("convert_without_debug::case_2")
-        .assert(output);
-}
-
-#[test]
-fn happy_path() {
-    let (output, _) = run_test("happy_path.rs");
-
-    TestResults::new()
-        .ok("happy::case_1::expected_1_4::input_1___ciao__")
-        .ok("happy::case_1::expected_1_4::input_2___buzz__")
-        .ok("happy::case_1::expected_2_2_3_2::input_1___ciao__")
-        .ok("happy::case_1::expected_2_2_3_2::input_2___buzz__")
-        .ok("happy::case_2_second::expected_1_4::input_1___ciao__")
-        .ok("happy::case_2_second::expected_1_4::input_2___buzz__")
-        .ok("happy::case_2_second::expected_2_2_3_2::input_1___ciao__")
-        .ok("happy::case_2_second::expected_2_2_3_2::input_2___buzz__")
-        .assert(output);
-}
-
-#[test]
-fn destruct() {
-    let (output, _) = run_test("destruct.rs");
-
-    TestResults::new()
-        .ok("cases_destruct::case_1_two_times_twenty_one::__destruct_3_1_T__new_42_1_")
-        .ok("cases_destruct::case_1_two_times_twenty_one::__destruct_3_2_T_a_3_b_14_")
-        .ok("cases_destruct::case_2_six_times_seven::__destruct_3_1_T__new_42_1_")
-        .ok("cases_destruct::case_2_six_times_seven::__destruct_3_2_T_a_3_b_14_")
-        .ok("cases_destruct_named_tuple::case_1_two_times_twenty_one::__destruct_3_1_S_42_1_")
-        .ok("cases_destruct_named_tuple::case_1_two_times_twenty_one::__destruct_3_2_S_3_14_")
-        .ok("cases_destruct_named_tuple::case_2_six_times_seven::__destruct_3_1_S_42_1_")
-        .ok("cases_destruct_named_tuple::case_2_six_times_seven::__destruct_3_2_S_3_14_")
-        .ok("cases_destruct_tuple::case_1_two_times_twenty_one::__destruct_3_1__42_1_")
-        .ok("cases_destruct_tuple::case_1_two_times_twenty_one::__destruct_3_2__3_14_")
-        .ok("cases_destruct_tuple::case_2_six_times_seven::__destruct_3_1__42_1_")
-        .ok("cases_destruct_tuple::case_2_six_times_seven::__destruct_3_2__3_14_")
-        .ok("swapped")
-        .assert(output);
-}
-
-#[test]
-fn rename() {
-    let (output, _) = run_test("rename.rs");
-
-    TestResults::new()
-        .ok("compact")
-        .ok("compact_mod")
-        .ok("compact_injected")
-        .ok("attribute")
-        .ok("attribute_mod")
-        .ok("attribute_injected")
-        .assert(output);
-}
-
-#[test]
-fn ignore_underscore_args() {
-    let (output, _) = run_test("ignore_args.rs");
-
-    TestResults::new()
-        .with_contains(true)
-        .ok("test::case_1::_ignore3_1")
-        .ok("test::case_1::_ignore3_2")
-        .ok("test::case_1::_ignore3_3")
-        .ok("test::case_1::_ignore3_4")
-        .ok("test::case_2::_ignore3_1")
-        .ok("test::case_2::_ignore3_2")
-        .ok("test::case_2::_ignore3_3")
-        .ok("test::case_2::_ignore3_4")
-        .assert(output);
-}
-
-#[test]
-fn ignore_args_not_fixtures() {
-    let prj = prj("ignore_not_fixture_arg.rs");
-    prj.add_dependency(
-        "sqlx",
-        r#"{version="*", features=["sqlite","macros","runtime-tokio"]}"#,
-    );
-
-    let output = prj.run_tests().unwrap();
-
-    TestResults::new()
-        .with_contains(true)
-        .ok("test_db")
-        .assert(output);
-}
-
-#[test]
-fn timeout() {
-    let mut prj = prj("timeout.rs");
-    prj.add_dependency("async-std", r#"{version="*", features=["attributes"]}"#);
-    prj.set_default_timeout(1);
-    let output = prj.run_tests().unwrap();
-
-    TestResults::new()
-        .ok("thread::single_pass")
-        .fail("thread::single_fail_value")
-        .ok("thread::fail_with_user_message")
-        .fail("thread::single_fail_timeout")
-        .ok("thread::one_pass::case_1")
-        .fail("thread::one_fail_value::case_1")
-        .fail("thread::one_fail_timeout::case_1")
-        .ok("thread::group_same_timeout::case_1_pass")
-        .fail("thread::group_same_timeout::case_2_fail_timeout")
-        .fail("thread::group_same_timeout::case_3_fail_value")
-        .ok("thread::group_single_timeout::case_1_pass")
-        .fail("thread::group_single_timeout::case_2_fail_timeout")
-        .fail("thread::group_single_timeout::case_3_fail_value")
-        .ok("thread::group_one_timeout_override::case_1_pass")
-        .fail("thread::group_one_timeout_override::case_2_fail_timeout")
-        .fail("thread::group_one_timeout_override::case_3_fail_value")
-        .ok("thread::compile_with_no_copy_arg::case_1")
-        .ok("thread::compile_with_no_copy_fixture")
-        .fail("thread::default_timeout_failure")
-        .ok("async_std_cases::single_pass")
-        .fail("async_std_cases::single_fail_value")
-        .ok("async_std_cases::fail_with_user_message")
-        .fail("async_std_cases::single_fail_timeout")
-        .ok("async_std_cases::one_pass::case_1")
-        .fail("async_std_cases::one_fail_value::case_1")
-        .fail("async_std_cases::one_fail_timeout::case_1")
-        .ok("async_std_cases::group_same_timeout::case_1_pass")
-        .fail("async_std_cases::group_same_timeout::case_2_fail_timeout")
-        .fail("async_std_cases::group_same_timeout::case_3_fail_value")
-        .ok("async_std_cases::group_single_timeout::case_1_pass")
-        .fail("async_std_cases::group_single_timeout::case_2_fail_timeout")
-        .fail("async_std_cases::group_single_timeout::case_3_fail_value")
-        .ok("async_std_cases::group_one_timeout_override::case_1_pass")
-        .fail("async_std_cases::group_one_timeout_override::case_2_fail_timeout")
-        .fail("async_std_cases::group_one_timeout_override::case_3_fail_value")
-        .ok("async_std_cases::compile_with_no_copy_arg::case_1")
-        .ok("async_std_cases::compile_with_no_copy_fixture")
-        .ok("async_std_cases::compile_with_async_fixture")
-        .ok("async_std_cases::compile_with_async_awt_fixture")
-        .assert(output);
-}
-
-mod import_crate_with_other_name {
-    use super::*;
-
-    fn prj(res: &str, features: Option<&[&str]>) -> Project {
-        let prj = crate::base_prj();
-        let default_features = features.is_none();
-        let features = features
-            .map(|features| {
-                features
-                    .iter()
-                    .map(|f| format!(r#""{}""#, f))
-                    .collect::<Vec<_>>()
-                    .join(",")
-            })
-            .unwrap_or_else(|| "".to_string());
-        prj.add_dependency(
-            "other_name",
-            &format!(
-                r#"{{path="{}", package = "rstest", default-features = {}, features = [{}]}}"#,
-                prj.exec_dir_str().as_str(),
-                default_features,
-                features
-            ),
-        );
-        prj.set_code_file(resources(res))
-    }
-
-    #[test]
-    fn should_fails_to_compile_if_crate_name_feature_is_not_enabled() {
-        let prj = prj("timeout_other_name.rs", Some(&[]));
-        assert!(!prj.compile().unwrap().status.success());
-    }
-
-    #[test]
-    fn should_always_compile_project_that_use_default_name() {
-        let prj = crate::base_prj();
-        prj.add_dependency(
-            "rstest",
-            &format!(
-                r#"{{path="{}", default-features = false}}"#,
-                prj.exec_dir_str().as_str(),
-            ),
-        );
-        let prj = prj.set_code_file(resources("convert_string_literal.rs"));
-
-        assert!(prj.compile().unwrap().status.success());
-    }
-
-    #[rstest]
-    #[case::default_features(None)]
-    #[case::with_crate_name_feature(Some(["crate-name"].as_slice()))]
-    fn timeout_should_compile_and_run(#[case] features: Option<&[&str]>) {
-        let prj = prj("timeout_other_name.rs", features);
-        assert!(prj.compile().unwrap().status.success());
-    }
-
-    #[rstest]
-    #[case::default(None)]
-    #[case::with_crate_name_feature(Some(["crate-name"].as_slice()))]
-    fn convert_string_literal_should_compile_and_run(#[case] features: Option<&[&str]>) {
-        let prj = prj("convert_string_literal_other_name.rs", features);
-        assert!(prj.compile().unwrap().status.success());
-    }
-}
-
-#[test]
-fn local_lifetime() {
-    let (output, _) = run_test("local_lifetime.rs");
-
-    TestResults::new()
-        .ok("tests::it_works::case_1")
-        .assert(output);
-}
-
-#[test]
-fn by_ref() {
-    let prj = prj("by_ref.rs");
-    let files_path = prj.path().join("files");
-    std::fs::create_dir(&files_path).unwrap();
-    let name = "my_name.txt";
-    let mut out = File::create(files_path.join(name)).unwrap();
-    out.write_all(name.as_bytes()).unwrap();
-    let output = prj.run_tests().unwrap();
-
-    TestResults::new()
-        .ok("test::case_1::v_1_42")
-        .ok("test::case_1::v_2_142")
-        .ok("start_with_name::path_1_files_my_name_txt")
-        .assert(output);
-}
-
-mod async_timeout_feature {
-    use super::*;
-
-    fn build_prj(features: &[&str]) -> Project {
-        let prj = crate::base_prj();
-        let features = match features.is_empty() {
-            true => String::new(),
-            false => format!(r#", features=["{}"]"#, features.join(r#"",""#)),
-        };
-        prj.add_dependency(
-            "rstest",
-            &format!(
-                r#"{{path="{}", default-features = false {}}}"#,
-                prj.exec_dir_str().as_str(),
-                features
-            ),
-        );
-        prj.add_dependency("async-std", r#"{version="*", features=["attributes"]}"#);
-        prj
-    }
-
-    #[test]
-    fn should_not_compile_if_feature_disable() {
-        let prj = build_prj(&[]);
-        let output = prj
-            .set_code_file(resources("timeout_async.rs"))
-            .run_tests()
-            .unwrap();
-
-        assert_in!(output.stderr.str(), "error: Enable async-timeout feature");
-    }
-
-    #[test]
-    fn should_work_if_feature_enabled() {
-        let prj = build_prj(&["async-timeout"]);
-
-        let output = prj
-            .set_code_file(resources("timeout_async.rs"))
-            .run_tests()
-            .unwrap();
-
-        TestResults::new().ok("single_pass").assert(output);
-    }
-}
-
-mod should_show_correct_errors {
-    use std::process::Output;
-
-    use lazy_static::lazy_static;
-
-    use super::*;
-
-    //noinspection RsTypeCheck
-    fn execute() -> &'static (Output, String) {
-        lazy_static! {
-            static ref OUTPUT: (Output, String) = run_test("errors.rs");
-        }
-        &OUTPUT
-    }
-
-    #[test]
-    fn if_no_fixture() {
-        let (output, name) = execute();
-
-        assert_in!(output.stderr.str(), "error[E0433]: ");
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                  --> {}/src/lib.rs:13:33
-                   |
-                13 | fn error_cannot_resolve_fixture(no_fixture: u32, f: u32) {{}}",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_inject_wrong_fixture() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Missed argument: 'not_a_fixture' should be a test function argument.
-                  --> {}/src/lib.rs:28:23
-                   |
-                28 | #[rstest(f, case(42), not_a_fixture(24))]
-                   |                       ^^^^^^^^^^^^^
-                ",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_wrong_type() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                r#"
-                error[E0308]: mismatched types
-                 --> {}/src/lib.rs:9:18
-                  |
-                9 |     let a: u32 = "";
-                "#,
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_wrong_type_fixture() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error[E0308]: mismatched types
-                  --> {}/src/lib.rs:16:29
-                   |
-                16 | fn error_fixture_wrong_type(fixture: String, f: u32) {{}}
-                ",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_wrong_type_case_param() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error[E0308]: mismatched types
-                  --> {}/src/lib.rs:19:26
-                   |
-                19 | fn error_case_wrong_type(f: &str) {{}}",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_wrong_type_matrix_param() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error[E0308]: mismatched types
-                  --> {}/src/lib.rs:51:28
-                   |
-                51 | fn error_matrix_wrong_type(f: &str) {{}}",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_arbitrary_rust_code_has_some_errors() {
-        let (output, name) = execute();
-
-        assert_regex!(
-            format!(
-                r#"error\[E0308\]: mismatched types
-                \s+--> {}/src/lib\.rs:22:31"#,
-                name
-            )
-            .unindent(),
-            output.stderr.str()
-        );
-        assert_regex!(
-            r#"22\s+|\s+case\(vec!\[1,2,3\]\.contains\(2\)\)\)"#,
-            output.stderr.str()
-        );
-
-        assert_regex!(
-            format!(
-                r#"error\[E0308\]: mismatched types
-                \s+--> {}/src/lib\.rs:53:45"#,
-                name
-            )
-            .unindent(),
-            output.stderr.str()
-        );
-        assert_regex!(
-            r#"53\s+|\s+#\[rstest\(condition => \[vec!\[1,2,3\]\.contains\(2\)\] \)\]"#,
-            output.stderr.str()
-        );
-    }
-
-    #[test]
-    fn if_inject_a_fixture_that_is_already_a_case() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'f' is already defined.
-                  --> {}/src/lib.rs:41:13
-                   |
-                41 | #[rstest(f, f(42), case(12))]
-                   |             ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_define_a_case_arg_that_is_already_an_injected_fixture() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'f' is already defined.
-                  --> {}/src/lib.rs:44:17
-                   |
-                44 | #[rstest(f(42), f, case(12))]
-                   |                 ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_inject_a_fixture_more_than_once() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'f' is already defined.
-                  --> {}/src/lib.rs:47:20
-                   |
-                47 | #[rstest(v, f(42), f(42), case(12))]
-                   |                    ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_list_argument_dont_match_function_signature() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Missed argument: 'not_exist_1' should be a test function argument.
-                  --> {}/src/lib.rs:61:10
-                   |
-                61 | #[rstest(not_exist_1 => [42],
-                   |          ^^^^^^^^^^^",
-                name
-            )
-            .unindent()
-        );
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Missed argument: 'not_exist_2' should be a test function argument.
-                  --> {}/src/lib.rs:62:10
-                   |
-                62 |          not_exist_2 => [42])]
-                   |          ^^^^^^^^^^^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_inject_a_fixture_that_is_already_a_value_list() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'f' is already defined.
-                  --> {}/src/lib.rs:65:25
-                   |
-                65 | #[rstest(f => [41, 42], f(42))]
-                   |                         ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_define_value_list_more_that_once() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'a' is already defined.
-                  --> {}/src/lib.rs:77:25
-                   |
-                77 | #[rstest(a => [42, 24], a => [24, 42])]
-                   |                         ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_define_value_list_that_is_already_an_injected_fixture() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'f' is already defined.
-                  --> {}/src/lib.rs:68:17
-                   |
-                68 | #[rstest(f(42), f => [41, 42])]
-                   |                 ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_define_value_list_that_is_already_a_case_arg() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'a' is already defined.
-                  --> {}/src/lib.rs:71:23
-                   |
-                71 | #[rstest(a, case(42), a => [42])]
-                   |                       ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_define_a_case_arg_that_is_already_a_value_list() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'a' is already defined.
-                  --> {}/src/lib.rs:74:21
-                   |
-                74 | #[rstest(a => [42], a, case(42))]
-                   |                     ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_define_a_case_arg_more_that_once() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Duplicate argument: 'a' is already defined.
-                  --> {}/src/lib.rs:80:13
-                   |
-                80 | #[rstest(a, a, case(42))]
-                   |             ^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_a_value_contains_empty_list() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Values list should not be empty
-                  --> {}/src/lib.rs:58:19
-                   |
-                58 | #[rstest(empty => [])]
-                   |                   ^^",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_try_to_convert_literal_string_to_a_type_that_not_implement_from_str() {
-        let (output, name) = execute();
-
-        assert_in!(output.stderr.str(), format!("--> {}/src/lib.rs:84:1", name));
-        assert_in!(
-            output.stderr.str(),
-            "| -------- doesn't satisfy `S: FromStr`"
-        );
-    }
-
-    #[test]
-    fn if_try_to_use_future_on_an_impl() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                  --> {}/src/lib.rs:93:8
-                   |
-                93 |     s: impl AsRef<str>,
-                   |        ^^^^^^^^^^^^^^^
-                ",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_try_to_use_future_more_that_once() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                   --> {}/src/lib.rs:102:5
-                    |
-                102 |     #[future]
-                    |     ^^^^^^^^^
-                ",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_use_timeout_without_arg() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: expected attribute arguments in parentheses: #[timeout(...)]
-                   --> {}/src/lib.rs:108:3
-                    |
-                108 | #[timeout]
-                    |   ^^^^^^^
-                ",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_timeout_is_not_an_expression() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: expected an expression
-                   --> {}/src/lib.rs:112:17
-                    |
-                112 | #[timeout(some -> strange -> invalid -> expression)]
-                    |                 ^
-                ",
-                name
-            )
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_timeout_is_not_a_duration() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error[E0308]: mismatched types
-                   --> {}/src/lib.rs:116:11",
-                name
-            )
-            .unindent()
-        );
-
-        assert_in!(
-            output.stderr.str(),
-            "
-            116 | #[timeout(42)]
-                |           ^^ expected `Duration`, found integer
-            "
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn if_files_contains_absolute_path() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                "
-                error: Invalid glob path: path contains non-relative component
-                   --> {}/src/lib.rs:120:30",
-                name
-            )
-            .unindent()
-        );
-
-        assert_in!(
-            output.stderr.str(),
-            r#"
-                120 | fn error_absolute_path_files(#[files("/tmp/tmp.Q81idVZYAV/*.txt")] path: std::path::PathBuf) {}
-                    |                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-            "#
-            .unindent()
-        );
-    }
-
-    #[test]
-    fn try_to_destruct_implicit_fixture() {
-        let (output, name) = execute();
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                r#"
-                error: To destruct a fixture you should provide a path to resolve it by '#[from(...)]' attribute.
-                   --> {name}/src/lib.rs:126:27
-                    |
-                126 | fn wrong_destruct_fixture(T(a, b): T, #[with(42)] T(c, d): T) {{}}
-                    |                           ^^^^^^^^^^"#,
-            )
-            .unindent()
-        );
-
-        assert_in!(
-            output.stderr.str(),
-            format!(
-                r#"
-                error: To destruct a fixture you should provide a path to resolve it by '#[from(...)]' attribute.
-                   --> {name}/src/lib.rs:126:51
-                    |
-                126 | fn wrong_destruct_fixture(T(a, b): T, #[with(42)] T(c, d): T) {{}}
-                    |                                                   ^^^^^^^^^^"#,
-            )
-            .unindent()
-        );
-
-        assert_not_in!(output.stderr.str(), "#[case] T(e, f): T");
-        assert_not_in!(output.stderr.str(), "#[values(T(1, 2))] T(g, h): T");
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/.cargo_vcs_info.json
deleted file mode 100644
index 43711f3..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "62134281cf451fc2bea69f9d2a16805a9ad03fef"
-  },
-  "path_in_vcs": "rstest_macros"
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/Cargo.toml
deleted file mode 100644
index db9380a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/Cargo.toml
+++ /dev/null
@@ -1,103 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.67.1"
-name = "rstest_macros"
-version = "0.22.0"
-authors = ["Michele d'Amico <michele.damico@gmail.com>"]
-build = "build.rs"
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = """
-Rust fixture based test framework. It use procedural macro
-to implement fixtures and table based tests.
-"""
-homepage = "https://github.com/la10736/rstest"
-readme = "README.md"
-keywords = [
-    "test",
-    "fixture",
-]
-categories = ["development-tools::testing"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/la10736/rstest"
-
-[lib]
-name = "rstest_macros"
-path = "src/lib.rs"
-proc-macro = true
-
-[dependencies.cfg-if]
-version = "1.0.0"
-
-[dependencies.glob]
-version = "0.3.1"
-
-[dependencies.proc-macro-crate]
-version = "3.1.0"
-optional = true
-
-[dependencies.proc-macro2]
-version = "1.0.39"
-
-[dependencies.quote]
-version = "1.0.19"
-
-[dependencies.regex]
-version = "1.7.3"
-
-[dependencies.relative-path]
-version = "1.8.0"
-
-[dependencies.syn]
-version = "2.0.2"
-features = [
-    "full",
-    "parsing",
-    "extra-traits",
-    "visit",
-    "visit-mut",
-]
-
-[dependencies.unicode-ident]
-version = "1.0.5"
-
-[dev-dependencies.actix-rt]
-version = "2.7.0"
-
-[dev-dependencies.async-std]
-version = "1.12.0"
-features = ["attributes"]
-
-[dev-dependencies.maplit]
-version = "1.0.2"
-
-[dev-dependencies.pretty_assertions]
-version = "1.2.1"
-
-[dev-dependencies.rstest]
-version = "0.21.0"
-default-features = false
-
-[build-dependencies.rustc_version]
-version = "0.4.0"
-
-[features]
-async-timeout = []
-crate-name = ["dep:proc-macro-crate"]
-default = [
-    "async-timeout",
-    "crate-name",
-]
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/Cargo.toml.orig
deleted file mode 100644
index 9fd94dc5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/Cargo.toml.orig
+++ /dev/null
@@ -1,52 +0,0 @@
-[package]
-authors = ["Michele d'Amico <michele.damico@gmail.com>"]
-categories = ["development-tools::testing"]
-description = """
-Rust fixture based test framework. It use procedural macro
-to implement fixtures and table based tests.
-"""
-edition = "2021"
-homepage = "https://github.com/la10736/rstest"
-keywords = ["test", "fixture"]
-license = "MIT OR Apache-2.0"
-name = "rstest_macros"
-repository = "https://github.com/la10736/rstest"
-rust-version = "1.67.1"
-version = "0.22.0"
-
-[lib]
-proc-macro = true
-
-[features]
-async-timeout = []
-default = ["async-timeout", "crate-name"]
-crate-name = ["dep:proc-macro-crate"]
-
-[dependencies]
-cfg-if = "1.0.0"
-glob = "0.3.1"
-proc-macro2 = "1.0.39"
-quote = "1.0.19"
-regex = "1.7.3"
-relative-path = "1.8.0"
-syn = { version = "2.0.2", features = [
-    "full",
-    "parsing",
-    "extra-traits",
-    "visit",
-    "visit-mut",
-] }
-unicode-ident = "1.0.5"
-proc-macro-crate = { version = "3.1.0", optional = true }
-
-[dev-dependencies]
-actix-rt = "2.7.0"
-async-std = { version = "1.12.0", features = ["attributes"] }
-maplit = "1.0.2"
-pretty_assertions = "1.2.1"
-rstest = { version = "0.21.0", default-features = false }
-rstest_reuse = { path = "../rstest_reuse" }
-rstest_test = { path = "../rstest_test" }
-
-[build-dependencies]
-rustc_version = "0.4.0"
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/LICENSE-APACHE
deleted file mode 100644
index 0f9875d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/LICENSE-APACHE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-   
-   Copyright 2018-19 Michele d'Amico
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/LICENSE-MIT
deleted file mode 100644
index 513033e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/LICENSE-MIT
+++ /dev/null
@@ -1,18 +0,0 @@
-Copyright 2018-19 Michele d'Amico
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), 
-to deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
-INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/README.md b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/README.md
deleted file mode 100644
index 04edd46..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-[![Crate][crate-image]][crate-link]
-[![Docs][docs-image]][docs-link]
-[![Status][test-action-image]][test-action-link]
-[![Apache 2.0 Licensed][license-apache-image]][license-apache-link]
-[![MIT Licensed][license-mit-image]][license-mit-link]
-
-# `rstest`'s Macros Crate
-
-See [`rstest`][crate-link].
-
-[crate-image]: https://img.shields.io/crates/v/rstest.svg
-[crate-link]: https://crates.io/crates/rstest
-[docs-image]: https://docs.rs/rstest/badge.svg
-[docs-link]: https://docs.rs/rstest/
-[test-action-image]: https://github.com/la10736/rstest/workflows/Test/badge.svg
-[test-action-link]: https://github.com/la10736/rstest/actions?query=workflow:Test
-[license-apache-image]: https://img.shields.io/badge/license-Apache2.0-blue.svg
-[license-mit-image]: https://img.shields.io/badge/license-MIT-blue.svg
-[license-apache-link]: http://www.apache.org/licenses/LICENSE-2.0
-[license-MIT-link]: http://opensource.org/licenses/MIT
-[reuse-crate-link]: https://crates.io/crates/rstest_reuse
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/build.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/build.rs
deleted file mode 100644
index cab134c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/build.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-use rustc_version::{version, version_meta, Channel};
-
-fn allow_features() -> Option<Vec<String>> {
-    std::env::var("CARGO_ENCODED_RUSTFLAGS").ok().map(|args| {
-        args.split('\u{001f}')
-            .filter(|arg| arg.starts_with("-Zallow-features="))
-            .map(|arg| arg.split('=').nth(1).unwrap())
-            .flat_map(|features| features.split(','))
-            .map(|f| f.to_owned())
-            .collect()
-    })
-}
-
-fn can_enable_proc_macro_diagnostic() -> bool {
-    allow_features()
-        .map(|f| f.iter().any(|f| f == "proc_macro_diagnostic"))
-        .unwrap_or(true)
-}
-
-fn main() {
-    let ver = version().unwrap();
-    assert!(ver.major >= 1);
-
-    match version_meta().unwrap().channel {
-        Channel::Nightly | Channel::Dev if can_enable_proc_macro_diagnostic() => {
-            println!("cargo:rustc-cfg=use_proc_macro_diagnostic");
-        }
-        _ => {}
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/error.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/error.rs
deleted file mode 100644
index 74fac8b6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/error.rs
+++ /dev/null
@@ -1,398 +0,0 @@
-/// Module for error rendering stuff
-use std::collections::HashMap;
-
-use proc_macro2::TokenStream;
-use syn::{spanned::Spanned, visit::Visit};
-use syn::{visit, ItemFn, Pat};
-
-use crate::parse::{
-    fixture::FixtureInfo,
-    rstest::{RsTestData, RsTestInfo},
-};
-use crate::refident::{MaybeIdent, MaybePat};
-
-use super::utils::fn_args_has_pat;
-
-pub mod messages {
-    pub const DESTRUCT_WITHOUT_FROM : &str = "To destruct a fixture you should provide a path to resolve it by '#[from(...)]' attribute.";
-    pub fn use_more_than_once(name: &str) -> String {
-        format!("You cannot use '{name}' attribute more than once for the same argument")
-    }
-}
-
-pub(crate) fn rstest(test: &ItemFn, info: &RsTestInfo) -> TokenStream {
-    missed_arguments(test, info.data.items.iter())
-        .chain(duplicate_arguments(info.data.items.iter()))
-        .chain(invalid_cases(&info.data))
-        .chain(case_args_without_cases(&info.data))
-        .chain(destruct_fixture_without_from(test, info))
-        .map(|e| e.to_compile_error())
-        .collect()
-}
-
-pub(crate) fn fixture(test: &ItemFn, info: &FixtureInfo) -> TokenStream {
-    missed_arguments(test, info.data.items.iter())
-        .chain(duplicate_arguments(info.data.items.iter()))
-        .chain(async_once(test, info))
-        .chain(generics_once(test, info))
-        .chain(destruct_fixture_without_from(test, info))
-        .map(|e| e.to_compile_error())
-        .collect()
-}
-
-fn async_once<'a>(test: &'a ItemFn, info: &FixtureInfo) -> Errors<'a> {
-    match (test.sig.asyncness, info.arguments.get_once()) {
-        (Some(_asyncness), Some(once)) => Box::new(std::iter::once(syn::Error::new_spanned(
-            once,
-            "Cannot apply #[once] to async fixture.",
-        ))),
-        _ => Box::new(std::iter::empty()),
-    }
-}
-
-#[derive(Default)]
-struct SearchImpl(bool);
-
-impl<'ast> Visit<'ast> for SearchImpl {
-    fn visit_type(&mut self, i: &'ast syn::Type) {
-        if self.0 {
-            return;
-        }
-        if let syn::Type::ImplTrait(_) = i {
-            self.0 = true
-        }
-        visit::visit_type(self, i);
-    }
-}
-
-impl SearchImpl {
-    fn function_has_some_impl(f: &ItemFn) -> bool {
-        let mut s = SearchImpl::default();
-        visit::visit_item_fn(&mut s, f);
-        s.0
-    }
-}
-
-fn has_some_generics(test: &ItemFn) -> bool {
-    !test.sig.generics.params.is_empty() || SearchImpl::function_has_some_impl(test)
-}
-
-fn generics_once<'a>(test: &'a ItemFn, info: &FixtureInfo) -> Errors<'a> {
-    match (has_some_generics(test), info.arguments.get_once()) {
-        (true, Some(once)) => Box::new(std::iter::once(syn::Error::new_spanned(
-            once,
-            "Cannot apply #[once] on generic fixture.",
-        ))),
-        _ => Box::new(std::iter::empty()),
-    }
-}
-
-trait IsImplicitFixture {
-    fn is_implicit_fixture(&self, pat: &Pat) -> bool;
-}
-
-impl IsImplicitFixture for FixtureInfo {
-    fn is_implicit_fixture(&self, pat: &Pat) -> bool {
-        !self.data.fixtures().any(|f| &f.arg == pat)
-    }
-}
-
-impl IsImplicitFixture for RsTestInfo {
-    fn is_implicit_fixture(&self, pat: &Pat) -> bool {
-        !self.data.case_args().any(|a| a == pat)
-            && !self.data.list_values().any(|a| &a.arg == pat)
-            && !self.data.fixtures().any(|f| &f.arg == pat)
-    }
-}
-
-fn destruct_fixture_without_from<'a>(
-    function: &'a ItemFn,
-    info: &'a impl IsImplicitFixture,
-) -> Errors<'a> {
-    Box::new(
-        function
-            .sig
-            .inputs
-            .iter()
-            .filter_map(|a| a.maybe_pat().map(|p| (a, p)))
-            .filter(|&(_, p)| p.maybe_ident().is_none())
-            .filter(|&(_, p)| info.is_implicit_fixture(p))
-            .map(|(a, _)| syn::Error::new_spanned(a, messages::DESTRUCT_WITHOUT_FROM)),
-    )
-}
-
-#[derive(Debug, Default)]
-pub struct ErrorsVec(Vec<syn::Error>);
-
-pub(crate) fn _merge_errors<R1, R2>(
-    r1: Result<R1, ErrorsVec>,
-    r2: Result<R2, ErrorsVec>,
-) -> Result<(R1, R2), ErrorsVec> {
-    match (r1, r2) {
-        (Ok(r1), Ok(r2)) => Ok((r1, r2)),
-        (Ok(_), Err(e)) | (Err(e), Ok(_)) => Err(e),
-        (Err(mut e1), Err(mut e2)) => {
-            e1.append(&mut e2);
-            Err(e1)
-        }
-    }
-}
-
-macro_rules! merge_errors {
-    ($e:expr) => {
-        $e
-    };
-    ($e:expr, $($es:expr), +) => {
-        crate::error::_merge_errors($e, merge_errors!($($es),*))
-    };
-}
-
-macro_rules! composed_tuple {
-    ($i:ident) => {
-        $i
-    };
-    ($i:ident, $($is:ident), +) => {
-        ($i, composed_tuple!($($is),*))
-    };
-}
-
-impl std::ops::Deref for ErrorsVec {
-    type Target = Vec<syn::Error>;
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-impl std::ops::DerefMut for ErrorsVec {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
-    }
-}
-
-impl From<syn::Error> for ErrorsVec {
-    fn from(errors: syn::Error) -> Self {
-        vec![errors].into()
-    }
-}
-
-impl From<Vec<syn::Error>> for ErrorsVec {
-    fn from(errors: Vec<syn::Error>) -> Self {
-        Self(errors)
-    }
-}
-
-impl From<ErrorsVec> for Vec<syn::Error> {
-    fn from(v: ErrorsVec) -> Self {
-        v.0
-    }
-}
-
-impl quote::ToTokens for ErrorsVec {
-    fn to_tokens(&self, tokens: &mut TokenStream) {
-        tokens.extend(self.0.iter().map(|e| e.to_compile_error()))
-    }
-}
-
-impl From<ErrorsVec> for proc_macro::TokenStream {
-    fn from(v: ErrorsVec) -> Self {
-        use quote::ToTokens;
-        v.into_token_stream().into()
-    }
-}
-
-type Errors<'a> = Box<dyn Iterator<Item = syn::Error> + 'a>;
-
-fn missed_arguments<'a, I: MaybePat + Spanned + 'a>(
-    test: &'a ItemFn,
-    args: impl Iterator<Item = &'a I> + 'a,
-) -> Errors<'a> {
-    Box::new(
-        args.filter_map(|it| it.maybe_pat().map(|pat| (it, pat)))
-            .filter(move |(_, pat)| !fn_args_has_pat(test, pat))
-            .map(|(missed, pat)| {
-                syn::Error::new(
-                    missed.span(),
-                    format!(
-                        "Missed argument: '{}' should be a test function argument.",
-                        pat.render_type()
-                    ),
-                )
-            }),
-    )
-}
-
-fn duplicate_arguments<'a, I: MaybePat + Spanned + 'a>(
-    args: impl Iterator<Item = &'a I> + 'a,
-) -> Errors<'a> {
-    let mut used = HashMap::new();
-    Box::new(
-        args.filter_map(|it| it.maybe_pat().map(|pat| (it, pat)))
-            .filter_map(move |(it, pat)| {
-                let is_duplicate = used.contains_key(&pat);
-                used.insert(pat, it);
-                match is_duplicate {
-                    true => Some((it, pat)),
-                    false => None,
-                }
-            })
-            .map(|(duplicate, pat)| {
-                syn::Error::new(
-                    duplicate.span(),
-                    format!(
-                        "Duplicate argument: '{}' is already defined.",
-                        pat.render_type()
-                    ),
-                )
-            }),
-    )
-}
-
-fn invalid_cases(params: &RsTestData) -> Errors {
-    let n_args = params.case_args().count();
-    Box::new(
-        params
-            .cases()
-            .filter(move |case| case.args.len() != n_args)
-            .map(|case| {
-                syn::Error::new_spanned(
-                    case,
-                    "Wrong case signature: should match the given parameters list.",
-                )
-            }),
-    )
-}
-
-fn case_args_without_cases(params: &RsTestData) -> Errors {
-    if !params.has_cases() {
-        return Box::new(
-            params
-                .case_args()
-                .map(|a| syn::Error::new(a.span(), "No cases for this argument.")),
-        );
-    }
-    Box::new(std::iter::empty())
-}
-
-trait RenderType {
-    fn render_type(&self) -> String;
-}
-
-impl RenderType for syn::Pat {
-    fn render_type(&self) -> String {
-        match self {
-            syn::Pat::Ident(ref i) => i.ident.to_string(),
-            other => format!("{other:?}"),
-        }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use crate::{
-        parse::ExtendWithFunctionAttrs,
-        test::{assert_eq, *},
-    };
-    use rstest_test::assert_in;
-
-    use super::*;
-
-    #[rstest]
-    #[case::generics("fn f<G: SomeTrait>(){}")]
-    #[case::const_generics("fn f<const N: usize>(){}")]
-    #[case::lifetimes("fn f<'a>(){}")]
-    #[case::use_impl_in_answer("fn f() -> impl Iterator<Item=u32>{}")]
-    #[case::use_impl_in_arguments("fn f(it: impl Iterator<Item=u32>){}")]
-    #[should_panic]
-    #[case::sanity_check_with_no_generics("fn f() {}")]
-    fn generics_once_should_return_error(#[case] f: &str) {
-        let f: ItemFn = f.ast();
-        let info = FixtureInfo::default().with_once();
-
-        let errors = generics_once(&f, &info);
-
-        let out = errors
-            .map(|e| format!("{:?}", e))
-            .collect::<Vec<_>>()
-            .join("-----------------------\n");
-
-        assert_in!(out, "Cannot apply #[once] on generic fixture.");
-    }
-
-    #[rstest]
-    #[case::generics("fn f<G: SomeTrait>(){}")]
-    #[case::const_generics("fn f<const N: usize>(){}")]
-    #[case::lifetimes("fn f<'a>(){}")]
-    #[case::use_impl_in_answer("fn f() -> impl Iterator<Item=u32>{}")]
-    #[case::use_impl_in_arguments("fn f(it: impl Iterator<Item=u32>){}")]
-    fn generics_once_should_not_return_if_no_once(#[case] f: &str) {
-        let f: ItemFn = f.ast();
-        let info = FixtureInfo::default();
-
-        let errors = generics_once(&f, &info);
-
-        assert_eq!(0, errors.count());
-    }
-
-    #[rstest]
-    #[case::base_in_fixture("fn f(T{a}: T){}", FixtureInfo::default(), 1)]
-    #[case::one_of_two("fn f(T{a}: T, #[from(f)] T{a: c}: T){}", FixtureInfo::default(), 1)]
-    #[case::find_all(
-        "fn f(T{a}: T, z: u32, S(a,b): S, x: u32, (f , g, h): (u32, String, f32)){}",
-        FixtureInfo::default(),
-        3
-    )]
-    #[case::base_in_test("fn f(T{a}: T){}", RsTestInfo::default(), 1)]
-    #[case::not_case_or_values(
-        "fn f(#[case] T{a}: T, #[values(T::a(),T::b())] T{v}: T, S{e}: S){}",
-        RsTestInfo::default(),
-        1
-    )]
-    #[case::mixed_more(
-        r#"fn wrong_destruct_fixture(
-            T(a, b): T,
-            #[case] T(e, f): T,
-            #[values(T(1, 2))] T(g, h): T,
-        ) {}"#,
-        RsTestInfo::default(),
-        1
-    )]
-    fn destruct_implicit_from_should_return_error(
-        #[case] f: &str,
-        #[case] mut info: impl ExtendWithFunctionAttrs + IsImplicitFixture,
-        #[case] n: usize,
-    ) {
-        let mut f: ItemFn = f.ast();
-
-        info.extend_with_function_attrs(&mut f).unwrap();
-
-        let errors = destruct_fixture_without_from(&f, &info);
-
-        let out = errors
-            .map(|e| format!("{:?}", e))
-            .collect::<Vec<_>>()
-            .join("\n");
-
-        assert_in!(out, messages::DESTRUCT_WITHOUT_FROM);
-        assert_eq!(n, out.lines().count())
-    }
-
-    #[rstest]
-    #[case::happy_fixture("fn f(#[from(b)] T{a}: T){}", FixtureInfo::default())]
-    #[case::happy_test("fn f(#[from(b)] T{a}: T){}", RsTestInfo::default())]
-    #[case::some_cases_or_values(
-        "fn f(#[case] T{a}: T, #[values(T::a(),T::b())] T{v}: T){}",
-        RsTestInfo::default()
-    )]
-    fn destruct_not_implicit_should_not_return_error(
-        #[case] f: &str,
-        #[case] mut info: impl ExtendWithFunctionAttrs + IsImplicitFixture,
-    ) {
-        let mut f: ItemFn = f.ast();
-
-        info.extend_with_function_attrs(&mut f).unwrap();
-
-        let errors = destruct_fixture_without_from(&f, &info);
-
-        assert_eq!(0, errors.count());
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/lib.rs
deleted file mode 100644
index 317c2402..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/lib.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-#![allow(clippy::test_attr_in_doctest)]
-#![allow(unexpected_cfgs)]
-#![cfg_attr(use_proc_macro_diagnostic, feature(proc_macro_diagnostic))]
-extern crate proc_macro;
-
-// Test utility module
-#[cfg(test)]
-pub(crate) mod test;
-
-#[macro_use]
-mod error;
-mod parse;
-mod refident;
-mod render;
-mod resolver;
-mod utils;
-
-use syn::{parse_macro_input, ItemFn};
-
-use crate::parse::{fixture::FixtureInfo, rstest::RsTestInfo};
-use parse::ExtendWithFunctionAttrs;
-use quote::ToTokens;
-
-#[allow(missing_docs)]
-#[proc_macro_attribute]
-pub fn fixture(
-    args: proc_macro::TokenStream,
-    input: proc_macro::TokenStream,
-) -> proc_macro::TokenStream {
-    let mut info: FixtureInfo = parse_macro_input!(args as FixtureInfo);
-    let mut fixture = parse_macro_input!(input as ItemFn);
-
-    let extend_result = info.extend_with_function_attrs(&mut fixture);
-
-    let mut errors = error::fixture(&fixture, &info);
-
-    if let Err(attrs_errors) = extend_result {
-        attrs_errors.to_tokens(&mut errors);
-    }
-
-    if errors.is_empty() {
-        render::fixture(fixture, info)
-    } else {
-        errors
-    }
-    .into()
-}
-
-#[allow(missing_docs)]
-#[proc_macro_attribute]
-pub fn rstest(
-    args: proc_macro::TokenStream,
-    input: proc_macro::TokenStream,
-) -> proc_macro::TokenStream {
-    let mut test = parse_macro_input!(input as ItemFn);
-    let mut info = parse_macro_input!(args as RsTestInfo);
-
-    let extend_result = info.extend_with_function_attrs(&mut test);
-
-    let mut errors = error::rstest(&test, &info);
-
-    if let Err(attrs_errors) = extend_result {
-        attrs_errors.to_tokens(&mut errors);
-    }
-
-    if errors.is_empty() {
-        if info.data.has_list_values() {
-            render::matrix(test, info)
-        } else if info.data.has_cases() {
-            render::parametrize(test, info)
-        } else {
-            render::single(test, info)
-        }
-    } else {
-        errors
-    }
-    .into()
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/arguments.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/arguments.rs
deleted file mode 100644
index ab2b94b9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/arguments.rs
+++ /dev/null
@@ -1,321 +0,0 @@
-use std::collections::HashMap;
-
-use quote::format_ident;
-use syn::{FnArg, Ident, Pat};
-
-use crate::{
-    refident::{IntoPat, MaybeIdent, MaybePatType, MaybePatTypeMut},
-    resolver::pat_invert_mutability,
-};
-
-#[derive(PartialEq, Debug, Clone, Copy)]
-#[allow(dead_code)]
-#[derive(Default)]
-pub(crate) enum FutureArg {
-    #[default]
-    None,
-    Define,
-    Await,
-}
-
-#[derive(Clone, PartialEq, Default, Debug)]
-pub(crate) struct ArgumentInfo {
-    future: FutureArg,
-    by_ref: bool,
-    ignore: bool,
-    inner_pat: Option<Pat>, // Optional pat used to inject data and call test function
-}
-
-impl ArgumentInfo {
-    fn future(future: FutureArg) -> Self {
-        Self {
-            future,
-            ..Default::default()
-        }
-    }
-
-    fn by_ref() -> Self {
-        Self {
-            by_ref: true,
-            ..Default::default()
-        }
-    }
-
-    fn ignore() -> Self {
-        Self {
-            ignore: true,
-            ..Default::default()
-        }
-    }
-
-    fn inner_pat(pat: Pat) -> Self {
-        Self {
-            inner_pat: Some(pat),
-            ..Default::default()
-        }
-    }
-
-    fn is_future(&self) -> bool {
-        use FutureArg::*;
-
-        matches!(self.future, Define | Await)
-    }
-
-    fn is_future_await(&self) -> bool {
-        use FutureArg::*;
-
-        matches!(self.future, Await)
-    }
-
-    fn is_by_ref(&self) -> bool {
-        self.by_ref
-    }
-
-    fn is_ignore(&self) -> bool {
-        self.ignore
-    }
-}
-
-#[derive(Clone, PartialEq, Default, Debug)]
-struct Args {
-    args: HashMap<Pat, ArgumentInfo>,
-}
-
-impl Args {
-    fn get(&self, pat: &Pat) -> Option<&ArgumentInfo> {
-        self.args
-            .get(pat)
-            .or_else(|| self.args.get(&pat_invert_mutability(pat)))
-    }
-
-    fn entry(&mut self, pat: Pat) -> std::collections::hash_map::Entry<Pat, ArgumentInfo> {
-        self.args.entry(pat)
-    }
-}
-
-#[derive(Clone, PartialEq, Default, Debug)]
-pub(crate) struct ArgumentsInfo {
-    args: Args,
-    is_global_await: bool,
-    once: Option<syn::Attribute>,
-}
-
-impl ArgumentsInfo {
-    pub(crate) fn set_future(&mut self, pat: Pat, kind: FutureArg) {
-        self.args
-            .entry(pat)
-            .and_modify(|v| v.future = kind)
-            .or_insert_with(|| ArgumentInfo::future(kind));
-    }
-
-    pub(crate) fn set_futures(&mut self, futures: impl Iterator<Item = (Pat, FutureArg)>) {
-        futures.for_each(|(pat, k)| self.set_future(pat, k));
-    }
-
-    pub(crate) fn set_global_await(&mut self, is_global_await: bool) {
-        self.is_global_await = is_global_await;
-    }
-
-    #[allow(dead_code)]
-    pub(crate) fn add_future(&mut self, pat: Pat) {
-        self.set_future(pat, FutureArg::Define);
-    }
-
-    pub(crate) fn is_future(&self, pat: &Pat) -> bool {
-        self.args
-            .get(pat)
-            .map(|arg| arg.is_future())
-            .unwrap_or_default()
-    }
-
-    pub(crate) fn is_future_await(&self, pat: &Pat) -> bool {
-        match self.args.get(pat) {
-            Some(arg) => arg.is_future_await() || (arg.is_future() && self.is_global_await()),
-            None => false,
-        }
-    }
-
-    pub(crate) fn is_global_await(&self) -> bool {
-        self.is_global_await
-    }
-
-    pub(crate) fn set_once(&mut self, once: Option<syn::Attribute>) {
-        self.once = once
-    }
-
-    pub(crate) fn get_once(&self) -> Option<&syn::Attribute> {
-        self.once.as_ref()
-    }
-
-    pub(crate) fn is_once(&self) -> bool {
-        self.get_once().is_some()
-    }
-
-    pub(crate) fn set_by_ref(&mut self, pat: Pat) {
-        self.args
-            .entry(pat)
-            .and_modify(|v| v.by_ref = true)
-            .or_insert_with(ArgumentInfo::by_ref);
-    }
-
-    pub(crate) fn set_ignore(&mut self, pat: Pat) {
-        self.args
-            .entry(pat)
-            .and_modify(|v| v.ignore = true)
-            .or_insert_with(ArgumentInfo::ignore);
-    }
-
-    pub(crate) fn set_by_refs(&mut self, by_refs: impl Iterator<Item = Pat>) {
-        by_refs.for_each(|pat| self.set_by_ref(pat));
-    }
-
-    pub(crate) fn set_ignores(&mut self, ignores: impl Iterator<Item = Pat>) {
-        ignores.for_each(|pat| self.set_ignore(pat));
-    }
-
-    pub(crate) fn is_by_refs(&self, id: &Pat) -> bool {
-        self.args
-            .get(id)
-            .map(|arg| arg.is_by_ref())
-            .unwrap_or_default()
-    }
-
-    pub(crate) fn is_ignore(&self, pat: &Pat) -> bool {
-        self.args
-            .get(pat)
-            .map(|arg| arg.is_ignore())
-            .unwrap_or_default()
-    }
-
-    pub(crate) fn set_inner_pat(&mut self, pat: Pat, inner: Pat) {
-        self.args
-            .entry(pat)
-            .and_modify(|v| v.inner_pat = Some(inner.clone()))
-            .or_insert_with(|| ArgumentInfo::inner_pat(inner));
-    }
-
-    pub(crate) fn set_inner_ident(&mut self, pat: Pat, ident: Ident) {
-        self.set_inner_pat(pat, ident.into_pat());
-    }
-
-    pub(crate) fn inner_pat<'arguments: 'pat_ref, 'pat_ref>(
-        &'arguments self,
-        id: &'pat_ref Pat,
-    ) -> &'pat_ref Pat {
-        self.args
-            .get(id)
-            .and_then(|arg| arg.inner_pat.as_ref())
-            .unwrap_or(id)
-    }
-
-    pub(crate) fn register_inner_destructored_idents_names(&mut self, item_fn: &syn::ItemFn) {
-        let mut anonymous_destruct = 0_usize;
-        // On the signature we remove all destruct arguments and replace them with `__destruct_{id}`
-        // This is just to define the new arguments and local variable that we use in the test
-        // and coll the original signature that should preserve the destruct arguments.
-        for arg in item_fn.sig.inputs.iter() {
-            if let Some(pt) = arg.maybe_pat_type() {
-                if pt.maybe_ident().is_none() {
-                    anonymous_destruct += 1;
-                    let ident = format_ident!("__destruct_{}", anonymous_destruct);
-                    self.set_inner_ident(pt.pat.as_ref().clone(), ident);
-                }
-            }
-        }
-    }
-
-    pub(crate) fn replace_fn_args_with_related_inner_pat<'a>(
-        &'a self,
-        fn_args: impl Iterator<Item = FnArg> + 'a,
-    ) -> impl Iterator<Item = FnArg> + 'a {
-        fn_args.map(|mut fn_arg| {
-            if let Some(p) = fn_arg.maybe_pat_type_mut() {
-                p.pat = Box::new(self.inner_pat(p.pat.as_ref()).clone());
-            }
-            fn_arg
-        })
-    }
-}
-
-#[cfg(test)]
-mod should_implement_is_future_await_logic {
-    use super::*;
-    use crate::test::*;
-
-    #[fixture]
-    fn info() -> ArgumentsInfo {
-        let mut a = ArgumentsInfo::default();
-        a.set_future(pat("simple"), FutureArg::Define);
-        a.set_future(pat("other_simple"), FutureArg::Define);
-        a.set_future(pat("awaited"), FutureArg::Await);
-        a.set_future(pat("other_awaited"), FutureArg::Await);
-        a.set_future(pat("none"), FutureArg::None);
-        a
-    }
-
-    #[rstest]
-    fn no_matching_ident(info: ArgumentsInfo) {
-        assert!(!info.is_future_await(&pat("some")));
-        assert!(!info.is_future_await(&pat("simple")));
-        assert!(!info.is_future_await(&pat("none")));
-    }
-
-    #[rstest]
-    fn matching_ident(info: ArgumentsInfo) {
-        assert!(info.is_future_await(&pat("awaited")));
-        assert!(info.is_future_await(&pat("other_awaited")));
-    }
-
-    #[rstest]
-    fn global_matching_future_ident(mut info: ArgumentsInfo) {
-        info.set_global_await(true);
-        assert!(info.is_future_await(&pat("simple")));
-        assert!(info.is_future_await(&pat("other_simple")));
-        assert!(info.is_future_await(&pat("awaited")));
-
-        assert!(!info.is_future_await(&pat("some")));
-        assert!(!info.is_future_await(&pat("none")));
-    }
-}
-
-#[cfg(test)]
-mod should_register_inner_destructored_idents_names {
-    use super::*;
-    use crate::test::{assert_eq, *};
-
-    #[test]
-    fn implement_the_correct_pat_reolver() {
-        let item_fn = "fn test_function(A(a,b): A, (c,d,e): (u32, u32, u32), none: u32, B{s,d} : B, clean: C) {}".ast();
-
-        let mut arguments = ArgumentsInfo::default();
-
-        arguments.register_inner_destructored_idents_names(&item_fn);
-
-        assert_eq!(arguments.inner_pat(&pat("A(a,b)")), &pat("__destruct_1"));
-        assert_eq!(arguments.inner_pat(&pat("(c,d,e)")), &pat("__destruct_2"));
-        assert_eq!(arguments.inner_pat(&pat("none")), &pat("none"));
-        assert_eq!(arguments.inner_pat(&pat("B{s,d}")), &pat("__destruct_3"));
-        assert_eq!(arguments.inner_pat(&pat("clean")), &pat("clean"));
-    }
-
-    #[test]
-    fn and_replace_them_correctly() {
-        let item_fn = "fn test_function(A(a,b): A, (c,d,e): (u32, u32, u32), none: u32, B{s,d} : B, clean: C) {}".ast();
-
-        let mut arguments = ArgumentsInfo::default();
-
-        arguments.register_inner_destructored_idents_names(&item_fn);
-
-        let new_args = arguments
-            .replace_fn_args_with_related_inner_pat(item_fn.sig.inputs.into_iter())
-            .filter_map(|f| f.maybe_ident().cloned())
-            .map(|id| id.to_string())
-            .collect::<Vec<_>>()
-            .join(" | ");
-
-        assert_eq!(
-            new_args,
-            "__destruct_1 | __destruct_2 | none | __destruct_3 | clean"
-        );
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/by_ref.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/by_ref.rs
deleted file mode 100644
index bf59f77..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/by_ref.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-use syn::{visit_mut::VisitMut, ItemFn, Pat};
-
-use crate::error::ErrorsVec;
-
-use super::just_once::JustOnceFnArgAttributeExtractor;
-
-pub(crate) fn extract_by_ref(item_fn: &mut ItemFn) -> Result<Vec<Pat>, ErrorsVec> {
-    let mut extractor = JustOnceFnArgAttributeExtractor::from("by_ref");
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take()
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use rstest_test::assert_in;
-
-    #[rstest]
-    #[case("fn simple(a: u32) {}")]
-    #[case("fn more(a: u32, b: &str) {}")]
-    #[case("fn gen<S: AsRef<str>>(a: u32, b: S) {}")]
-    #[case("fn attr(#[case] a: u32, #[values(1,2)] b: i32) {}")]
-    fn not_change_anything_if_no_by_ref_attribute_found(#[case] item_fn: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let orig = item_fn.clone();
-
-        let by_refs = extract_by_ref(&mut item_fn).unwrap();
-
-        assert_eq!(orig, item_fn);
-        assert!(by_refs.is_empty());
-    }
-
-    #[rstest]
-    #[case::simple("fn f(#[by_ref] a: &u32) {}", "fn f(a: &u32) {}", &["a"])]
-    #[case::more_than_one(
-        "fn f(#[by_ref] a: &u32, #[by_ref] b: &String, #[by_ref] c: &std::collection::HashMap<usize, String>) {}",
-        r#"fn f(a: &u32, 
-                b: &String, 
-                c: &std::collection::HashMap<usize, String>) {}"#,
-        &["a", "b", "c"])]
-    fn extract(#[case] item_fn: &str, #[case] expected: &str, #[case] expected_refs: &[&str]) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let expected: ItemFn = expected.ast();
-
-        let by_refs = extract_by_ref(&mut item_fn).unwrap();
-
-        assert_eq!(expected, item_fn);
-        assert_eq!(by_refs, to_pats!(expected_refs));
-    }
-
-    #[rstest]
-    #[case::no_more_than_one("fn f(#[by_ref] #[by_ref] a: u32) {}", "more than once")]
-    fn raise_error(#[case] item_fn: &str, #[case] message: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-
-        let err = extract_by_ref(&mut item_fn).unwrap_err();
-
-        assert_in!(format!("{:?}", err), message);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/expressions.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/expressions.rs
deleted file mode 100644
index 966fe419..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/expressions.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use syn::{
-    parse::{Parse, ParseStream, Result},
-    Expr, Token,
-};
-
-pub(crate) struct Expressions(Vec<Expr>);
-
-impl Expressions {
-    pub(crate) fn take(self) -> Vec<Expr> {
-        self.0
-    }
-}
-
-impl Parse for Expressions {
-    fn parse(input: ParseStream) -> Result<Self> {
-        let values = input
-            .parse_terminated(Parse::parse, Token![,])?
-            .into_iter()
-            .collect();
-        Ok(Self(values))
-    }
-}
-
-impl From<Expressions> for Vec<Expr> {
-    fn from(expressions: Expressions) -> Self {
-        expressions.0
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/fixture.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/fixture.rs
deleted file mode 100644
index 70fe2018..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/fixture.rs
+++ /dev/null
@@ -1,780 +0,0 @@
-/// `fixture`'s related data and parsing
-use syn::{
-    parse::{Parse, ParseStream},
-    parse_quote,
-    visit_mut::VisitMut,
-    Expr, FnArg, Ident, ItemFn, Pat, Token,
-};
-
-use super::{
-    arguments::ArgumentsInfo,
-    extract_default_return_type, extract_defaults, extract_fixtures, extract_partials_return_type,
-    future::{extract_futures, extract_global_awt},
-    parse_vector_trailing_till_double_comma, Attributes, ExtendWithFunctionAttrs, Fixture,
-};
-use crate::{
-    error::ErrorsVec,
-    parse::extract_once,
-    refident::{IntoPat, MaybeIdent, MaybePat, MaybePatTypeMut, RefPat},
-    utils::attr_is,
-};
-use crate::{parse::Attribute, utils::attr_in};
-use proc_macro2::TokenStream;
-use quote::{format_ident, ToTokens};
-
-#[derive(PartialEq, Debug, Default)]
-pub(crate) struct FixtureInfo {
-    pub(crate) data: FixtureData,
-    pub(crate) attributes: FixtureModifiers,
-    pub(crate) arguments: ArgumentsInfo,
-}
-
-impl Parse for FixtureModifiers {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        Ok(input.parse::<Attributes>()?.into())
-    }
-}
-
-impl Parse for FixtureInfo {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        Ok(if input.is_empty() {
-            Default::default()
-        } else {
-            Self {
-                data: input.parse()?,
-                attributes: input
-                    .parse::<Token![::]>()
-                    .or_else(|_| Ok(Default::default()))
-                    .and_then(|_| input.parse())?,
-                arguments: Default::default(),
-            }
-        })
-    }
-}
-
-impl ExtendWithFunctionAttrs for FixtureInfo {
-    fn extend_with_function_attrs(
-        &mut self,
-        item_fn: &mut ItemFn,
-    ) -> std::result::Result<(), ErrorsVec> {
-        let composed_tuple!(
-            fixtures,
-            defaults,
-            default_return_type,
-            partials_return_type,
-            once,
-            futures,
-            global_awt
-        ) = merge_errors!(
-            extract_fixtures(item_fn),
-            extract_defaults(item_fn),
-            extract_default_return_type(item_fn),
-            extract_partials_return_type(item_fn),
-            extract_once(item_fn),
-            extract_futures(item_fn),
-            extract_global_awt(item_fn)
-        )?;
-        self.data.items.extend(
-            fixtures
-                .into_iter()
-                .map(|f| f.into())
-                .chain(defaults.into_iter().map(|d| d.into())),
-        );
-        if let Some(return_type) = default_return_type {
-            self.attributes.set_default_return_type(return_type);
-        }
-        for (id, return_type) in partials_return_type {
-            self.attributes.set_partial_return_type(id, return_type);
-        }
-        self.arguments.set_once(once);
-        self.arguments.set_global_await(global_awt);
-        self.arguments.set_futures(futures.into_iter());
-        self.arguments
-            .register_inner_destructored_idents_names(item_fn);
-
-        Ok(())
-    }
-}
-
-fn parse_attribute_args_just_once<'a, T: Parse>(
-    attributes: impl Iterator<Item = &'a syn::Attribute>,
-    name: &str,
-) -> (Option<T>, Vec<syn::Error>) {
-    let mut errors = Vec::new();
-    let val = attributes
-        .filter(|&a| attr_is(a, name))
-        .map(|a| (a, a.parse_args::<T>()))
-        .fold(None, |first, (a, res)| match (first, res) {
-            (None, Ok(parsed)) => Some(parsed),
-            (first, Err(err)) => {
-                errors.push(err);
-                first
-            }
-            (first, _) => {
-                errors.push(syn::Error::new_spanned(
-                    a,
-                    crate::error::messages::use_more_than_once(name),
-                ));
-                first
-            }
-        });
-    (val, errors)
-}
-
-/// Simple struct used to visit function attributes and extract Fixtures and
-/// eventually parsing errors
-#[derive(Default)]
-pub(crate) struct FixturesFunctionExtractor(pub(crate) Vec<Fixture>, pub(crate) Vec<syn::Error>);
-
-impl VisitMut for FixturesFunctionExtractor {
-    fn visit_fn_arg_mut(&mut self, node: &mut FnArg) {
-        let arg = match node.maybe_pat_type_mut() {
-            Some(pt) => pt,
-            None => return,
-        };
-        let (extracted, remain): (Vec<_>, Vec<_>) = std::mem::take(&mut arg.attrs)
-            .into_iter()
-            .partition(|attr| attr_in(attr, &["with", "from"]));
-        arg.attrs = remain;
-
-        let (pos, errors) = parse_attribute_args_just_once(extracted.iter(), "with");
-        self.1.extend(errors);
-        let (resolve, errors): (Option<syn::Path>, _) =
-            parse_attribute_args_just_once(extracted.iter(), "from");
-        self.1.extend(errors);
-
-        match (resolve, arg.pat.maybe_ident()) {
-            (Some(res), _) => self.0.push(Fixture::new(
-                arg.pat.as_ref().clone(),
-                res,
-                pos.unwrap_or_default(),
-            )),
-            (None, Some(ident)) if pos.is_some() => self.0.push(Fixture::new(
-                arg.pat.as_ref().clone(),
-                ident.clone().into(),
-                pos.unwrap_or_default(),
-            )),
-            (None, None) if pos.is_some() => {
-                self.1.push(syn::Error::new_spanned(
-                    node,
-                    crate::error::messages::DESTRUCT_WITHOUT_FROM,
-                ));
-            }
-            _ => {}
-        }
-    }
-}
-
-#[derive(PartialEq, Debug, Default)]
-pub(crate) struct FixtureData {
-    pub items: Vec<FixtureItem>,
-}
-
-impl FixtureData {
-    pub(crate) fn fixtures(&self) -> impl Iterator<Item = &Fixture> {
-        self.items.iter().filter_map(|f| match f {
-            FixtureItem::Fixture(ref fixture) => Some(fixture),
-            _ => None,
-        })
-    }
-
-    pub(crate) fn values(&self) -> impl Iterator<Item = &ArgumentValue> {
-        self.items.iter().filter_map(|f| match f {
-            FixtureItem::ArgumentValue(ref value) => Some(value.as_ref()),
-            _ => None,
-        })
-    }
-}
-
-impl Parse for FixtureData {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        if input.peek(Token![::]) {
-            Ok(Default::default())
-        } else {
-            Ok(Self {
-                items: parse_vector_trailing_till_double_comma::<_, Token![,]>(input)?,
-            })
-        }
-    }
-}
-
-#[derive(PartialEq, Debug)]
-pub(crate) struct ArgumentValue {
-    pub arg: Pat,
-    pub expr: Expr,
-}
-
-impl ArgumentValue {
-    pub(crate) fn new(arg: Pat, expr: Expr) -> Self {
-        Self { arg, expr }
-    }
-}
-
-#[derive(PartialEq, Debug)]
-pub(crate) enum FixtureItem {
-    Fixture(Fixture),
-    ArgumentValue(Box<ArgumentValue>),
-}
-
-impl From<Fixture> for FixtureItem {
-    fn from(f: Fixture) -> Self {
-        FixtureItem::Fixture(f)
-    }
-}
-
-impl Parse for FixtureItem {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        if input.peek2(Token![=]) {
-            input.parse::<ArgumentValue>().map(|v| v.into())
-        } else {
-            input.parse::<Fixture>().map(|v| v.into())
-        }
-    }
-}
-
-impl RefPat for FixtureItem {
-    fn pat(&self) -> &Pat {
-        match self {
-            FixtureItem::Fixture(Fixture { ref arg, .. }) => arg,
-            FixtureItem::ArgumentValue(ref av) => &av.arg,
-        }
-    }
-}
-
-impl MaybePat for FixtureItem {
-    fn maybe_pat(&self) -> Option<&syn::Pat> {
-        Some(self.pat())
-    }
-}
-
-impl ToTokens for FixtureItem {
-    fn to_tokens(&self, tokens: &mut TokenStream) {
-        self.pat().to_tokens(tokens)
-    }
-}
-
-impl From<ArgumentValue> for FixtureItem {
-    fn from(av: ArgumentValue) -> Self {
-        FixtureItem::ArgumentValue(Box::new(av))
-    }
-}
-
-impl Parse for ArgumentValue {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        let name: Ident = input.parse()?;
-        let _eq: Token![=] = input.parse()?;
-        let expr = input.parse()?;
-        Ok(ArgumentValue::new(name.into_pat(), expr))
-    }
-}
-
-wrap_attributes!(FixtureModifiers);
-
-impl FixtureModifiers {
-    pub(crate) const DEFAULT_RET_ATTR: &'static str = "default";
-    pub(crate) const PARTIAL_RET_ATTR: &'static str = "partial_";
-
-    pub(crate) fn extract_default_type(&self) -> Option<syn::ReturnType> {
-        self.extract_type(Self::DEFAULT_RET_ATTR)
-    }
-
-    pub(crate) fn extract_partial_type(&self, pos: usize) -> Option<syn::ReturnType> {
-        self.extract_type(&format!("{}{}", Self::PARTIAL_RET_ATTR, pos))
-    }
-
-    pub(crate) fn set_default_return_type(&mut self, return_type: syn::Type) {
-        self.inner.attributes.push(Attribute::Type(
-            format_ident!("{}", Self::DEFAULT_RET_ATTR),
-            Box::new(return_type),
-        ))
-    }
-
-    pub(crate) fn set_partial_return_type(&mut self, id: usize, return_type: syn::Type) {
-        self.inner.attributes.push(Attribute::Type(
-            format_ident!("{}{}", Self::PARTIAL_RET_ATTR, id),
-            Box::new(return_type),
-        ))
-    }
-
-    fn extract_type(&self, attr_name: &str) -> Option<syn::ReturnType> {
-        self.iter()
-            .filter_map(|m| match m {
-                Attribute::Type(name, t) if name == attr_name => Some(parse_quote! { -> #t}),
-                _ => None,
-            })
-            .next()
-    }
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::{assert_eq, *};
-
-    mod parse {
-        use super::{assert_eq, *};
-
-        fn parse_fixture<S: AsRef<str>>(fixture_data: S) -> FixtureInfo {
-            parse_meta(fixture_data)
-        }
-
-        #[test]
-        fn happy_path() {
-            let data = parse_fixture(
-                r#"my_fixture(42, "other"), other(vec![42]), value=42, other_value=vec![1.0]
-                    :: trace :: no_trace(some)"#,
-            );
-
-            let expected = FixtureInfo {
-                data: vec![
-                    fixture("my_fixture", &["42", r#""other""#]).into(),
-                    fixture("other", &["vec![42]"]).into(),
-                    arg_value("value", "42").into(),
-                    arg_value("other_value", "vec![1.0]").into(),
-                ]
-                .into(),
-                attributes: Attributes {
-                    attributes: vec![
-                        Attribute::attr("trace"),
-                        Attribute::tagged("no_trace", vec!["some"]),
-                    ],
-                }
-                .into(),
-                arguments: Default::default(),
-            };
-
-            assert_eq!(expected, data);
-        }
-
-        #[test]
-        fn some_literals() {
-            let args_expressions = literal_expressions_str();
-            let fixture = parse_fixture(&format!("my_fixture({})", args_expressions.join(", ")));
-            let args = fixture.data.fixtures().next().unwrap().positional.clone();
-
-            assert_eq!(to_args!(args_expressions), args.0);
-        }
-
-        #[test]
-        fn empty_fixtures() {
-            let data = parse_fixture(r#"::trace::no_trace(some)"#);
-
-            let expected = FixtureInfo {
-                attributes: Attributes {
-                    attributes: vec![
-                        Attribute::attr("trace"),
-                        Attribute::tagged("no_trace", vec!["some"]),
-                    ],
-                }
-                .into(),
-                ..Default::default()
-            };
-
-            assert_eq!(expected, data);
-        }
-
-        #[test]
-        fn empty_attributes() {
-            let data = parse_fixture(r#"my_fixture(42, "other")"#);
-
-            let expected = FixtureInfo {
-                data: vec![fixture("my_fixture", &["42", r#""other""#]).into()].into(),
-                ..Default::default()
-            };
-
-            assert_eq!(expected, data);
-        }
-
-        #[rstest]
-        #[case("first(42),", 1)]
-        #[case("first(42), second=42,", 2)]
-        #[case(r#"fixture(42, "other"), :: trace"#, 1)]
-        #[case(r#"second=42, fixture(42, "other"), :: trace"#, 2)]
-        fn should_accept_trailing_comma(#[case] input: &str, #[case] expected: usize) {
-            let info: FixtureInfo = input.ast();
-
-            assert_eq!(
-                expected,
-                info.data.fixtures().count() + info.data.values().count()
-            );
-        }
-    }
-}
-
-#[cfg(test)]
-mod extend {
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use syn::ItemFn;
-
-    mod should {
-        use super::{assert_eq, *};
-
-        #[test]
-        fn use_with_attributes() {
-            let to_parse = r#"
-                fn my_fix(#[with(2)] f1: &str, #[with(vec![1,2], "s")] f2: u32) {}
-            "#;
-
-            let mut item_fn: ItemFn = to_parse.ast();
-            let mut info = FixtureInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            let expected = FixtureInfo {
-                data: vec![
-                    fixture("f1", &["2"]).into(),
-                    fixture("f2", &["vec![1,2]", r#""s""#]).into(),
-                ]
-                .into(),
-                ..Default::default()
-            };
-
-            assert!(!format!("{:?}", item_fn).contains("with"));
-            assert_eq!(expected, info);
-        }
-
-        #[test]
-        fn rename_with_attributes() {
-            let mut item_fn = r#"
-                    fn test_fn(
-                        #[from(long_fixture_name)] 
-                        #[with(42, "other")] short: u32, 
-                        #[from(sub_module::fix)]
-                        f: u32,
-                        #[from(simple)]
-                        s: &str,
-                        no_change: i32) {
-                    }
-                    "#
-            .ast();
-
-            let expected = FixtureInfo {
-                data: vec![
-                    fixture("short", &["42", r#""other""#])
-                        .with_resolve("long_fixture_name")
-                        .into(),
-                    fixture("f", &[]).with_resolve("sub_module::fix").into(),
-                    fixture("s", &[]).with_resolve("simple").into(),
-                ]
-                .into(),
-                ..Default::default()
-            };
-
-            let mut data = FixtureInfo::default();
-            data.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            assert_eq!(expected, data);
-        }
-
-        #[test]
-        fn use_default_values_attributes() {
-            let to_parse = r#"
-                fn my_fix(#[default(2)] f1: &str, #[default((vec![1,2], "s"))] f2: (Vec<u32>, &str)) {}
-            "#;
-
-            let mut item_fn: ItemFn = to_parse.ast();
-            let mut info = FixtureInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            let expected = FixtureInfo {
-                data: vec![
-                    arg_value("f1", "2").into(),
-                    arg_value("f2", r#"(vec![1,2], "s")"#).into(),
-                ]
-                .into(),
-                ..Default::default()
-            };
-
-            assert!(!format!("{:?}", item_fn).contains("default"));
-            assert_eq!(expected, info);
-        }
-
-        #[test]
-        fn find_default_return_type() {
-            let mut item_fn: ItemFn = r#"
-                #[simple]
-                #[first(comp)]
-                #[second::default]
-                #[default(impl Iterator<Item=(u32, i32)>)]
-                #[last::more]
-                fn my_fix<I, J>(f1: I, f2: J) -> impl Iterator<Item=(I, J)> {}
-            "#
-            .ast();
-
-            let mut info = FixtureInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            assert_eq!(
-                info.attributes.extract_default_type(),
-                Some(parse_quote! { -> impl Iterator<Item=(u32, i32)> })
-            );
-            assert_eq!(
-                attrs("#[simple]#[first(comp)]#[second::default]#[last::more]"),
-                item_fn.attrs
-            );
-        }
-
-        #[test]
-        fn find_partials_return_type() {
-            let mut item_fn: ItemFn = r#"
-                #[simple]
-                #[first(comp)]
-                #[second::default]
-                #[partial_1(impl Iterator<Item=(u32, J, K)>)]
-                #[partial_2(impl Iterator<Item=(u32, i32, K)>)]
-                #[last::more]
-                fn my_fix<I, J, K>(f1: I, f2: J, f3: K) -> impl Iterator<Item=(I, J, K)> {}
-            "#
-            .ast();
-
-            let mut info = FixtureInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            assert_eq!(
-                info.attributes.extract_partial_type(1),
-                Some(parse_quote! { -> impl Iterator<Item=(u32, J, K)> })
-            );
-            assert_eq!(
-                info.attributes.extract_partial_type(2),
-                Some(parse_quote! { -> impl Iterator<Item=(u32, i32, K)> })
-            );
-            assert_eq!(
-                attrs("#[simple]#[first(comp)]#[second::default]#[last::more]"),
-                item_fn.attrs
-            );
-        }
-
-        #[test]
-        fn find_once_attribute() {
-            let mut item_fn: ItemFn = r#"
-                #[simple]
-                #[first(comp)]
-                #[second::default]
-                #[once]
-                #[last::more]
-                fn my_fix<I, J, K>(f1: I, f2: J, f3: K) -> impl Iterator<Item=(I, J, K)> {}
-            "#
-            .ast();
-
-            let mut info = FixtureInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            assert!(info.arguments.is_once());
-        }
-
-        #[test]
-        fn no_once_attribute() {
-            let mut item_fn: ItemFn = r#"
-                fn my_fix<I, J, K>(f1: I, f2: J, f3: K) -> impl Iterator<Item=(I, J, K)> {}
-            "#
-            .ast();
-
-            let mut info = FixtureInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            assert!(!info.arguments.is_once());
-        }
-
-        #[rstest]
-        fn extract_future() {
-            let mut item_fn = "fn f(#[future] a: u32, b: u32) {}".ast();
-            let expected = "fn f(a: u32, b: u32) {}".ast();
-
-            let mut info = FixtureInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            assert_eq!(item_fn, expected);
-            assert!(info.arguments.is_future(&pat("a")));
-            assert!(!info.arguments.is_future(&pat("b")));
-        }
-
-        mod raise_error {
-            use super::{assert_eq, *};
-            use rstest_test::assert_in;
-
-            #[test]
-            fn for_invalid_expressions() {
-                let mut item_fn: ItemFn = r#"
-                fn my_fix(#[with(valid)] f1: &str, #[with(with(,.,))] f2: u32, #[with(with(use))] f3: u32) {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .unwrap_err();
-
-                assert_eq!(2, errors.len());
-            }
-
-            #[test]
-            fn for_invalid_default_type() {
-                let mut item_fn: ItemFn = r#"
-                    #[default(no<valid::>type)]
-                    fn my_fix<I>() -> I {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .unwrap_err();
-
-                assert_eq!(1, errors.len());
-            }
-
-            #[test]
-            fn with_used_more_than_once() {
-                let mut item_fn: ItemFn = r#"
-                    fn my_fix(#[with(1)] #[with(2)] fixture1: &str, #[with(1)] #[with(2)] #[with(3)] fixture2: &str) {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .err()
-                    .unwrap_or_default();
-
-                assert_eq!(3, errors.len());
-            }
-
-            #[test]
-            fn fixture_destruct_without_from() {
-                let mut item_fn: ItemFn = r#"
-                    fn my_fix(#[with(1)] T{a}: T) {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .err()
-                    .unwrap_or_default();
-
-                assert_in!(errors[0].to_string(), "destruct");
-            }
-
-            #[test]
-            fn from_used_more_than_once() {
-                let mut item_fn: ItemFn = r#"
-                    fn my_fix(#[from(a)] #[from(b)] fixture1: &str, #[from(c)] #[from(d)] #[from(e)] fixture2: &str) {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .err()
-                    .unwrap_or_default();
-
-                assert_eq!(3, errors.len());
-            }
-
-            #[test]
-            fn future_is_used_more_than_once() {
-                let mut item_fn: ItemFn = r#"
-                    fn my_fix(#[future] #[future] fixture1: u32) {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .err()
-                    .unwrap_or_default();
-
-                assert_eq!(1, errors.len());
-                assert_in!(errors[0].to_string(), "more than once");
-            }
-
-            #[test]
-            fn default_used_more_than_once() {
-                let mut item_fn: ItemFn = r#"
-                    fn my_fix(#[default(2)] #[default(3)] f1: u32) {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .err()
-                    .unwrap_or_default();
-
-                assert_eq!(1, errors.len());
-                assert_in!(errors[0].to_string(), "more than once");
-            }
-
-            #[test]
-            fn if_once_is_defined_more_than_once() {
-                let mut item_fn: ItemFn = r#"
-                    #[once]
-                    #[once]
-                    fn my_fix<I>() -> I {}
-                    "#
-                .ast();
-
-                let mut info = FixtureInfo::default();
-
-                let error = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-                assert_in!(
-                    format!("{:?}", error).to_lowercase(),
-                    "cannot use #[once] more than once"
-                );
-            }
-
-            #[test]
-            fn if_default_is_defined_more_than_once() {
-                let mut item_fn: ItemFn = r#"
-                    #[default(u32)]
-                    #[default(u32)]
-                    fn my_fix<I>() -> I {}
-                    "#
-                .ast();
-
-                let mut info = FixtureInfo::default();
-
-                let error = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-                assert_in!(
-                    format!("{:?}", error).to_lowercase(),
-                    "cannot use #[default] more than once"
-                );
-            }
-
-            #[test]
-            fn for_invalid_partial_type() {
-                let mut item_fn: ItemFn = r#"
-                    #[partial_1(no<valid::>type)]
-                    fn my_fix<I>(x: I, y: u32) -> I {}
-                "#
-                .ast();
-
-                let errors = FixtureInfo::default()
-                    .extend_with_function_attrs(&mut item_fn)
-                    .unwrap_err();
-
-                assert_eq!(1, errors.len());
-            }
-
-            #[test]
-            fn if_partial_is_not_correct() {
-                let mut item_fn: ItemFn = r#"
-                    #[partial_not_a_number(u32)]
-                    fn my_fix<I, J>(f1: I, f2: &str) -> I {}
-                    "#
-                .ast();
-
-                let mut info = FixtureInfo::default();
-
-                let error = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-                assert_in!(
-                    format!("{:?}", error).to_lowercase(),
-                    "invalid partial syntax"
-                );
-            }
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/future.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/future.rs
deleted file mode 100644
index ff20988c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/future.rs
+++ /dev/null
@@ -1,241 +0,0 @@
-use quote::{format_ident, ToTokens};
-use syn::{visit_mut::VisitMut, FnArg, Ident, ItemFn, Pat, PatType, Type};
-
-use crate::{error::ErrorsVec, refident::MaybeType};
-
-use super::{
-    arguments::FutureArg,
-    just_once::{
-        AttrBuilder, JustOnceFnArgAttributeExtractor, JustOnceFnAttributeExtractor, Validator,
-    },
-};
-
-pub(crate) fn extract_futures(item_fn: &mut ItemFn) -> Result<Vec<(Pat, FutureArg)>, ErrorsVec> {
-    let mut extractor = JustOnceFnArgAttributeExtractor::<FutureBuilder>::new("future");
-
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take()
-}
-
-pub(crate) fn extract_global_awt(item_fn: &mut ItemFn) -> Result<bool, ErrorsVec> {
-    let mut extractor = JustOnceFnAttributeExtractor::<GlobalAwtBuilder>::new("awt");
-
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take().map(|inner| inner.is_some())
-}
-
-struct GlobalAwtBuilder;
-
-impl AttrBuilder<ItemFn> for GlobalAwtBuilder {
-    type Out = ();
-
-    fn build(_attr: syn::Attribute, _ident: &ItemFn) -> syn::Result<Self::Out> {
-        Ok(())
-    }
-}
-
-impl Validator<ItemFn> for GlobalAwtBuilder {}
-
-struct FutureBuilder;
-
-impl AttrBuilder<Pat> for FutureBuilder {
-    type Out = (Pat, FutureArg);
-
-    fn build(attr: syn::Attribute, pat: &Pat) -> syn::Result<Self::Out> {
-        Self::compute_arguments_kind(&attr).map(|kind| (pat.clone(), kind))
-    }
-}
-
-impl Validator<FnArg> for FutureBuilder {
-    fn validate(arg: &FnArg) -> syn::Result<()> {
-        arg.as_future_impl_type().map(|_| ()).ok_or_else(|| {
-            syn::Error::new_spanned(
-                arg.maybe_type().unwrap().into_token_stream(),
-                "This type cannot used to generate impl Future.".to_owned(),
-            )
-        })
-    }
-}
-
-impl FutureBuilder {
-    fn compute_arguments_kind(arg: &syn::Attribute) -> syn::Result<FutureArg> {
-        if matches!(arg.meta, syn::Meta::Path(_)) {
-            Ok(FutureArg::Define)
-        } else {
-            match arg.parse_args::<Option<Ident>>()? {
-                Some(awt) if awt == format_ident!("awt") => Ok(FutureArg::Await),
-                None => Ok(FutureArg::Define),
-                Some(invalid) => Err(syn::Error::new_spanned(
-                    arg.parse_args::<Option<Ident>>()?.into_token_stream(),
-                    format!("Invalid '{invalid}' #[future(...)] arg."),
-                )),
-            }
-        }
-    }
-}
-
-pub(crate) trait MaybeFutureImplType {
-    fn as_future_impl_type(&self) -> Option<&Type>;
-
-    fn as_mut_future_impl_type(&mut self) -> Option<&mut Type>;
-}
-
-impl MaybeFutureImplType for FnArg {
-    fn as_future_impl_type(&self) -> Option<&Type> {
-        match self {
-            FnArg::Typed(PatType { ty, .. }) if can_impl_future(ty.as_ref()) => Some(ty.as_ref()),
-            _ => None,
-        }
-    }
-
-    fn as_mut_future_impl_type(&mut self) -> Option<&mut Type> {
-        match self {
-            FnArg::Typed(PatType { ty, .. }) if can_impl_future(ty.as_ref()) => Some(ty.as_mut()),
-            _ => None,
-        }
-    }
-}
-
-fn can_impl_future(ty: &Type) -> bool {
-    use Type::*;
-    !matches!(
-        ty,
-        Group(_)
-            | ImplTrait(_)
-            | Infer(_)
-            | Macro(_)
-            | Never(_)
-            | Slice(_)
-            | TraitObject(_)
-            | Verbatim(_)
-    )
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use rstest_test::assert_in;
-
-    #[rstest]
-    #[case("fn simple(a: u32) {}")]
-    #[case("fn more(a: u32, b: &str) {}")]
-    #[case("fn gen<S: AsRef<str>>(a: u32, b: S) {}")]
-    #[case("fn attr(#[case] a: u32, #[values(1,2)] b: i32) {}")]
-    fn not_change_anything_if_no_future_attribute_found(#[case] item_fn: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let orig = item_fn.clone();
-
-        let composed_tuple!(futures, awt) = merge_errors!(
-            extract_futures(&mut item_fn),
-            extract_global_awt(&mut item_fn)
-        )
-        .unwrap();
-
-        assert_eq!(orig, item_fn);
-        assert!(futures.is_empty());
-        assert!(!awt);
-    }
-
-    #[rstest]
-    #[case::simple("fn f(#[future] a: u32) {}", "fn f(a: u32) {}", &[("a", FutureArg::Define)], false)]
-    #[case::global_awt("#[awt] fn f(a: u32) {}", "fn f(a: u32) {}", &[], true)]
-    #[case::global_awt_with_inner_function("#[awt] fn f(a: u32) { fn g(){} }", "fn f(a: u32) { fn g(){} }", &[], true)]
-    #[case::simple_awaited("fn f(#[future(awt)] a: u32) {}", "fn f(a: u32) {}", &[("a", FutureArg::Await)], false)]
-    #[case::simple_awaited_and_global("#[awt] fn f(#[future(awt)] a: u32) {}", "fn f(a: u32) {}", &[("a", FutureArg::Await)], true)]
-    #[case::more_than_one(
-        "fn f(#[future] a: u32, #[future(awt)] b: String, #[future()] c: std::collection::HashMap<usize, String>) {}",
-        r#"fn f(a: u32, 
-                b: String, 
-                c: std::collection::HashMap<usize, String>) {}"#,
-        &[("a", FutureArg::Define), ("b", FutureArg::Await), ("c", FutureArg::Define)],
-        false,
-    )]
-    #[case::just_one(
-        "fn f(a: u32, #[future] b: String) {}",
-        r#"fn f(a: u32, b: String) {}"#,
-        &[("b", FutureArg::Define)],
-        false,
-    )]
-    #[case::just_one_awaited(
-        "fn f(a: u32, #[future(awt)] b: String) {}",
-        r#"fn f(a: u32, b: String) {}"#,
-        &[("b", FutureArg::Await)],
-        false,
-    )]
-    fn extract(
-        #[case] item_fn: &str,
-        #[case] expected: &str,
-        #[case] expected_futures: &[(&str, FutureArg)],
-        #[case] expected_awt: bool,
-    ) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let expected: ItemFn = expected.ast();
-
-        let composed_tuple!(futures, awt) = merge_errors!(
-            extract_futures(&mut item_fn),
-            extract_global_awt(&mut item_fn)
-        )
-        .unwrap();
-
-        assert_eq!(expected, item_fn);
-        assert_eq!(
-            futures,
-            expected_futures
-                .into_iter()
-                .map(|(id, a)| (pat(id), *a))
-                .collect::<Vec<_>>()
-        );
-        assert_eq!(expected_awt, awt);
-    }
-
-    #[rstest]
-    #[case::base(r#"#[awt] fn f(a: u32) {}"#, r#"fn f(a: u32) {}"#)]
-    #[case::two(
-        r#"
-        #[awt]
-        #[awt] 
-        fn f(a: u32) {}
-        "#,
-        r#"fn f(a: u32) {}"#
-    )]
-    #[case::inner(
-        r#"
-        #[one]
-        #[awt] 
-        #[two]
-        fn f(a: u32) {}
-        "#,
-        r#"
-        #[one]
-        #[two]
-        fn f(a: u32) {}
-        "#
-    )]
-    fn remove_all_awt_attributes(#[case] item_fn: &str, #[case] expected: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let expected: ItemFn = expected.ast();
-
-        let _ = extract_global_awt(&mut item_fn);
-
-        assert_eq!(item_fn, expected);
-    }
-
-    #[rstest]
-    #[case::no_more_than_one("fn f(#[future] #[future] a: u32) {}", "more than once")]
-    #[case::no_impl("fn f(#[future] a: impl AsRef<str>) {}", "generate impl Future")]
-    #[case::no_slice("fn f(#[future] a: [i32]) {}", "generate impl Future")]
-    #[case::invalid_arg("fn f(#[future(other)] a: [i32]) {}", "Invalid 'other'")]
-    #[case::no_more_than_one_awt("#[awt] #[awt] fn f(a: u32) {}", "more than once")]
-    fn raise_error(#[case] item_fn: &str, #[case] message: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-
-        let err = merge_errors!(
-            extract_futures(&mut item_fn),
-            extract_global_awt(&mut item_fn)
-        )
-        .unwrap_err();
-
-        assert_in!(format!("{:?}", err), message);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/ignore.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/ignore.rs
deleted file mode 100644
index 52d3985..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/ignore.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-use syn::{visit_mut::VisitMut, ItemFn, Pat};
-
-use crate::error::ErrorsVec;
-
-use super::just_once::JustOnceFnArgAttributeExtractor;
-
-pub(crate) fn extract_ignores(item_fn: &mut ItemFn) -> Result<Vec<Pat>, ErrorsVec> {
-    let mut extractor = JustOnceFnArgAttributeExtractor::from("ignore");
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take()
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use rstest_test::assert_in;
-
-    #[rstest]
-    #[case("fn simple(a: u32) {}")]
-    #[case("fn more(a: u32, b: &str) {}")]
-    #[case("fn gen<S: AsRef<str>>(a: u32, b: S) {}")]
-    #[case("fn attr(#[case] a: u32, #[values(1,2)] b: i32) {}")]
-    fn not_change_anything_if_no_ignore_attribute_found(#[case] item_fn: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let orig = item_fn.clone();
-
-        let by_refs = extract_ignores(&mut item_fn).unwrap();
-
-        assert_eq!(orig, item_fn);
-        assert!(by_refs.is_empty());
-    }
-
-    #[rstest]
-    #[case::simple("fn f(#[ignore] a: u32) {}", "fn f(a: u32) {}", &["a"])]
-    #[case::more_than_one(
-        "fn f(#[ignore] a: u32, #[ignore] b: String, #[ignore] c: std::collection::HashMap<usize, String>) {}",
-        r#"fn f(a: u32, 
-                b: String, 
-                c: std::collection::HashMap<usize, String>) {}"#,
-        &["a", "b", "c"])]
-    fn extract(#[case] item_fn: &str, #[case] expected: &str, #[case] expected_refs: &[&str]) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let expected: ItemFn = expected.ast();
-
-        let by_refs = extract_ignores(&mut item_fn).unwrap();
-
-        assert_eq!(expected, item_fn);
-        assert_eq!(by_refs, to_pats!(expected_refs));
-    }
-
-    #[test]
-    fn raise_error() {
-        let mut item_fn: ItemFn = "fn f(#[ignore] #[ignore] a: u32) {}".ast();
-
-        let err = extract_ignores(&mut item_fn).unwrap_err();
-
-        assert_in!(format!("{:?}", err), "more than once");
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/just_once.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/just_once.rs
deleted file mode 100644
index 303d556e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/just_once.rs
+++ /dev/null
@@ -1,214 +0,0 @@
-use std::marker::PhantomData;
-
-use quote::ToTokens;
-use syn::{visit_mut::VisitMut, Attribute, FnArg, ItemFn, Pat};
-
-use crate::{error::ErrorsVec, refident::MaybePat, utils::attr_is};
-
-pub trait AttrBuilder<E> {
-    type Out;
-
-    fn build(attr: Attribute, extra: &E) -> syn::Result<Self::Out>;
-}
-
-pub trait Validator<T> {
-    fn validate(_arg: &T) -> syn::Result<()> {
-        Ok(())
-    }
-}
-
-impl AttrBuilder<Pat> for () {
-    type Out = Pat;
-
-    fn build(_attr: Attribute, pat: &Pat) -> syn::Result<Self::Out> {
-        Ok(pat.clone())
-    }
-}
-
-impl AttrBuilder<ItemFn> for () {
-    type Out = Attribute;
-
-    fn build(attr: Attribute, _item_fn: &ItemFn) -> syn::Result<Self::Out> {
-        Ok(attr.clone())
-    }
-}
-
-impl<T> Validator<T> for () {}
-
-/// Simple struct used to visit function argument attributes and extract attributes that match
-/// the `name`: Only one attribute is allowed for arguments.
-pub struct JustOnceFnArgAttributeExtractor<'a, B = ()>
-where
-    B: AttrBuilder<Pat>,
-{
-    name: &'a str,
-    elements: Vec<B::Out>,
-    errors: Vec<syn::Error>,
-    _phantom: PhantomData<B>,
-}
-
-impl<'a> From<&'a str> for JustOnceFnArgAttributeExtractor<'a, ()> {
-    fn from(value: &'a str) -> Self {
-        Self::new(value)
-    }
-}
-
-impl<'a, B> JustOnceFnArgAttributeExtractor<'a, B>
-where
-    B: AttrBuilder<Pat>,
-{
-    pub fn new(name: &'a str) -> Self {
-        Self {
-            name,
-            elements: Default::default(),
-            errors: Default::default(),
-            _phantom: PhantomData,
-        }
-    }
-
-    pub fn take(self) -> Result<Vec<B::Out>, ErrorsVec> {
-        if self.errors.is_empty() {
-            Ok(self.elements)
-        } else {
-            Err(self.errors.into())
-        }
-    }
-}
-
-impl<B> VisitMut for JustOnceFnArgAttributeExtractor<'_, B>
-where
-    B: AttrBuilder<Pat>,
-    B: Validator<FnArg>,
-{
-    fn visit_fn_arg_mut(&mut self, node: &mut FnArg) {
-        let pat = match node.maybe_pat() {
-            Some(pat) => pat.clone(),
-            None => return,
-        };
-        if let FnArg::Typed(ref mut arg) = node {
-            // Extract interesting attributes
-            let attrs = std::mem::take(&mut arg.attrs);
-            let (extracted, remain): (Vec<_>, Vec<_>) =
-                attrs.into_iter().partition(|a| attr_is(a, self.name));
-
-            arg.attrs = remain;
-
-            let parsed = extracted
-                .into_iter()
-                .map(|attr| B::build(attr.clone(), &pat).map(|t| (attr, t)))
-                .collect::<Result<Vec<_>, _>>();
-
-            match parsed {
-                Ok(data) => match data.len() {
-                    1 => match B::validate(node) {
-                        Ok(_) => self.elements.extend(data.into_iter().map(|(_attr, t)| t)),
-                        Err(e) => {
-                            self.errors.push(e);
-                        }
-                    },
-
-                    0 => {}
-                    _ => {
-                        self.errors
-                            .extend(data.into_iter().skip(1).map(|(attr, _t)| {
-                                syn::Error::new_spanned(
-                                    attr.into_token_stream(),
-                                    format!("Cannot use #[{}] more than once.", self.name),
-                                )
-                            }));
-                    }
-                },
-                Err(e) => {
-                    self.errors.push(e);
-                }
-            }
-        }
-    }
-}
-
-/// Simple struct used to visit function attributes and extract attributes that match
-/// the `name`: Only one attribute is allowed for arguments.
-pub struct JustOnceFnAttributeExtractor<'a, B = ()>
-where
-    B: AttrBuilder<ItemFn>,
-{
-    name: &'a str,
-    inner: Result<Option<B::Out>, Vec<syn::Error>>,
-    _phantom: PhantomData<B>,
-}
-
-impl<'a> From<&'a str> for JustOnceFnAttributeExtractor<'a, ()> {
-    fn from(value: &'a str) -> Self {
-        Self::new(value)
-    }
-}
-
-impl<'a, B> JustOnceFnAttributeExtractor<'a, B>
-where
-    B: AttrBuilder<ItemFn>,
-{
-    pub fn new(name: &'a str) -> Self {
-        Self {
-            name,
-            inner: Ok(Default::default()),
-            _phantom: PhantomData,
-        }
-    }
-
-    pub fn take(self) -> Result<Option<B::Out>, ErrorsVec> {
-        self.inner.map_err(Into::into)
-    }
-}
-
-impl<B> VisitMut for JustOnceFnAttributeExtractor<'_, B>
-where
-    B: AttrBuilder<ItemFn>,
-    B: Validator<ItemFn>,
-{
-    fn visit_item_fn_mut(&mut self, item_fn: &mut ItemFn) {
-        // Extract interesting attributes
-        let attrs = std::mem::take(&mut item_fn.attrs);
-        let (extracted, remain): (Vec<_>, Vec<_>) =
-            attrs.into_iter().partition(|a| attr_is(a, self.name));
-
-        item_fn.attrs = remain;
-
-        let parsed = extracted
-            .into_iter()
-            .map(|attr| B::build(attr.clone(), item_fn).map(|t| (attr, t)))
-            .collect::<Result<Vec<_>, _>>();
-        let mut errors = Vec::default();
-        let mut out = None;
-
-        match parsed {
-            Ok(data) => match data.len() {
-                1 => match B::validate(item_fn) {
-                    Ok(_) => {
-                        out = data.into_iter().next().map(|(_attr, t)| t);
-                    }
-                    Err(e) => {
-                        errors.push(e);
-                    }
-                },
-
-                0 => {}
-                _ => {
-                    errors.extend(data.into_iter().skip(1).map(|(attr, _t)| {
-                        syn::Error::new_spanned(
-                            attr.into_token_stream(),
-                            format!("Cannot use #[{}] more than once.", self.name),
-                        )
-                    }));
-                }
-            },
-            Err(e) => {
-                errors.push(e);
-            }
-        };
-        if errors.is_empty() {
-            self.inner = Ok(out);
-        } else {
-            self.inner = Err(errors);
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/macros.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/macros.rs
deleted file mode 100644
index beb47cf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/macros.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-macro_rules! wrap_attributes {
-    ($ident:ident) => {
-        #[derive(Default, Debug, PartialEq, Clone)]
-        pub(crate) struct $ident {
-            inner: Attributes,
-        }
-
-        impl From<Attributes> for $ident {
-            fn from(inner: Attributes) -> Self {
-                $ident { inner }
-            }
-        }
-
-        impl $ident {
-            fn iter(&self) -> impl Iterator<Item = &Attribute> {
-                self.inner.attributes.iter()
-            }
-        }
-
-        impl $ident {
-            #[allow(dead_code)]
-            pub(crate) fn append(&mut self, attr: Attribute) {
-                self.inner.attributes.push(attr)
-            }
-        }
-    };
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/mod.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/mod.rs
deleted file mode 100644
index baef9e66..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/mod.rs
+++ /dev/null
@@ -1,603 +0,0 @@
-use proc_macro2::TokenStream;
-use syn::{
-    parse::{Parse, ParseStream},
-    parse_quote,
-    punctuated::Punctuated,
-    token::{self, Async, Paren},
-    visit_mut::VisitMut,
-    FnArg, Ident, ItemFn, Pat, Token,
-};
-
-use crate::{
-    error::ErrorsVec,
-    parse::just_once::{AttrBuilder, JustOnceFnAttributeExtractor, Validator},
-    refident::{IntoPat, MaybeIdent, MaybePat},
-    utils::{attr_is, attr_starts_with},
-};
-use fixture::{ArgumentValue, FixtureModifiers, FixturesFunctionExtractor};
-use quote::ToTokens;
-use testcase::TestCase;
-
-use self::{
-    expressions::Expressions, just_once::JustOnceFnArgAttributeExtractor, vlist::ValueList,
-};
-
-// To use the macros this should be the first one module
-#[macro_use]
-pub(crate) mod macros;
-
-pub(crate) mod arguments;
-pub(crate) mod by_ref;
-pub(crate) mod expressions;
-pub(crate) mod fixture;
-pub(crate) mod future;
-pub(crate) mod ignore;
-pub(crate) mod just_once;
-pub(crate) mod rstest;
-pub(crate) mod testcase;
-pub(crate) mod vlist;
-
-pub(crate) trait ExtendWithFunctionAttrs {
-    fn extend_with_function_attrs(
-        &mut self,
-        item_fn: &mut ItemFn,
-    ) -> std::result::Result<(), ErrorsVec>;
-}
-
-#[derive(Default, Debug, PartialEq, Clone)]
-pub(crate) struct Attributes {
-    pub(crate) attributes: Vec<Attribute>,
-}
-
-impl Parse for Attributes {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        let vars = Punctuated::<Attribute, Token![::]>::parse_terminated(input)?;
-        Ok(Attributes {
-            attributes: vars.into_iter().collect(),
-        })
-    }
-}
-
-#[derive(Debug, PartialEq, Clone)]
-pub(crate) enum Attribute {
-    Attr(Ident),
-    Tagged(Ident, Vec<Pat>),
-    Type(Ident, Box<syn::Type>),
-}
-
-impl Parse for Attribute {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        if input.peek2(Token![<]) {
-            let tag = input.parse()?;
-            let _open = input.parse::<Token![<]>()?;
-            let inner = input.parse()?;
-            let _close = input.parse::<Token![>]>()?;
-            Ok(Attribute::Type(tag, inner))
-        } else if input.peek2(Token![::]) {
-            let inner = input.parse()?;
-            Ok(Attribute::Attr(inner))
-        } else if input.peek2(token::Paren) {
-            let tag = input.parse()?;
-            let content;
-            let _ = syn::parenthesized!(content in input);
-            let args = Punctuated::<Ident, Token![,]>::parse_terminated(&content)?
-                .into_iter()
-                .map(IntoPat::into_pat)
-                .collect();
-
-            Ok(Attribute::Tagged(tag, args))
-        } else {
-            Ok(Attribute::Attr(input.parse()?))
-        }
-    }
-}
-
-fn parse_vector_trailing_till_double_comma<T, P>(input: ParseStream) -> syn::Result<Vec<T>>
-where
-    T: Parse,
-    P: syn::token::Token + Parse,
-{
-    Ok(
-        Punctuated::<Option<T>, P>::parse_separated_nonempty_with(input, |input_tokens| {
-            if input_tokens.is_empty() || input_tokens.peek(Token![::]) {
-                Ok(None)
-            } else {
-                T::parse(input_tokens).map(Some)
-            }
-        })?
-        .into_iter()
-        .flatten()
-        .collect(),
-    )
-}
-
-#[allow(dead_code)]
-pub(crate) fn drain_stream(input: ParseStream) {
-    // JUST TO SKIP ALL
-    let _ = input.step(|cursor| {
-        let mut rest = *cursor;
-        while let Some((_, next)) = rest.token_tree() {
-            rest = next
-        }
-        Ok(((), rest))
-    });
-}
-
-#[derive(PartialEq, Debug, Clone, Default)]
-pub(crate) struct Positional(pub(crate) Vec<syn::Expr>);
-
-impl Parse for Positional {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        Ok(Self(
-            Punctuated::<syn::Expr, Token![,]>::parse_terminated(input)?
-                .into_iter()
-                .collect(),
-        ))
-    }
-}
-
-#[derive(PartialEq, Debug, Clone)]
-pub(crate) struct Fixture {
-    pub(crate) arg: Pat,
-    pub(crate) resolve: syn::Path,
-    pub(crate) positional: Positional,
-}
-
-impl Fixture {
-    pub(crate) fn new(arg: Pat, resolve: syn::Path, positional: Positional) -> Self {
-        Self {
-            arg,
-            resolve,
-            positional,
-        }
-    }
-}
-
-impl Parse for Fixture {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        let resolve: syn::Path = input.parse()?;
-        if input.peek(Paren) || input.peek(Token![as]) {
-            let positional = if input.peek(Paren) {
-                let content;
-                let _ = syn::parenthesized!(content in input);
-                content.parse()?
-            } else {
-                Default::default()
-            };
-
-            if input.peek(Token![as]) {
-                let _: Token![as] = input.parse()?;
-                let ident: Ident = input.parse()?;
-                Ok(Self::new(ident.into_pat(), resolve, positional))
-            } else {
-                let name = resolve.get_ident().ok_or_else(|| {
-                    syn::Error::new_spanned(
-                        resolve.to_token_stream(),
-                        "Should be an ident".to_string(),
-                    )
-                })?;
-                Ok(Self::new(
-                    name.clone().into_pat(),
-                    name.clone().into(),
-                    positional,
-                ))
-            }
-        } else {
-            Err(syn::Error::new(
-                input.span(),
-                "fixture need arguments or 'as new_name' format",
-            ))
-        }
-    }
-}
-
-impl ToTokens for Fixture {
-    fn to_tokens(&self, tokens: &mut TokenStream) {
-        self.arg.to_tokens(tokens)
-    }
-}
-
-pub(crate) fn extract_fixtures(item_fn: &mut ItemFn) -> Result<Vec<Fixture>, ErrorsVec> {
-    let mut fixtures_extractor = FixturesFunctionExtractor::default();
-    fixtures_extractor.visit_item_fn_mut(item_fn);
-
-    if fixtures_extractor.1.is_empty() {
-        Ok(fixtures_extractor.0)
-    } else {
-        Err(fixtures_extractor.1.into())
-    }
-}
-pub(crate) fn extract_defaults(item_fn: &mut ItemFn) -> Result<Vec<ArgumentValue>, ErrorsVec> {
-    struct DefaultBuilder;
-    impl AttrBuilder<Pat> for DefaultBuilder {
-        type Out = ArgumentValue;
-
-        fn build(attr: syn::Attribute, name: &Pat) -> syn::Result<Self::Out> {
-            attr.parse_args::<syn::Expr>()
-                .map(|e| ArgumentValue::new(name.clone(), e))
-        }
-    }
-    impl Validator<syn::FnArg> for DefaultBuilder {}
-
-    let mut extractor = JustOnceFnArgAttributeExtractor::<DefaultBuilder>::new("default");
-    extractor.visit_item_fn_mut(item_fn);
-
-    extractor.take()
-}
-
-pub(crate) fn extract_default_return_type(
-    item_fn: &mut ItemFn,
-) -> Result<Option<syn::Type>, ErrorsVec> {
-    struct DefaultTypeBuilder;
-    impl AttrBuilder<ItemFn> for DefaultTypeBuilder {
-        type Out = syn::Type;
-
-        fn build(attr: syn::Attribute, _extra: &ItemFn) -> syn::Result<Self::Out> {
-            attr.parse_args::<syn::Type>()
-        }
-    }
-    impl Validator<syn::ItemFn> for DefaultTypeBuilder {}
-
-    let mut extractor =
-        JustOnceFnAttributeExtractor::<DefaultTypeBuilder>::new(FixtureModifiers::DEFAULT_RET_ATTR);
-
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take()
-}
-
-pub(crate) fn extract_partials_return_type(
-    item_fn: &mut ItemFn,
-) -> Result<Vec<(usize, syn::Type)>, ErrorsVec> {
-    let mut partials_type_extractor = PartialsTypeFunctionExtractor::default();
-    partials_type_extractor.visit_item_fn_mut(item_fn);
-    partials_type_extractor.take()
-}
-
-pub(crate) fn extract_once(item_fn: &mut ItemFn) -> Result<Option<syn::Attribute>, ErrorsVec> {
-    let mut extractor = JustOnceFnAttributeExtractor::from("once");
-
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take()
-}
-
-pub(crate) fn extract_argument_attrs<'a, B: 'a + std::fmt::Debug>(
-    node: &mut FnArg,
-    is_valid_attr: fn(&syn::Attribute) -> bool,
-    build: impl Fn(syn::Attribute) -> syn::Result<B> + 'a,
-) -> Box<dyn Iterator<Item = syn::Result<B>> + 'a> {
-    let name = node.maybe_ident().cloned();
-    if name.is_none() {
-        return Box::new(std::iter::empty());
-    }
-
-    if let FnArg::Typed(ref mut arg) = node {
-        // Extract interesting attributes
-        let attrs = std::mem::take(&mut arg.attrs);
-        let (extracted, remain): (Vec<_>, Vec<_>) = attrs.into_iter().partition(is_valid_attr);
-
-        arg.attrs = remain;
-
-        // Parse attrs
-        Box::new(extracted.into_iter().map(build))
-    } else {
-        Box::new(std::iter::empty())
-    }
-}
-
-/// Simple struct used to visit function attributes and extract default return
-/// type
-struct PartialsTypeFunctionExtractor(Result<Vec<(usize, syn::Type)>, ErrorsVec>);
-
-impl PartialsTypeFunctionExtractor {
-    fn take(self) -> Result<Vec<(usize, syn::Type)>, ErrorsVec> {
-        self.0
-    }
-}
-
-impl Default for PartialsTypeFunctionExtractor {
-    fn default() -> Self {
-        Self(Ok(Vec::default()))
-    }
-}
-
-impl VisitMut for PartialsTypeFunctionExtractor {
-    fn visit_item_fn_mut(&mut self, node: &mut ItemFn) {
-        let attrs = std::mem::take(&mut node.attrs);
-        let (partials, remain): (Vec<_>, Vec<_>) =
-            attrs
-                .into_iter()
-                .partition(|attr| match attr.path().get_ident() {
-                    Some(name) => name
-                        .to_string()
-                        .starts_with(FixtureModifiers::PARTIAL_RET_ATTR),
-                    None => false,
-                });
-
-        node.attrs = remain;
-        let mut errors = ErrorsVec::default();
-        let mut data: Vec<(usize, syn::Type)> = Vec::default();
-        for attr in partials {
-            match attr.parse_args::<syn::Type>() {
-                Ok(t) => {
-                    match attr.path().get_ident().unwrap().to_string()
-                        [FixtureModifiers::PARTIAL_RET_ATTR.len()..]
-                        .parse()
-                    {
-                        Ok(id) => data.push((id, t)),
-                        Err(_) => errors.push(syn::Error::new_spanned(
-                            attr,
-                            "Invalid partial syntax: should be partial_<n_arguments>",
-                        )),
-                    }
-                }
-                Err(e) => errors.push(e),
-            }
-        }
-        self.0 = if errors.len() > 0 {
-            Err(errors)
-        } else {
-            Ok(data)
-        };
-    }
-}
-
-pub(crate) fn extract_case_args(item_fn: &mut ItemFn) -> Result<Vec<Pat>, ErrorsVec> {
-    let mut extractor = JustOnceFnArgAttributeExtractor::from("case");
-    extractor.visit_item_fn_mut(item_fn);
-
-    extractor.take()
-}
-
-/// Simple struct used to visit function attributes and extract cases and
-/// eventualy parsing errors
-#[derive(Default)]
-struct CasesFunctionExtractor(Vec<TestCase>, Vec<syn::Error>);
-
-impl VisitMut for CasesFunctionExtractor {
-    fn visit_item_fn_mut(&mut self, node: &mut ItemFn) {
-        let attrs = std::mem::take(&mut node.attrs);
-        let mut attrs_buffer = Default::default();
-        let case: syn::PathSegment = parse_quote! { case };
-        for attr in attrs.into_iter() {
-            if attr_starts_with(&attr, &case) {
-                match attr.parse_args::<Expressions>() {
-                    Ok(expressions) => {
-                        let description =
-                            attr.path().segments.iter().nth(1).map(|p| p.ident.clone());
-                        self.0.push(TestCase {
-                            args: expressions.into(),
-                            attrs: std::mem::take(&mut attrs_buffer),
-                            description,
-                        });
-                    }
-                    Err(err) => self.1.push(err),
-                };
-            } else {
-                attrs_buffer.push(attr)
-            }
-        }
-        node.attrs = std::mem::take(&mut attrs_buffer);
-    }
-}
-
-pub(crate) fn extract_cases(item_fn: &mut ItemFn) -> Result<Vec<TestCase>, ErrorsVec> {
-    let mut cases_extractor = CasesFunctionExtractor::default();
-    cases_extractor.visit_item_fn_mut(item_fn);
-
-    if cases_extractor.1.is_empty() {
-        Ok(cases_extractor.0)
-    } else {
-        Err(cases_extractor.1.into())
-    }
-}
-
-pub(crate) fn extract_value_list(item_fn: &mut ItemFn) -> Result<Vec<ValueList>, ErrorsVec> {
-    struct ValueListBuilder;
-    impl AttrBuilder<Pat> for ValueListBuilder {
-        type Out = ValueList;
-
-        fn build(attr: syn::Attribute, extra: &Pat) -> syn::Result<Self::Out> {
-            attr.parse_args::<Expressions>().map(|v| ValueList {
-                arg: extra.clone(),
-                values: v.take().into_iter().map(|e| e.into()).collect(),
-            })
-        }
-    }
-    impl Validator<FnArg> for ValueListBuilder {}
-
-    let mut extractor = JustOnceFnArgAttributeExtractor::<ValueListBuilder>::new("values");
-
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take()
-}
-
-/// Simple struct used to visit function args attributes to extract the
-/// excluded ones and eventualy parsing errors
-struct ExcludedTraceAttributesFunctionExtractor(Result<Vec<Pat>, ErrorsVec>);
-impl From<Result<Vec<Pat>, ErrorsVec>> for ExcludedTraceAttributesFunctionExtractor {
-    fn from(inner: Result<Vec<Pat>, ErrorsVec>) -> Self {
-        Self(inner)
-    }
-}
-
-impl ExcludedTraceAttributesFunctionExtractor {
-    pub(crate) fn take(self) -> Result<Vec<Pat>, ErrorsVec> {
-        self.0
-    }
-
-    fn update_error(&mut self, mut errors: ErrorsVec) {
-        match &mut self.0 {
-            Ok(_) => self.0 = Err(errors),
-            Err(err) => err.append(&mut errors),
-        }
-    }
-
-    fn update_excluded(&mut self, value: Pat) {
-        if let Some(inner) = self.0.iter_mut().next() {
-            inner.push(value);
-        }
-    }
-}
-
-impl Default for ExcludedTraceAttributesFunctionExtractor {
-    fn default() -> Self {
-        Self(Ok(Default::default()))
-    }
-}
-
-impl VisitMut for ExcludedTraceAttributesFunctionExtractor {
-    fn visit_fn_arg_mut(&mut self, node: &mut FnArg) {
-        let pat = match node.maybe_pat().cloned() {
-            Some(pat) => pat,
-            None => return,
-        };
-        for r in extract_argument_attrs(node, |a| attr_is(a, "notrace"), |_a| Ok(())) {
-            match r {
-                Ok(_) => self.update_excluded(pat.clone()),
-                Err(err) => self.update_error(err.into()),
-            }
-        }
-
-        syn::visit_mut::visit_fn_arg_mut(self, node);
-    }
-}
-
-pub(crate) fn extract_excluded_trace(item_fn: &mut ItemFn) -> Result<Vec<Pat>, ErrorsVec> {
-    let mut excluded_trace_extractor = ExcludedTraceAttributesFunctionExtractor::default();
-    excluded_trace_extractor.visit_item_fn_mut(item_fn);
-    excluded_trace_extractor.take()
-}
-
-/// Simple struct used to visit function args attributes to check timeout syntax
-struct CheckTimeoutAttributesFunction(Result<(), ErrorsVec>);
-impl From<ErrorsVec> for CheckTimeoutAttributesFunction {
-    fn from(errors: ErrorsVec) -> Self {
-        Self(Err(errors))
-    }
-}
-
-impl CheckTimeoutAttributesFunction {
-    pub(crate) fn take(self) -> Result<(), ErrorsVec> {
-        self.0
-    }
-
-    fn check_if_can_implement_timeous(
-        &self,
-        timeouts: &[&syn::Attribute],
-        asyncness: Option<&Async>,
-    ) -> Option<syn::Error> {
-        if cfg!(feature = "async-timeout") || timeouts.is_empty() {
-            None
-        } else {
-            asyncness.map(|a| {
-                syn::Error::new(
-                    a.span,
-                    "Enable async-timeout feature to use timeout in async tests",
-                )
-            })
-        }
-    }
-}
-
-impl Default for CheckTimeoutAttributesFunction {
-    fn default() -> Self {
-        Self(Ok(()))
-    }
-}
-
-impl VisitMut for CheckTimeoutAttributesFunction {
-    fn visit_item_fn_mut(&mut self, node: &mut ItemFn) {
-        let timeouts = node
-            .attrs
-            .iter()
-            .filter(|&a| attr_is(a, "timeout"))
-            .collect::<Vec<_>>();
-        let mut errors = timeouts
-            .iter()
-            .map(|&attr| attr.parse_args::<syn::Expr>())
-            .filter_map(Result::err)
-            .collect::<Vec<_>>();
-
-        if let Some(e) =
-            self.check_if_can_implement_timeous(timeouts.as_slice(), node.sig.asyncness.as_ref())
-        {
-            errors.push(e);
-        }
-        if !errors.is_empty() {
-            *self = Self(Err(errors.into()));
-        }
-    }
-}
-
-pub(crate) fn check_timeout_attrs(item_fn: &mut ItemFn) -> Result<(), ErrorsVec> {
-    let mut checker = CheckTimeoutAttributesFunction::default();
-    checker.visit_item_fn_mut(item_fn);
-    checker.take()
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::*;
-
-    mod parse_attributes {
-        use super::assert_eq;
-        use super::*;
-
-        fn parse_attributes<S: AsRef<str>>(attributes: S) -> Attributes {
-            parse_meta(attributes)
-        }
-
-        #[test]
-        fn one_simple_ident() {
-            let attributes = parse_attributes("my_ident");
-
-            let expected = Attributes {
-                attributes: vec![Attribute::attr("my_ident")],
-            };
-
-            assert_eq!(expected, attributes);
-        }
-
-        #[test]
-        fn one_simple_group() {
-            let attributes = parse_attributes("group_tag(first, second)");
-
-            let expected = Attributes {
-                attributes: vec![Attribute::tagged("group_tag", vec!["first", "second"])],
-            };
-
-            assert_eq!(expected, attributes);
-        }
-
-        #[test]
-        fn one_simple_type() {
-            let attributes = parse_attributes("type_tag<(u32, T, (String, i32))>");
-
-            let expected = Attributes {
-                attributes: vec![Attribute::typed("type_tag", "(u32, T, (String, i32))")],
-            };
-
-            assert_eq!(expected, attributes);
-        }
-
-        #[test]
-        fn integrated() {
-            let attributes = parse_attributes(
-                r#"
-            simple :: tagged(first, second) :: type_tag<(u32, T, (std::string::String, i32))> :: more_tagged(a,b)"#,
-            );
-
-            let expected = Attributes {
-                attributes: vec![
-                    Attribute::attr("simple"),
-                    Attribute::tagged("tagged", vec!["first", "second"]),
-                    Attribute::typed("type_tag", "(u32, T, (std::string::String, i32))"),
-                    Attribute::tagged("more_tagged", vec!["a", "b"]),
-                ],
-            };
-
-            assert_eq!(expected, attributes);
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest.rs
deleted file mode 100644
index 4ee2dea..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest.rs
+++ /dev/null
@@ -1,1073 +0,0 @@
-use syn::{
-    parse::{Parse, ParseStream},
-    Ident, ItemFn, Pat, Token,
-};
-
-use self::files::{extract_files, ValueListFromFiles};
-
-use super::{
-    arguments::ArgumentsInfo,
-    by_ref::extract_by_ref,
-    check_timeout_attrs, extract_case_args, extract_cases, extract_excluded_trace,
-    extract_fixtures, extract_value_list,
-    future::{extract_futures, extract_global_awt},
-    ignore::extract_ignores,
-    parse_vector_trailing_till_double_comma,
-    testcase::TestCase,
-    Attribute, Attributes, ExtendWithFunctionAttrs, Fixture,
-};
-use crate::{error::ErrorsVec, refident::IntoPat};
-use crate::{parse::vlist::ValueList, refident::MaybePat};
-use proc_macro2::{Span, TokenStream};
-use quote::{format_ident, ToTokens};
-
-pub(crate) mod files;
-
-#[derive(PartialEq, Debug, Default)]
-pub(crate) struct RsTestInfo {
-    pub(crate) data: RsTestData,
-    pub(crate) attributes: RsTestAttributes,
-    pub(crate) arguments: ArgumentsInfo,
-}
-
-impl Parse for RsTestInfo {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        Ok(if input.is_empty() {
-            Default::default()
-        } else {
-            Self {
-                data: input.parse()?,
-                attributes: input
-                    .parse::<Token![::]>()
-                    .or_else(|_| Ok(Default::default()))
-                    .and_then(|_| input.parse())?,
-                arguments: Default::default(),
-            }
-        })
-    }
-}
-
-impl ExtendWithFunctionAttrs for RsTestInfo {
-    fn extend_with_function_attrs(&mut self, item_fn: &mut ItemFn) -> Result<(), ErrorsVec> {
-        let composed_tuple!(_inner, excluded, _timeout, futures, global_awt, by_refs, ignores) = merge_errors!(
-            self.data.extend_with_function_attrs(item_fn),
-            extract_excluded_trace(item_fn),
-            check_timeout_attrs(item_fn),
-            extract_futures(item_fn),
-            extract_global_awt(item_fn),
-            extract_by_ref(item_fn),
-            extract_ignores(item_fn)
-        )?;
-        self.attributes.add_notraces(excluded);
-        self.arguments.set_global_await(global_awt);
-        self.arguments.set_futures(futures.into_iter());
-        self.arguments.set_by_refs(by_refs.into_iter());
-        self.arguments.set_ignores(ignores.into_iter());
-        self.arguments
-            .register_inner_destructored_idents_names(item_fn);
-        Ok(())
-    }
-}
-
-#[derive(PartialEq, Debug, Default)]
-pub(crate) struct RsTestData {
-    pub(crate) items: Vec<RsTestItem>,
-}
-
-impl RsTestData {
-    pub(crate) fn case_args(&self) -> impl Iterator<Item = &Pat> {
-        self.items.iter().filter_map(|it| match it {
-            RsTestItem::CaseArgName(ref arg) => Some(arg),
-            _ => None,
-        })
-    }
-
-    #[allow(dead_code)]
-    pub(crate) fn has_case_args(&self) -> bool {
-        self.case_args().next().is_some()
-    }
-
-    pub(crate) fn cases(&self) -> impl Iterator<Item = &TestCase> {
-        self.items.iter().filter_map(|it| match it {
-            RsTestItem::TestCase(ref case) => Some(case),
-            _ => None,
-        })
-    }
-
-    pub(crate) fn has_cases(&self) -> bool {
-        self.cases().next().is_some()
-    }
-
-    pub(crate) fn fixtures(&self) -> impl Iterator<Item = &Fixture> {
-        self.items.iter().filter_map(|it| match it {
-            RsTestItem::Fixture(ref fixture) => Some(fixture),
-            _ => None,
-        })
-    }
-
-    #[allow(dead_code)]
-    pub(crate) fn has_fixtures(&self) -> bool {
-        self.fixtures().next().is_some()
-    }
-
-    pub(crate) fn list_values(&self) -> impl Iterator<Item = &ValueList> {
-        self.items.iter().filter_map(|mv| match mv {
-            RsTestItem::ValueList(ref value_list) => Some(value_list),
-            _ => None,
-        })
-    }
-
-    pub(crate) fn has_list_values(&self) -> bool {
-        self.list_values().next().is_some()
-    }
-}
-
-impl Parse for RsTestData {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        if input.peek(Token![::]) {
-            Ok(Default::default())
-        } else {
-            Ok(Self {
-                items: parse_vector_trailing_till_double_comma::<_, Token![,]>(input)?,
-            })
-        }
-    }
-}
-
-impl ExtendWithFunctionAttrs for RsTestData {
-    fn extend_with_function_attrs(&mut self, item_fn: &mut ItemFn) -> Result<(), ErrorsVec> {
-        let composed_tuple!(fixtures, case_args, cases, value_list, files) = merge_errors!(
-            extract_fixtures(item_fn),
-            extract_case_args(item_fn),
-            extract_cases(item_fn),
-            extract_value_list(item_fn),
-            extract_files(item_fn)
-        )?;
-
-        self.items.extend(fixtures.into_iter().map(|f| f.into()));
-        self.items.extend(case_args.into_iter().map(|f| f.into()));
-        self.items.extend(cases.into_iter().map(|f| f.into()));
-        self.items.extend(value_list.into_iter().map(|f| f.into()));
-        self.items.extend(
-            ValueListFromFiles::default()
-                .to_value_list(files)?
-                .into_iter()
-                .map(|f| f.into()),
-        );
-        Ok(())
-    }
-}
-
-#[derive(PartialEq, Debug)]
-pub(crate) enum RsTestItem {
-    Fixture(Fixture),
-    CaseArgName(Pat),
-    TestCase(TestCase),
-    ValueList(ValueList),
-}
-
-impl MaybePat for Fixture {
-    fn maybe_pat(&self) -> Option<&syn::Pat> {
-        Some(&self.arg)
-    }
-}
-
-impl MaybePat for RsTestItem {
-    fn maybe_pat(&self) -> Option<&syn::Pat> {
-        match self {
-            RsTestItem::Fixture(f) => f.maybe_pat(),
-            RsTestItem::CaseArgName(c) => Some(c),
-            RsTestItem::TestCase(_) => None,
-            RsTestItem::ValueList(vl) => Some(&vl.arg),
-        }
-    }
-}
-
-impl From<Fixture> for RsTestItem {
-    fn from(f: Fixture) -> Self {
-        RsTestItem::Fixture(f)
-    }
-}
-
-impl From<Pat> for RsTestItem {
-    fn from(pat: Pat) -> Self {
-        RsTestItem::CaseArgName(pat)
-    }
-}
-
-impl From<Ident> for RsTestItem {
-    fn from(ident: Ident) -> Self {
-        RsTestItem::CaseArgName(ident.into_pat())
-    }
-}
-
-impl From<TestCase> for RsTestItem {
-    fn from(case: TestCase) -> Self {
-        RsTestItem::TestCase(case)
-    }
-}
-
-impl From<ValueList> for RsTestItem {
-    fn from(value_list: ValueList) -> Self {
-        RsTestItem::ValueList(value_list)
-    }
-}
-
-impl Parse for RsTestItem {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        if input.fork().parse::<TestCase>().is_ok() {
-            input.parse::<TestCase>().map(RsTestItem::TestCase)
-        } else if input.peek2(Token![=>]) {
-            input.parse::<ValueList>().map(RsTestItem::ValueList)
-        } else if input.fork().parse::<Fixture>().is_ok() {
-            input.parse::<Fixture>().map(RsTestItem::Fixture)
-        } else if input.fork().parse::<Ident>().is_ok() {
-            input
-                .parse::<Ident>()
-                .map(IntoPat::into_pat)
-                .map(RsTestItem::CaseArgName)
-        } else {
-            Err(syn::Error::new(Span::call_site(), "Cannot parse it"))
-        }
-    }
-}
-
-impl ToTokens for RsTestItem {
-    fn to_tokens(&self, tokens: &mut TokenStream) {
-        use RsTestItem::*;
-        match self {
-            Fixture(ref fixture) => fixture.to_tokens(tokens),
-            CaseArgName(ref case_arg) => case_arg.to_tokens(tokens),
-            TestCase(ref case) => case.to_tokens(tokens),
-            ValueList(ref list) => list.to_tokens(tokens),
-        }
-    }
-}
-
-wrap_attributes!(RsTestAttributes);
-
-impl RsTestAttributes {
-    const TRACE_VARIABLE_ATTR: &'static str = "trace";
-    const NOTRACE_VARIABLE_ATTR: &'static str = "notrace";
-
-    pub(crate) fn trace_me(&self, pat: &Pat) -> bool {
-        if self.should_trace() {
-            !self.iter().any(|m| Self::is_notrace(pat, m))
-        } else {
-            false
-        }
-    }
-
-    fn is_notrace(pat: &Pat, m: &Attribute) -> bool {
-        match m {
-            Attribute::Tagged(i, args) if i == Self::NOTRACE_VARIABLE_ATTR => {
-                args.iter().any(|a| a == pat)
-            }
-            _ => false,
-        }
-    }
-
-    pub(crate) fn should_trace(&self) -> bool {
-        self.iter().any(Self::is_trace)
-    }
-
-    pub(crate) fn add_trace(&mut self, trace: Ident) {
-        self.inner.attributes.push(Attribute::Attr(trace));
-    }
-
-    pub(crate) fn add_notraces(&mut self, notraces: Vec<Pat>) {
-        if notraces.is_empty() {
-            return;
-        }
-        self.inner.attributes.push(Attribute::Tagged(
-            format_ident!("{}", Self::NOTRACE_VARIABLE_ATTR),
-            notraces,
-        ));
-    }
-
-    fn is_trace(m: &Attribute) -> bool {
-        matches!(m, Attribute::Attr(i) if i == Self::TRACE_VARIABLE_ATTR)
-    }
-}
-
-impl Parse for RsTestAttributes {
-    fn parse(input: ParseStream) -> syn::Result<Self> {
-        Ok(input.parse::<Attributes>()?.into())
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use rstest_test::assert_in;
-
-    mod parse_rstest_data {
-        use super::assert_eq;
-        use super::*;
-
-        fn parse_rstest_data<S: AsRef<str>>(fixtures: S) -> RsTestData {
-            parse_meta(fixtures)
-        }
-
-        #[test]
-        fn one_arg() {
-            let fixtures = parse_rstest_data("my_fixture(42)");
-
-            let expected = RsTestData {
-                items: vec![fixture("my_fixture", &["42"]).into()],
-            };
-
-            assert_eq!(expected, fixtures);
-        }
-    }
-
-    #[test]
-    fn should_check_all_timeout_to_catch_the_right_errors() {
-        let mut item_fn = r#"
-            #[timeout(<some>)]
-            #[timeout(42)]
-            #[timeout]
-            #[timeout(Duration::from_millis(20))]
-            fn test_fn(#[case] arg: u32) {
-            }
-        "#
-        .ast();
-
-        let mut info = RsTestInfo::default();
-
-        let errors = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-        assert_eq!(2, errors.len());
-    }
-
-    #[cfg(feature = "async-timeout")]
-    #[test]
-    fn should_parse_async_timeout() {
-        let mut item_fn = r#"
-            #[timeout(Duration::from_millis(20))]
-            async fn test_fn(#[case] arg: u32) {
-            }
-        "#
-        .ast();
-
-        let mut info = RsTestInfo::default();
-
-        info.extend_with_function_attrs(&mut item_fn).unwrap();
-    }
-
-    #[cfg(not(feature = "async-timeout"))]
-    #[test]
-    fn should_return_error_for_async_timeout() {
-        let mut item_fn = r#"
-            #[timeout(Duration::from_millis(20))]
-            async fn test_fn(#[case] arg: u32) {
-            }
-        "#
-        .ast();
-
-        let mut info = RsTestInfo::default();
-
-        let errors = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-        assert_eq!(1, errors.len());
-        assert!(format!("{:?}", errors).contains("async-timeout feature"))
-    }
-
-    fn parse_rstest<S: AsRef<str>>(rstest_data: S) -> RsTestInfo {
-        parse_meta(rstest_data)
-    }
-
-    mod no_cases {
-        use super::{assert_eq, *};
-
-        #[test]
-        fn happy_path() {
-            let data = parse_rstest(
-                r#"my_fixture(42, "other"), other(vec![42])
-            :: trace :: no_trace(some)"#,
-            );
-
-            let expected = RsTestInfo {
-                data: vec![
-                    fixture("my_fixture", &["42", r#""other""#]).into(),
-                    fixture("other", &["vec![42]"]).into(),
-                ]
-                .into(),
-                attributes: Attributes {
-                    attributes: vec![
-                        Attribute::attr("trace"),
-                        Attribute::tagged("no_trace", vec!["some"]),
-                    ],
-                }
-                .into(),
-                ..Default::default()
-            };
-
-            assert_eq!(expected, data);
-        }
-
-        mod fixture_extraction {
-            use super::{assert_eq, *};
-
-            #[test]
-            fn rename() {
-                let data = parse_rstest(
-                    r#"long_fixture_name(42, "other") as short, sub_module::fix as f, simple as s, no_change()"#,
-                );
-
-                let expected = RsTestInfo {
-                    data: vec![
-                        fixture("short", &["42", r#""other""#])
-                            .with_resolve("long_fixture_name")
-                            .into(),
-                        fixture("f", &[]).with_resolve("sub_module::fix").into(),
-                        fixture("s", &[]).with_resolve("simple").into(),
-                        fixture("no_change", &[]).into(),
-                    ]
-                    .into(),
-                    ..Default::default()
-                };
-
-                assert_eq!(expected, data);
-            }
-
-            #[test]
-            fn rename_with_attributes() {
-                let mut item_fn = r#"
-                    fn test_fn(
-                        #[from(long_fixture_name)] 
-                        #[with(42, "other")] short: u32, 
-                        #[from(simple)]
-                        s: &str,
-                        #[from(sub_module::fix)]
-                        f: u32,
-                        no_change: i32) {
-                    }
-                    "#
-                .ast();
-
-                let expected = RsTestInfo {
-                    data: vec![
-                        fixture("short", &["42", r#""other""#])
-                            .with_resolve("long_fixture_name")
-                            .into(),
-                        fixture("s", &[]).with_resolve("simple").into(),
-                        fixture("f", &[]).with_resolve("sub_module::fix").into(),
-                    ]
-                    .into(),
-                    ..Default::default()
-                };
-
-                let mut data = RsTestInfo::default();
-
-                data.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                assert_eq!(expected, data);
-            }
-
-            #[test]
-            fn defined_via_with_attributes() {
-                let mut item_fn = r#"
-                    fn test_fn(#[with(42, "other")] my_fixture: u32, #[with(vec![42])] other: &str) {
-                    }
-                    "#
-                .ast();
-
-                let expected = RsTestInfo {
-                    data: vec![
-                        fixture("my_fixture", &["42", r#""other""#]).into(),
-                        fixture("other", &["vec![42]"]).into(),
-                    ]
-                    .into(),
-                    ..Default::default()
-                };
-
-                let mut data = RsTestInfo::default();
-
-                data.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                assert_eq!(expected, data);
-            }
-        }
-
-        #[test]
-        fn empty_fixtures() {
-            let data = parse_rstest(r#"::trace::no_trace(some)"#);
-
-            let expected = RsTestInfo {
-                attributes: Attributes {
-                    attributes: vec![
-                        Attribute::attr("trace"),
-                        Attribute::tagged("no_trace", vec!["some"]),
-                    ],
-                }
-                .into(),
-                ..Default::default()
-            };
-
-            assert_eq!(expected, data);
-        }
-
-        #[test]
-        fn empty_attributes() {
-            let data = parse_rstest(r#"my_fixture(42, "other")"#);
-
-            let expected = RsTestInfo {
-                data: vec![fixture("my_fixture", &["42", r#""other""#]).into()].into(),
-                ..Default::default()
-            };
-
-            assert_eq!(expected, data);
-        }
-
-        #[test]
-        fn extract_notrace_args_attribute() {
-            let mut item_fn = r#"
-            fn test_fn(#[notrace] a: u32, #[something_else] b: &str, #[notrace] c: i32) {
-            }
-            "#
-            .ast();
-
-            let mut info = RsTestInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-            info.attributes.add_trace(ident("trace"));
-
-            assert!(!info.attributes.trace_me(&pat("a")));
-            assert!(info.attributes.trace_me(&pat("b")));
-            assert!(!info.attributes.trace_me(&pat("c")));
-            let b_args = item_fn
-                .sig
-                .inputs
-                .into_iter()
-                .nth(1)
-                .and_then(|id| match id {
-                    syn::FnArg::Typed(arg) => Some(arg.attrs),
-                    _ => None,
-                })
-                .unwrap();
-            assert_eq!(attrs("#[something_else]"), b_args);
-        }
-
-        #[rstest]
-        fn extract_future() {
-            let mut item_fn = "fn f(#[future] a: u32, b: u32) {}".ast();
-            let expected = "fn f(a: u32, b: u32) {}".ast();
-
-            let mut info = RsTestInfo::default();
-
-            info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-            assert_eq!(item_fn, expected);
-            assert!(info.arguments.is_future(&pat("a")));
-            assert!(!info.arguments.is_future(&pat("b")));
-        }
-    }
-
-    mod parametrize_cases {
-        use super::{assert_eq, *};
-
-        #[test]
-        fn one_simple_case_one_arg() {
-            let data = parse_rstest(r#"arg, case(42)"#).data;
-
-            let args = data.case_args().collect::<Vec<_>>();
-            let cases = data.cases().collect::<Vec<_>>();
-
-            assert_eq!(1, args.len());
-            assert_eq!(1, cases.len());
-            assert_eq!("arg", &args[0].display_code());
-            assert_eq!(to_args!(["42"]), cases[0].args())
-        }
-
-        #[test]
-        fn happy_path() {
-            let info = parse_rstest(
-                r#"
-                my_fixture(42,"foo"),
-                arg1, arg2, arg3,
-                case(1,2,3),
-                case(11,12,13),
-                case(21,22,23)
-            "#,
-            );
-
-            let data = info.data;
-            let fixtures = data.fixtures().cloned().collect::<Vec<_>>();
-
-            assert_eq!(vec![fixture("my_fixture", &["42", r#""foo""#])], fixtures);
-            assert_eq!(
-                to_strs!(vec!["arg1", "arg2", "arg3"]),
-                data.case_args()
-                    .map(DisplayCode::display_code)
-                    .collect::<Vec<_>>()
-            );
-
-            let cases = data.cases().collect::<Vec<_>>();
-
-            assert_eq!(3, cases.len());
-            assert_eq!(to_args!(["1", "2", "3"]), cases[0].args());
-            assert_eq!(to_args!(["11", "12", "13"]), cases[1].args());
-            assert_eq!(to_args!(["21", "22", "23"]), cases[2].args());
-        }
-
-        mod defined_via_with_attributes {
-            use super::{assert_eq, *};
-
-            #[test]
-            fn one_case() {
-                let mut item_fn = r#"
-                #[case::first(42, "first")]
-                fn test_fn(#[case] arg1: u32, #[case] arg2: &str) {
-                }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                let case_args = info.data.case_args().cloned().collect::<Vec<_>>();
-                let cases = info.data.cases().cloned().collect::<Vec<_>>();
-
-                assert_eq!(to_pats!(["arg1", "arg2"]), case_args);
-                assert_eq!(
-                    vec![
-                        TestCase::from_iter(["42", r#""first""#].iter()).with_description("first"),
-                    ],
-                    cases
-                );
-            }
-
-            #[test]
-            fn destruct_case() {
-                let mut item_fn: ItemFn = r#"
-                #[case::destruct(T::new(2, 21))]
-                fn test_fn(#[case] T{a, b}: T) {
-                }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                let case_args = info.data.case_args().cloned().collect::<Vec<_>>();
-                let cases = info.data.cases().cloned().collect::<Vec<_>>();
-
-                // Should just remove attributes
-                assert_eq!(
-                    to_fnargs!(["T{a, b}: T"]),
-                    item_fn.sig.inputs.into_iter().collect::<Vec<_>>()
-                );
-                assert_eq!(to_pats!(["T{a, b}"]), case_args);
-                assert_eq!(
-                    vec![
-                        TestCase::from_iter(["T::new(2, 21)"].iter()).with_description("destruct"),
-                    ],
-                    cases
-                );
-                assert_eq!(
-                    info.arguments.inner_pat(&pat("T{a, b}")),
-                    &pat("__destruct_1")
-                );
-            }
-
-            #[test]
-            fn parse_tuple_value() {
-                let mut item_fn = r#"
-                #[case(42, (24, "first"))]
-                fn test_fn(#[case] arg1: u32, #[case] tupled: (u32, &str)) {
-                }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                let cases = info.data.cases().cloned().collect::<Vec<_>>();
-
-                assert_eq!(
-                    vec![TestCase::from_iter(["42", r#"(24, "first")"#].iter()),],
-                    cases
-                );
-            }
-
-            #[test]
-            fn more_cases() {
-                let mut item_fn = r#"
-                #[case::first(42, "first")]
-                #[case(24, "second")]
-                #[case::third(0, "third")]
-                fn test_fn(#[case] arg1: u32, #[case] arg2: &str) {
-                }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                let case_args = info.data.case_args().cloned().collect::<Vec<_>>();
-                let cases = info.data.cases().cloned().collect::<Vec<_>>();
-
-                assert_eq!(to_pats!(["arg1", "arg2"]), case_args);
-                assert_eq!(
-                    vec![
-                        TestCase::from_iter(["42", r#""first""#].iter()).with_description("first"),
-                        TestCase::from_iter(["24", r#""second""#].iter()),
-                        TestCase::from_iter(["0", r#""third""#].iter()).with_description("third"),
-                    ],
-                    cases
-                );
-            }
-
-            #[test]
-            fn should_collect_attributes() {
-                let mut item_fn = r#"
-                    #[first]
-                    #[first2(42)]
-                    #[case(42)]
-                    #[second]
-                    #[case(24)]
-                    #[global]
-                    fn test_fn(#[case] arg: u32) {
-                    }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                let cases = info.data.cases().cloned().collect::<Vec<_>>();
-
-                assert_eq!(
-                    vec![
-                        TestCase::from_iter(["42"].iter()).with_attrs(attrs(
-                            "
-                                #[first]
-                                #[first2(42)]
-                            "
-                        )),
-                        TestCase::from_iter(["24"].iter()).with_attrs(attrs(
-                            "
-                            #[second]
-                        "
-                        )),
-                    ],
-                    cases
-                );
-            }
-
-            #[test]
-            fn should_consume_all_used_attributes() {
-                let mut item_fn = r#"
-                    #[first]
-                    #[first2(42)]
-                    #[case(42)]
-                    #[second]
-                    #[case(24)]
-                    #[global]
-                    fn test_fn(#[case] arg: u32) {
-                    }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                assert_eq!(
-                    item_fn.attrs,
-                    attrs(
-                        "
-                        #[global]
-                        "
-                    )
-                );
-                assert!(!format!("{:?}", item_fn).contains("case"));
-            }
-
-            #[test]
-            fn should_report_all_errors() {
-                let mut item_fn = r#"
-                    #[case(#case_error#)]
-                    fn test_fn(#[case] arg: u32, #[with(#fixture_error#)] err_fixture: u32) {
-                    }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                let errors = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-                assert_eq!(2, errors.len());
-            }
-        }
-
-        #[test]
-        fn should_accept_comma_at_the_end_of_cases() {
-            let data = parse_rstest(
-                r#"
-                arg,
-                case(42),
-            "#,
-            )
-            .data;
-
-            let args = data.case_args().collect::<Vec<_>>();
-            let cases = data.cases().collect::<Vec<_>>();
-
-            assert_eq!(1, args.len());
-            assert_eq!(1, cases.len());
-            assert_eq!("arg", &args[0].display_code());
-            assert_eq!(to_args!(["42"]), cases[0].args())
-        }
-
-        #[test]
-        #[should_panic]
-        fn should_not_accept_invalid_separator_from_args_and_cases() {
-            parse_rstest(
-                r#"
-                ret
-                case::should_success(Ok(())),
-                case::should_fail(Err("Return Error"))
-            "#,
-            );
-        }
-
-        #[test]
-        fn case_could_be_arg_name() {
-            let data = parse_rstest(
-                r#"
-                case,
-                case(42)
-            "#,
-            )
-            .data;
-
-            assert_eq!("case", &data.case_args().next().unwrap().display_code());
-
-            let cases = data.cases().collect::<Vec<_>>();
-
-            assert_eq!(1, cases.len());
-            assert_eq!(to_args!(["42"]), cases[0].args());
-        }
-
-        #[test]
-        fn should_reject_case_args_marked_more_than_once() {
-            let mut item_fn = r#"
-                    #[case(42)]
-                    fn test_fn(#[case] #[case] arg: u32) {
-                    }
-                "#
-            .ast();
-
-            let mut info = RsTestInfo::default();
-
-            let errors = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-            assert_eq!(1, errors.len());
-            assert_in!(errors[0].to_string(), "more than once");
-        }
-    }
-
-    mod matrix_cases {
-
-        use super::{assert_eq, *};
-
-        #[test]
-        fn happy_path() {
-            let info = parse_rstest(
-                r#"
-                    expected => [12, 34 * 2],
-                    input => [format!("aa_{}", 2), "other"],
-                "#,
-            );
-
-            let value_ranges = info.data.list_values().collect::<Vec<_>>();
-            assert_eq!(2, value_ranges.len());
-            assert_eq!(to_args!(["12", "34 * 2"]), value_ranges[0].args());
-            assert_eq!(
-                to_args!([r#"format!("aa_{}", 2)"#, r#""other""#]),
-                value_ranges[1].args()
-            );
-            assert_eq!(info.attributes, Default::default());
-        }
-
-        #[test]
-        fn should_parse_attributes_too() {
-            let info = parse_rstest(
-                r#"
-                                        a => [12, 24, 42]
-                                        ::trace
-                                    "#,
-            );
-
-            assert_eq!(
-                info.attributes,
-                Attributes {
-                    attributes: vec![Attribute::attr("trace")]
-                }
-                .into()
-            );
-        }
-
-        #[test]
-        fn should_parse_injected_fixtures_too() {
-            let info = parse_rstest(
-                r#"
-                a => [12, 24, 42],
-                fixture_1(42, "foo"),
-                fixture_2("bar")
-                "#,
-            );
-
-            let fixtures = info.data.fixtures().cloned().collect::<Vec<_>>();
-
-            assert_eq!(
-                vec![
-                    fixture("fixture_1", &["42", r#""foo""#]),
-                    fixture("fixture_2", &[r#""bar""#])
-                ],
-                fixtures
-            );
-        }
-
-        #[test]
-        #[should_panic(expected = "should not be empty")]
-        fn should_not_compile_if_empty_expression_slice() {
-            parse_rstest(
-                r#"
-                invalid => []
-                "#,
-            );
-        }
-
-        mod defined_via_with_attributes {
-            use super::{assert_eq, *};
-
-            #[test]
-            fn one_arg() {
-                let mut item_fn = r#"
-                fn test_fn(#[values(1, 2, 1+2)] arg1: u32, #[values(format!("a"), "b b".to_owned(), String::new())] arg2: String) {
-                }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                let list_values = info.data.list_values().cloned().collect::<Vec<_>>();
-
-                assert_eq!(2, list_values.len());
-                assert_eq!(to_args!(["1", "2", "1+2"]), list_values[0].args());
-                assert_eq!(
-                    to_args!([r#"format!("a")"#, r#""b b".to_owned()"#, "String::new()"]),
-                    list_values[1].args()
-                );
-            }
-
-            #[test]
-            fn destruct() {
-                let mut item_fn = r#"
-                fn test_fn(#[values(S(1,2), S(3,4))] S(a,b): S, #[values(T::new("a", "b"), T{s: "a" ,t: "c" })] T{s, t}: T) {
-                }
-                "#
-                .ast();
-
-                let mut info = RsTestInfo::default();
-
-                info.extend_with_function_attrs(&mut item_fn).unwrap();
-
-                let list_values = info.data.list_values().cloned().collect::<Vec<_>>();
-
-                // Should just remove attributes
-                assert_eq!(
-                    to_fnargs!(["S(a, b): S", "T{s, t}: T"]),
-                    item_fn.sig.inputs.into_iter().collect::<Vec<_>>()
-                );
-                assert_eq!(2, list_values.len());
-                assert_eq!(list_values[0].arg, pat("S(a, b)"));
-                assert_eq!(to_args!(["S(1,2)", "S(3,4)"]), list_values[0].args());
-                assert_eq!(list_values[1].arg, pat("T{s, t}"));
-                assert_eq!(
-                    to_args!([r#"T::new("a", "b")"#, r#"T{s: "a" ,t: "c" }"#]),
-                    list_values[1].args()
-                );
-                assert_eq!(
-                    info.arguments.inner_pat(&pat("S(a, b)")),
-                    &pat("__destruct_1")
-                );
-                assert_eq!(
-                    info.arguments.inner_pat(&pat("T{s, t}")),
-                    &pat("__destruct_2")
-                );
-            }
-        }
-
-        #[test]
-        fn should_reject_values_attribute_marked_more_than_once() {
-            let mut item_fn = r#"
-                fn test_fn(#[values(1, 2, 1+2)] #[values(1, 2, 1+2)] arg1: u32, ) {
-                }
-                "#
-            .ast();
-
-            let mut info = RsTestInfo::default();
-
-            let errors = info.extend_with_function_attrs(&mut item_fn).unwrap_err();
-
-            assert_eq!(1, errors.len());
-            assert_in!(errors[0].to_string(), "more than once");
-        }
-    }
-
-    mod integrated {
-        use super::{assert_eq, *};
-
-        #[test]
-        fn should_parse_fixture_cases_and_matrix_in_any_order() {
-            let data = parse_rstest(
-                r#"
-                u,
-                m => [1, 2],
-                case(42, A{}, D{}),
-                a,
-                case(43, A{}, D{}),
-                the_fixture(42),
-                mm => ["f", "oo", "BAR"],
-                d
-            "#,
-            )
-            .data;
-
-            let fixtures = data.fixtures().cloned().collect::<Vec<_>>();
-            assert_eq!(vec![fixture("the_fixture", &["42"])], fixtures);
-
-            assert_eq!(
-                to_strs!(vec!["u", "a", "d"]),
-                data.case_args()
-                    .map(DisplayCode::display_code)
-                    .collect::<Vec<_>>()
-            );
-
-            let cases = data.cases().collect::<Vec<_>>();
-            assert_eq!(2, cases.len());
-            assert_eq!(to_args!(["42", "A{}", "D{}"]), cases[0].args());
-            assert_eq!(to_args!(["43", "A{}", "D{}"]), cases[1].args());
-
-            let value_ranges = data.list_values().collect::<Vec<_>>();
-            assert_eq!(2, value_ranges.len());
-            assert_eq!(to_args!(["1", "2"]), value_ranges[0].args());
-            assert_eq!(
-                to_args!([r#""f""#, r#""oo""#, r#""BAR""#]),
-                value_ranges[1].args()
-            );
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest/files.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest/files.rs
deleted file mode 100644
index c4597cb9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest/files.rs
+++ /dev/null
@@ -1,709 +0,0 @@
-use std::{env, path::PathBuf};
-
-use glob::glob;
-use quote::ToTokens;
-use regex::Regex;
-use relative_path::RelativePath;
-use syn::{parse_quote, visit_mut::VisitMut, Attribute, Expr, FnArg, Ident, ItemFn, LitStr};
-
-use crate::{
-    error::ErrorsVec,
-    parse::{
-        extract_argument_attrs,
-        vlist::{Value, ValueList},
-    },
-    refident::{IntoPat, MaybeIdent},
-    utils::attr_is,
-};
-
-#[derive(Debug, Clone, PartialEq)]
-pub(crate) struct FilesGlobReferences {
-    glob: Vec<LitStrAttr>,
-    exclude: Vec<Exclude>,
-    ignore_dot_files: bool,
-}
-
-impl FilesGlobReferences {
-    /// Return the tuples attribute, path string if they are valid relative paths
-    fn paths(&self, base_dir: &PathBuf) -> Result<Vec<(&LitStrAttr, String)>, syn::Error> {
-        self.glob
-            .iter()
-            .map(|attr| {
-                RelativePath::from_path(&attr.value())
-                    .map_err(|e| attr.error(&format!("Invalid glob path: {e}")))
-                    .map(|p| p.to_logical_path(base_dir))
-                    .map(|p| (attr, p.to_string_lossy().into_owned()))
-            })
-            .collect::<Result<Vec<_>, _>>()
-    }
-}
-
-trait RaiseError: ToTokens {
-    fn error(&self, msg: &str) -> syn::Error {
-        syn::Error::new_spanned(self, msg)
-    }
-}
-
-impl RaiseError for Attribute {}
-impl RaiseError for LitStrAttr {}
-impl ToTokens for LitStrAttr {
-    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
-        self.attr.to_tokens(tokens)
-    }
-}
-
-impl FilesGlobReferences {
-    fn new(glob: Vec<LitStrAttr>, exclude: Vec<Exclude>, ignore_dot_files: bool) -> Self {
-        Self {
-            glob,
-            exclude,
-            ignore_dot_files,
-        }
-    }
-
-    fn is_valid(&self, p: &RelativePath) -> bool {
-        if self.ignore_dot_files
-            && p.components()
-                .any(|c| matches!(c, relative_path::Component::Normal(c) if c.starts_with('.')))
-        {
-            return false;
-        }
-        !self.exclude.iter().any(|e| e.r.is_match(p.as_ref()))
-    }
-}
-
-/// An attribute in the form `#[name("some string")]`
-#[derive(Debug, Clone, PartialEq)]
-struct LitStrAttr {
-    attr: Attribute,
-    value: LitStr,
-}
-
-impl LitStrAttr {
-    fn value(&self) -> String {
-        self.value.value()
-    }
-}
-
-impl TryFrom<Attribute> for LitStrAttr {
-    type Error = syn::Error;
-
-    fn try_from(attr: Attribute) -> Result<Self, Self::Error> {
-        let value = attr.parse_args::<LitStr>()?;
-        Ok(Self { attr, value })
-    }
-}
-
-/// The `#[exclude("regex")]` attribute
-#[derive(Debug, Clone)]
-struct Exclude {
-    attr: LitStrAttr,
-    r: Regex,
-}
-
-impl PartialEq for Exclude {
-    fn eq(&self, other: &Self) -> bool {
-        self.attr.value == other.attr.value
-    }
-}
-
-impl TryFrom<Attribute> for Exclude {
-    type Error = syn::Error;
-
-    fn try_from(attr: Attribute) -> Result<Self, Self::Error> {
-        let attr: LitStrAttr = attr.try_into()?;
-        let r = regex::Regex::new(&attr.value()).map_err(|e| {
-            syn::Error::new_spanned(
-                &attr,
-                format!(r#""{}" Should be a valid regex: {e}"#, attr.value()),
-            )
-        })?;
-        Ok(Self { attr, r })
-    }
-}
-
-impl From<Vec<LitStrAttr>> for FilesGlobReferences {
-    fn from(value: Vec<LitStrAttr>) -> Self {
-        Self::new(value, Default::default(), true)
-    }
-}
-
-/// Entry point function to extract files attributes
-pub(crate) fn extract_files(
-    item_fn: &mut ItemFn,
-) -> Result<Vec<(Ident, FilesGlobReferences)>, ErrorsVec> {
-    let mut extractor = ValueFilesExtractor::default();
-    extractor.visit_item_fn_mut(item_fn);
-    extractor.take()
-}
-
-/// Simple struct used to visit function attributes and extract future args to
-/// implement the boilerplate.
-#[derive(Default)]
-struct ValueFilesExtractor {
-    files: Vec<(Ident, FilesGlobReferences)>,
-    errors: Vec<syn::Error>,
-}
-
-impl ValueFilesExtractor {
-    pub(crate) fn take(self) -> Result<Vec<(Ident, FilesGlobReferences)>, ErrorsVec> {
-        if self.errors.is_empty() {
-            Ok(self.files)
-        } else {
-            Err(self.errors.into())
-        }
-    }
-
-    fn collect_errors<T: Default>(&mut self, result: Result<T, syn::Error>) -> T {
-        match result {
-            Ok(v) => v,
-            Err(e) => {
-                self.errors.push(e);
-                T::default()
-            }
-        }
-    }
-
-    fn extract_argument_attrs<'a, B: 'a + std::fmt::Debug>(
-        &mut self,
-        node: &mut FnArg,
-        is_valid_attr: fn(&syn::Attribute) -> bool,
-        build: impl Fn(syn::Attribute) -> syn::Result<B> + 'a,
-    ) -> Vec<B> {
-        self.collect_errors(
-            extract_argument_attrs(node, is_valid_attr, build).collect::<Result<Vec<_>, _>>(),
-        )
-    }
-
-    fn extract_files(&mut self, node: &mut FnArg) -> Vec<LitStrAttr> {
-        self.extract_argument_attrs(node, |a| attr_is(a, "files"), |attr| attr.try_into())
-    }
-
-    fn extract_exclude(&mut self, node: &mut FnArg) -> Vec<Exclude> {
-        self.extract_argument_attrs(node, |a| attr_is(a, "exclude"), Exclude::try_from)
-    }
-
-    fn extract_include_dot_files(&mut self, node: &mut FnArg) -> Vec<Attribute> {
-        self.extract_argument_attrs(
-            node,
-            |a| attr_is(a, "include_dot_files"),
-            |attr| {
-                attr.meta
-                    .require_path_only()
-                    .map_err(|_| attr.error("Use #[include_dot_files] to include dot files"))?;
-                Ok(attr)
-            },
-        )
-    }
-}
-
-impl VisitMut for ValueFilesExtractor {
-    fn visit_fn_arg_mut(&mut self, node: &mut FnArg) {
-        let name = node.maybe_ident().cloned();
-        if matches!(node, FnArg::Receiver(_)) || name.is_none() {
-            return;
-        }
-        let name = name.unwrap();
-        let files = self.extract_files(node);
-        let excludes = self.extract_exclude(node);
-        let include_dot_files = self.extract_include_dot_files(node);
-        if !include_dot_files.is_empty() {
-            include_dot_files.iter().skip(1).for_each(|attr| {
-                self.errors
-                    .push(attr.error("Cannot use #[include_dot_files] more than once"))
-            })
-        }
-        if !files.is_empty() {
-            self.files.push((
-                name,
-                FilesGlobReferences::new(files, excludes, include_dot_files.is_empty()),
-            ))
-        } else {
-            excludes.into_iter().for_each(|e| {
-                self.errors.push(
-                    e.attr
-                        .error("You cannot use #[exclude(...)] without #[files(...)]"),
-                )
-            });
-            include_dot_files.into_iter().for_each(|attr| {
-                self.errors
-                    .push(attr.error("You cannot use #[include_dot_files] without #[files(...)]"))
-            });
-        }
-    }
-}
-
-trait BaseDir {
-    fn base_dir(&self) -> Result<PathBuf, String> {
-        env::var("CARGO_MANIFEST_DIR")
-            .map(PathBuf::from)
-            .map_err(|_|
-                "Rstest's #[files(...)] requires that CARGO_MANIFEST_DIR is defined to define glob the relative path".to_string()
-            )
-    }
-}
-
-struct DefaultBaseDir;
-
-impl BaseDir for DefaultBaseDir {}
-
-trait GlobResolver {
-    fn glob(&self, pattern: &str) -> Result<Vec<PathBuf>, String> {
-        let globs =
-            glob(pattern).map_err(|e| format!("glob failed for whole path `{pattern}` due {e}"))?;
-        globs
-            .into_iter()
-            .map(|p| p.map_err(|e| format!("glob failed for file due {e}")))
-            .map(|r| {
-                r.and_then(|p| {
-                    p.canonicalize()
-                        .map_err(|e| format!("failed to canonicalize {} due {e}", p.display()))
-                })
-            })
-            .collect()
-    }
-}
-
-struct DefaultGlobResolver;
-
-impl GlobResolver for DefaultGlobResolver {}
-
-/// The struct used to gel te values from the files attributes. You can inject
-/// the base dir resolver and glob resolver implementation.
-pub(crate) struct ValueListFromFiles<'a> {
-    base_dir: Box<dyn BaseDir + 'a>,
-    g_resolver: Box<dyn GlobResolver + 'a>,
-}
-
-impl<'a> Default for ValueListFromFiles<'a> {
-    fn default() -> Self {
-        Self {
-            g_resolver: Box::new(DefaultGlobResolver),
-            base_dir: Box::new(DefaultBaseDir),
-        }
-    }
-}
-
-impl<'a> ValueListFromFiles<'a> {
-    pub fn to_value_list(
-        &self,
-        files: Vec<(Ident, FilesGlobReferences)>,
-    ) -> Result<Vec<ValueList>, syn::Error> {
-        files
-            .into_iter()
-            .map(|(arg, refs)| {
-                self.file_list_values(refs).map(|values| ValueList {
-                    arg: arg.into_pat(),
-                    values,
-                })
-            })
-            .collect::<Result<Vec<ValueList>, _>>()
-    }
-
-    fn file_list_values(&self, refs: FilesGlobReferences) -> Result<Vec<Value>, syn::Error> {
-        let base_dir = self
-            .base_dir
-            .base_dir()
-            .map_err(|msg| refs.glob[0].error(&msg))?;
-        let resolved_paths = refs.paths(&base_dir)?;
-        let base_dir = base_dir
-            .into_os_string()
-            .into_string()
-            .map_err(|p| refs.glob[0].error(&format!("Cannot get a valid string from {p:?}")))?;
-
-        let mut values: Vec<(Expr, String)> = vec![];
-        for (attr, abs_path) in self.all_files_path(resolved_paths)? {
-            let relative_path = abs_path
-                .clone()
-                .into_os_string()
-                .into_string()
-                .map(|inner| RelativePath::new(base_dir.as_str()).relative(inner))
-                .map_err(|e| {
-                    attr.error(&format!("Invalid absolute path {}", e.to_string_lossy()))
-                })?;
-
-            if !refs.is_valid(&relative_path) {
-                continue;
-            }
-
-            let path_str = abs_path.to_string_lossy();
-            values.push((
-                parse_quote! {
-                    <::std::path::PathBuf as std::str::FromStr>::from_str(#path_str).unwrap()
-                },
-                render_file_description(&relative_path),
-            ));
-        }
-
-        if values.is_empty() {
-            Err(refs.glob[0].error("No file found"))?;
-        }
-
-        Ok(values
-            .into_iter()
-            .map(|(e, desc)| Value::new(e, Some(desc)))
-            .collect())
-    }
-
-    /// Return the tuples of attribute, file path resolved via glob resolver, sorted by path and without duplications.
-    fn all_files_path<'b>(
-        &self,
-        resolved_paths: Vec<(&'b LitStrAttr, String)>,
-    ) -> Result<Vec<(&'b LitStrAttr, PathBuf)>, syn::Error> {
-        let mut paths = resolved_paths
-            .iter()
-            .map(|(attr, pattern)| {
-                self.g_resolver
-                    .glob(pattern.as_ref())
-                    .map_err(|msg| attr.error(&msg))
-                    .map(|p| (attr, p))
-            })
-            .collect::<Result<Vec<_>, _>>()?
-            .into_iter()
-            .flat_map(|(&attr, inner)| inner.into_iter().map(move |p| (attr, p)))
-            .collect::<Vec<_>>();
-        paths.sort_by(|(_, a), (_, b)| a.cmp(b));
-        paths.dedup_by(|(_, a), (_, b)| a.eq(&b));
-        Ok(paths)
-    }
-}
-
-fn render_file_description(file: &RelativePath) -> String {
-    let mut description = String::new();
-    for c in file.components() {
-        match c {
-            relative_path::Component::CurDir => continue,
-            relative_path::Component::ParentDir => description.push_str("_UP"),
-            relative_path::Component::Normal(segment) => description.push_str(segment),
-        }
-        description.push('/')
-    }
-    description.pop();
-    description
-}
-
-#[cfg(test)]
-mod should {
-    use std::collections::HashMap;
-
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use maplit::hashmap;
-    use rstest_test::assert_in;
-
-    fn lit_str_attr(name: &str, value: impl AsRef<str>) -> LitStrAttr {
-        attrs(&format!(r#"#[{name}("{}")]"#, value.as_ref()))
-            .into_iter()
-            .next()
-            .unwrap()
-            .try_into()
-            .unwrap()
-    }
-
-    fn files_attr(lstr: impl AsRef<str>) -> LitStrAttr {
-        lit_str_attr("files", lstr)
-    }
-
-    fn exclude_attr(lstr: impl AsRef<str>) -> LitStrAttr {
-        lit_str_attr("exclude", lstr)
-    }
-
-    impl Exclude {
-        fn fake(value: &str, r: Option<Regex>) -> Self {
-            let r = r.unwrap_or_else(|| regex::Regex::new(value).unwrap());
-            Self {
-                attr: exclude_attr(value),
-                r,
-            }
-        }
-    }
-
-    impl From<&str> for Exclude {
-        fn from(value: &str) -> Self {
-            Self {
-                attr: exclude_attr(value),
-                r: regex::Regex::new(value).unwrap(),
-            }
-        }
-    }
-
-    #[rstest]
-    #[case::simple(r#"fn f(#[files("some_glob")] a: PathBuf) {}"#, "fn f(a: PathBuf) {}", &[("a", &["some_glob"], &[], true)])]
-    #[case::more_than_one(
-        r#"fn f(#[files("first")] a: PathBuf, b: u32, #[files("third")] c: PathBuf) {}"#,
-        r#"fn f(a: PathBuf, 
-                b: u32, 
-                c: PathBuf) {}"#,
-        &[("a", &["first"], &[], true), ("c", &["third"], &[], true)],
-    )]
-    #[case::more_globs_on_the_same_var(
-        r#"fn f(#[files("first")] #[files("second")] a: PathBuf) {}"#,
-        r#"fn f(a: PathBuf) {}"#,
-        &[("a", &["first", "second"], &[], true)],
-    )]
-    #[case::exclude(r#"fn f(#[files("some_glob")] #[exclude("exclude")] a: PathBuf) {}"#, 
-    "fn f(a: PathBuf) {}", &[("a", &["some_glob"], &["exclude"], true)])]
-    #[case::exclude_more(r#"fn f(#[files("some_glob")] #[exclude("first")]  #[exclude("second")] a: PathBuf) {}"#, 
-    "fn f(a: PathBuf) {}", &[("a", &["some_glob"], &["first", "second"], true)])]
-    #[case::include_dot_files(r#"fn f(#[files("some_glob")] #[include_dot_files] a: PathBuf) {}"#, 
-    "fn f(a: PathBuf) {}", &[("a", &["some_glob"], &[], false)])]
-
-    fn extract<'a, G: AsRef<[&'a str]>, E: AsRef<[&'a str]>>(
-        #[case] item_fn: &str,
-        #[case] expected: &str,
-        #[case] expected_files: &[(&str, G, E, bool)],
-    ) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let expected: ItemFn = expected.ast();
-
-        let files = extract_files(&mut item_fn).unwrap();
-
-        assert_eq!(expected, item_fn);
-        assert_eq!(
-            files,
-            expected_files
-                .into_iter()
-                .map(|(id, globs, ex, ignore)| (
-                    ident(id),
-                    FilesGlobReferences::new(
-                        globs.as_ref().iter().map(files_attr).collect(),
-                        ex.as_ref().iter().map(|&ex| ex.into()).collect(),
-                        *ignore
-                    )
-                ))
-                .collect::<Vec<_>>()
-        );
-    }
-
-    #[rstest]
-    #[case::no_files_arg("fn f(#[files] a: PathBuf) {}", "#[files(...)]")]
-    #[case::invalid_files_inner("fn f(#[files(a::b::c)] a: PathBuf) {}", "string literal")]
-    #[case::no_exclude_args(
-        r#"fn f(#[files("some")] #[exclude] a: PathBuf) {}"#,
-        "#[exclude(...)]"
-    )]
-    #[case::invalid_exclude_inner(
-        r#"fn f(#[files("some")] #[exclude(a::b)] a: PathBuf) {}"#,
-        "string literal"
-    )]
-    #[case::invalid_exclude_regex(
-        r#"fn f(#[files("some")] #[exclude("invalid(reg(ex")] a: PathBuf) {}"#,
-        "valid regex"
-    )]
-    #[case::include_dot_files_with_args(
-        r#"fn f(#[files("some")] #[include_dot_files(some)] a: PathBuf) {}"#,
-        "#[include_dot_files]"
-    )]
-    #[case::exclude_without_files(
-        r#"fn f(#[exclude("some")] a: PathBuf) {}"#,
-        "#[exclude(...)] without #[files(...)]"
-    )]
-    #[case::include_dot_files_without_files(
-        r#"fn f(#[include_dot_files] a: PathBuf) {}"#,
-        "#[include_dot_files] without #[files(...)]"
-    )]
-    #[case::include_dot_files_more_than_once(
-        r#"fn f(#[files("some")] #[include_dot_files] #[include_dot_files] a: PathBuf) {}"#,
-        "more than once"
-    )]
-    fn raise_error(#[case] item_fn: &str, #[case] message: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-
-        let err = extract_files(&mut item_fn).unwrap_err();
-
-        assert_in!(format!("{:?}", err), message);
-    }
-
-    #[derive(Default)]
-    struct FakeBaseDir(PathBuf);
-    impl From<&str> for FakeBaseDir {
-        fn from(value: &str) -> Self {
-            Self(PathBuf::from(value))
-        }
-    }
-    impl BaseDir for FakeBaseDir {
-        fn base_dir(&self) -> Result<PathBuf, String> {
-            Ok(self.0.clone())
-        }
-    }
-
-    impl<'a> ValueListFromFiles<'a> {
-        fn new(bdir: impl BaseDir + 'a, g_resover: impl GlobResolver + 'a) -> Self {
-            Self {
-                base_dir: Box::new(bdir),
-                g_resolver: Box::new(g_resover),
-            }
-        }
-    }
-
-    #[derive(Default)]
-    struct FakeResolver(Vec<String>);
-
-    impl From<&[&str]> for FakeResolver {
-        fn from(value: &[&str]) -> Self {
-            Self(value.iter().map(ToString::to_string).collect())
-        }
-    }
-
-    impl GlobResolver for FakeResolver {
-        fn glob(&self, _pattern: &str) -> Result<Vec<PathBuf>, String> {
-            Ok(self.0.iter().map(PathBuf::from).collect())
-        }
-    }
-
-    #[derive(Default)]
-    struct FakeMapResolver(String, HashMap<String, Vec<PathBuf>>);
-
-    impl From<(&str, &HashMap<&str, &[&str]>)> for FakeMapResolver {
-        fn from(value: (&str, &HashMap<&str, &[&str]>)) -> Self {
-            Self(
-                value.0.to_string(),
-                value
-                    .1
-                    .iter()
-                    .map(|(&key, &values)| {
-                        (
-                            key.to_string(),
-                            values
-                                .iter()
-                                .map(|&v| PathBuf::from(format!("{}/{v}", value.0)))
-                                .collect::<Vec<_>>(),
-                        )
-                    })
-                    .collect(),
-            )
-        }
-    }
-
-    impl GlobResolver for FakeMapResolver {
-        fn glob(&self, pattern: &str) -> Result<Vec<PathBuf>, String> {
-            let pattern = pattern.strip_prefix(&format!("{}/", self.0)).unwrap();
-            Ok(self.1.get(pattern).cloned().unwrap_or_default())
-        }
-    }
-
-    #[rstest]
-    #[case::simple("/base", None, FakeResolver::from(["/base/first", "/base/second"].as_slice()), vec![], true, &["first", "second"])]
-    #[case::more_glob("/base", Some(["path1", "path2"].as_slice()), FakeMapResolver::from(
-        ("/base", &hashmap!(
-            "path1" => ["first", "second"].as_slice(),
-            "path2" => ["third", "zzzz"].as_slice()
-        ))
-    ), vec![], true, &["first", "second", "third", "zzzz"])]
-    #[case::should_remove_duplicates("/base", Some(["path1", "path2"].as_slice()), FakeMapResolver::from(
-        ("/base", &hashmap!(
-            "path1" => ["first", "second"].as_slice(),
-            "path2" => ["second", "third"].as_slice()
-        ))
-    ), vec![], true, &["first", "second", "third"])]
-    #[case::should_sort("/base", None, FakeResolver::from(["/base/second", "/base/first"].as_slice()), vec![], true, &["first", "second"])]
-    #[case::exclude("/base", None, FakeResolver::from([
-        "/base/first", "/base/rem_1", "/base/other/rem_2", "/base/second"].as_slice()), 
-        vec![Exclude::fake("no_mater", Some(Regex::new("rem_").unwrap()))], true, &["first", "second"])]
-    #[case::exclude_more("/base", None, FakeResolver::from([
-        "/base/first", "/base/rem_1", "/base/other/rem_2", "/base/some/other", "/base/second"].as_slice()), 
-        vec![
-            Exclude::fake("no_mater", Some(Regex::new("rem_").unwrap())),
-            Exclude::fake("no_mater", Some(Regex::new("some").unwrap())),
-            ], true, &["first", "second"])]
-    #[case::ignore_dot_files("/base", None, FakeResolver::from([
-        "/base/first", "/base/.ignore", "/base/.ignore_dir/a", "/base/second/.not", "/base/second/but_include", "/base/in/.out/other/ignored"].as_slice()), 
-        vec![], true, &["first", "second/but_include"])]
-    #[case::include_dot_files("/base", None, FakeResolver::from([
-        "/base/first", "/base/.ignore", "/base/.ignore_dir/a", "/base/second/.not", "/base/second/but_include", "/base/in/.out/other/ignored"].as_slice()), 
-        vec![], false, &[".ignore", ".ignore_dir/a", "first", "in/.out/other/ignored", "second/.not", "second/but_include"])]
-    #[case::relative_path("/base/some/other/folders", None, 
-        FakeResolver::from(["/base/first", "/base/second"].as_slice()), vec![], true, &["../../../first", "../../../second"])]
-    fn generate_a_variable_with_the_glob_resolved_path(
-        #[case] bdir: &str,
-        #[case] paths: Option<&[&str]>,
-        #[case] resolver: impl GlobResolver,
-        #[case] exclude: Vec<Exclude>,
-        #[case] ignore_dot_files: bool,
-        #[case] expected: &[&str],
-    ) {
-        let paths = paths
-            .map(|inner| inner.into_iter().map(files_attr).collect())
-            .unwrap_or(vec![files_attr("no_mater")]);
-        let values = ValueListFromFiles::new(FakeBaseDir::from(bdir), resolver)
-            .to_value_list(vec![(
-                ident("a"),
-                FilesGlobReferences::new(paths, exclude, ignore_dot_files),
-            )])
-            .unwrap();
-
-        let mut v_list = values_list(
-            "a",
-            &expected
-                .iter()
-                .map(|&p| RelativePath::from_path(p).unwrap())
-                .map(|r| r.to_logical_path(bdir))
-                .map(|p| {
-                    format!(
-                        r#"<::std::path::PathBuf as std::str::FromStr>::from_str("{}").unwrap()"#,
-                        p.as_os_str().to_str().unwrap()
-                    )
-                })
-                .collect::<Vec<_>>(),
-        );
-        v_list
-            .values
-            .iter_mut()
-            .zip(expected.iter())
-            .for_each(|(v, &ex)| {
-                v.description = Some(render_file_description(
-                    &RelativePath::from_path(ex).unwrap(),
-                ))
-            });
-        assert_eq!(vec![v_list], values);
-    }
-
-    #[rstest]
-    #[case::file("name.txt", "name.txt")]
-    #[case::in_folder("some/folder/name.txt", "some/folder/name.txt")]
-    #[case::no_extension("name", "name")]
-    #[case::parent("../../name.txt", "_UP/_UP/name.txt")]
-    #[case::ignore_current("./../other/name.txt", "_UP/other/name.txt")]
-    fn render_file_description_should(#[case] path: &str, #[case] expected: &str) {
-        assert_eq!(
-            render_file_description(&RelativePath::from_path(path).unwrap()),
-            expected
-        );
-    }
-
-    #[test]
-    #[should_panic(expected = "Fake error")]
-    fn raise_error_if_fail_to_get_root() {
-        #[derive(Default)]
-        struct ErrorBaseDir;
-        impl BaseDir for ErrorBaseDir {
-            fn base_dir(&self) -> Result<PathBuf, String> {
-                Err("Fake error".to_string())
-            }
-        }
-
-        ValueListFromFiles::new(ErrorBaseDir::default(), FakeResolver::default())
-            .to_value_list(vec![(
-                ident("a"),
-                FilesGlobReferences::new(vec![files_attr("no_mater")], Default::default(), true),
-            )])
-            .unwrap();
-    }
-
-    #[test]
-    #[should_panic(expected = "No file found")]
-    fn raise_error_if_no_files_found() {
-        ValueListFromFiles::new(FakeBaseDir::default(), FakeResolver::default())
-            .to_value_list(vec![(
-                ident("a"),
-                FilesGlobReferences::new(vec![files_attr("no_mater")], Default::default(), true),
-            )])
-            .unwrap();
-    }
-
-    #[test]
-    #[should_panic(expected = "glob failed")]
-    fn default_glob_resolver_raise_error_if_invalid_glob_path() {
-        DefaultGlobResolver.glob("/invalid/path/***").unwrap();
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/testcase.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/testcase.rs
deleted file mode 100644
index 20efd79..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/testcase.rs
+++ /dev/null
@@ -1,162 +0,0 @@
-use syn::{
-    parse::{Error, Parse, ParseStream, Result},
-    punctuated::Punctuated,
-    Attribute, Expr, Ident, Token,
-};
-
-use proc_macro2::TokenStream;
-use quote::ToTokens;
-
-#[derive(PartialEq, Debug, Clone)]
-/// A test case instance data. Contains a list of arguments. It is parsed by parametrize
-/// attributes.
-pub(crate) struct TestCase {
-    pub(crate) args: Vec<Expr>,
-    pub(crate) attrs: Vec<Attribute>,
-    pub(crate) description: Option<Ident>,
-}
-
-impl Parse for TestCase {
-    fn parse(input: ParseStream) -> Result<Self> {
-        let attrs = Attribute::parse_outer(input)?;
-        let case: Ident = input.parse()?;
-        if case == "case" {
-            let mut description = None;
-            if input.peek(Token![::]) {
-                let _ = input.parse::<Token![::]>();
-                description = Some(input.parse()?);
-            }
-            let content;
-            let _ = syn::parenthesized!(content in input);
-            let args = Punctuated::<Expr, Token![,]>::parse_terminated(&content)?
-                .into_iter()
-                .collect();
-            Ok(TestCase {
-                args,
-                attrs,
-                description,
-            })
-        } else {
-            Err(Error::new(case.span(), "expected a test case"))
-        }
-    }
-}
-
-impl ToTokens for TestCase {
-    fn to_tokens(&self, tokens: &mut TokenStream) {
-        self.args.iter().for_each(|c| c.to_tokens(tokens))
-    }
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::{assert_eq, *};
-
-    fn parse_test_case<S: AsRef<str>>(test_case: S) -> TestCase {
-        parse_meta(test_case)
-    }
-
-    #[test]
-    fn two_literal_args() {
-        let test_case = parse_test_case(r#"case(42, "value")"#);
-        let args = test_case.args();
-
-        let expected = to_args!(["42", r#""value""#]);
-
-        assert_eq!(expected, args);
-    }
-
-    #[test]
-    fn some_literals() {
-        let args_expressions = literal_expressions_str();
-        let test_case = parse_test_case(&format!("case({})", args_expressions.join(", ")));
-        let args = test_case.args();
-
-        assert_eq!(to_args!(args_expressions), args);
-    }
-
-    #[test]
-    fn accept_arbitrary_rust_code() {
-        let test_case = parse_test_case(r#"case(vec![1,2,3])"#);
-        let args = test_case.args();
-
-        assert_eq!(to_args!(["vec![1, 2, 3]"]), args);
-    }
-
-    #[test]
-    #[should_panic]
-    fn raise_error_on_invalid_rust_code() {
-        parse_test_case(r#"case(some:<>(1,2,3))"#);
-    }
-
-    #[test]
-    fn get_description_if_any() {
-        let test_case = parse_test_case(r#"case::this_test_description(42)"#);
-        let args = test_case.args();
-
-        assert_eq!(
-            "this_test_description",
-            &test_case.description.unwrap().to_string()
-        );
-        assert_eq!(to_args!(["42"]), args);
-    }
-
-    #[test]
-    fn get_description_also_with_more_args() {
-        let test_case = parse_test_case(r#"case :: this_test_description (42, 24)"#);
-        let args = test_case.args();
-
-        assert_eq!(
-            "this_test_description",
-            &test_case.description.unwrap().to_string()
-        );
-        assert_eq!(to_args!(["42", "24"]), args);
-    }
-
-    #[test]
-    fn parse_arbitrary_rust_code_as_expression() {
-        let test_case = parse_test_case(
-            r##"
-            case(42, -42,
-            pippo("pluto"),
-            Vec::new(),
-            String::from(r#"prrr"#),
-            {
-                let mut sum=0;
-                for i in 1..3 {
-                    sum += i;
-                }
-                sum
-            },
-            vec![1,2,3]
-        )"##,
-        );
-
-        let args = test_case.args();
-
-        assert_eq!(
-            to_args!([
-                "42",
-                "-42",
-                r#"pippo("pluto")"#,
-                "Vec::new()",
-                r##"String::from(r#"prrr"#)"##,
-                r#"{let mut sum=0;for i in 1..3 {sum += i;}sum}"#,
-                "vec![1,2,3]"
-            ]),
-            args
-        );
-    }
-
-    #[test]
-    fn save_attributes() {
-        let test_case = parse_test_case(r#"#[should_panic]#[other_attr(x)]case(42)"#);
-
-        let content = format!("{:?}", test_case.attrs);
-
-        assert_eq!(2, test_case.attrs.len());
-        assert!(content.contains("should_panic"));
-        assert!(content.contains("other_attr"));
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/vlist.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/vlist.rs
deleted file mode 100644
index eb72e90..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/vlist.rs
+++ /dev/null
@@ -1,123 +0,0 @@
-use proc_macro2::TokenStream;
-use quote::ToTokens;
-use syn::{
-    parse::{Parse, ParseStream, Result},
-    Expr, Ident, Pat, Token,
-};
-
-use crate::refident::IntoPat;
-
-use super::expressions::Expressions;
-
-#[derive(Debug, PartialEq, Clone)]
-pub(crate) struct Value {
-    pub(crate) expr: Expr,
-    pub(crate) description: Option<String>,
-}
-
-impl Value {
-    pub(crate) fn new(expr: Expr, description: Option<String>) -> Self {
-        Self { expr, description }
-    }
-
-    pub(crate) fn description(&self) -> String {
-        self.description
-            .clone()
-            .unwrap_or_else(|| self.expr.to_token_stream().to_string())
-    }
-}
-
-impl From<Expr> for Value {
-    fn from(expr: Expr) -> Self {
-        Self::new(expr, None)
-    }
-}
-
-#[derive(Debug, PartialEq, Clone)]
-pub(crate) struct ValueList {
-    pub(crate) arg: Pat,
-    pub(crate) values: Vec<Value>,
-}
-
-impl Parse for ValueList {
-    fn parse(input: ParseStream) -> Result<Self> {
-        let ident: Ident = input.parse()?;
-        let _to: Token![=>] = input.parse()?;
-        let content;
-        let paren = syn::bracketed!(content in input);
-        let values: Expressions = content.parse()?;
-
-        let ret = Self {
-            arg: ident.into_pat(),
-            values: values.take().into_iter().map(|e| e.into()).collect(),
-        };
-        if ret.values.is_empty() {
-            Err(syn::Error::new(
-                paren.span.join(),
-                "Values list should not be empty",
-            ))
-        } else {
-            Ok(ret)
-        }
-    }
-}
-
-impl ToTokens for ValueList {
-    fn to_tokens(&self, tokens: &mut TokenStream) {
-        self.arg.to_tokens(tokens)
-    }
-}
-
-#[cfg(test)]
-mod should {
-    use crate::test::{assert_eq, *};
-
-    use super::*;
-
-    mod parse_values_list {
-        use super::assert_eq;
-        use super::*;
-
-        fn parse_values_list<S: AsRef<str>>(values_list: S) -> ValueList {
-            parse_meta(values_list)
-        }
-
-        #[test]
-        fn some_literals() {
-            let literals = literal_expressions_str();
-            let name = "argument";
-
-            let values_list = parse_values_list(format!(
-                r#"{} => [{}]"#,
-                name,
-                literals
-                    .iter()
-                    .map(ToString::to_string)
-                    .collect::<Vec<String>>()
-                    .join(", ")
-            ));
-
-            assert_eq!(name, &values_list.arg.display_code());
-            assert_eq!(values_list.args(), to_args!(literals));
-        }
-
-        #[test]
-        fn raw_code() {
-            let values_list = parse_values_list(r#"no_mater => [vec![1,2,3]]"#);
-
-            assert_eq!(values_list.args(), to_args!(["vec![1, 2, 3]"]));
-        }
-
-        #[test]
-        #[should_panic]
-        fn raw_code_with_parsing_error() {
-            parse_values_list(r#"other => [some:<>(1,2,3)]"#);
-        }
-
-        #[test]
-        #[should_panic(expected = r#"expected square brackets"#)]
-        fn forget_brackets() {
-            parse_values_list(r#"other => 42"#);
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/refident.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/refident.rs
deleted file mode 100644
index 45be6e9f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/refident.rs
+++ /dev/null
@@ -1,218 +0,0 @@
-/// Provide `RefIdent` and `MaybeIdent` traits that give a shortcut to extract identity reference
-/// (`syn::Ident` struct).
-use proc_macro2::Ident;
-use syn::{FnArg, Pat, PatIdent, PatType, Type};
-
-pub trait RefIdent {
-    /// Return the reference to ident if any
-    fn ident(&self) -> &Ident;
-}
-
-pub trait MaybeIdent {
-    /// Return the reference to ident if any
-    fn maybe_ident(&self) -> Option<&Ident>;
-}
-
-impl<I: RefIdent> MaybeIdent for I {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        Some(self.ident())
-    }
-}
-
-impl RefIdent for Ident {
-    fn ident(&self) -> &Ident {
-        self
-    }
-}
-
-impl<'a> RefIdent for &'a Ident {
-    fn ident(&self) -> &Ident {
-        self
-    }
-}
-
-impl MaybeIdent for FnArg {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        match self {
-            FnArg::Typed(pat) => pat.maybe_ident(),
-            _ => None,
-        }
-    }
-}
-
-impl MaybeIdent for PatType {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        self.pat.maybe_ident()
-    }
-}
-
-impl MaybeIdent for Pat {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        match self {
-            Pat::Ident(ident) => Some(&ident.ident),
-            _ => None,
-        }
-    }
-}
-
-impl MaybeIdent for Type {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        match self {
-            Type::Path(tp) if tp.qself.is_none() => tp.path.get_ident(),
-            _ => None,
-        }
-    }
-}
-
-pub trait MaybeType {
-    /// Return the reference to type if any
-    fn maybe_type(&self) -> Option<&Type>;
-}
-
-impl MaybeType for FnArg {
-    fn maybe_type(&self) -> Option<&Type> {
-        match self {
-            FnArg::Typed(PatType { ty, .. }) => Some(ty.as_ref()),
-            _ => None,
-        }
-    }
-}
-
-impl MaybeIdent for syn::GenericParam {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        match self {
-            syn::GenericParam::Type(syn::TypeParam { ident, .. })
-            | syn::GenericParam::Const(syn::ConstParam { ident, .. }) => Some(ident),
-            syn::GenericParam::Lifetime(syn::LifetimeParam { lifetime, .. }) => {
-                Some(&lifetime.ident)
-            }
-        }
-    }
-}
-
-impl MaybeIdent for crate::parse::Attribute {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        use crate::parse::Attribute::*;
-        match self {
-            Attr(ident) | Tagged(ident, _) | Type(ident, _) => Some(ident),
-        }
-    }
-}
-
-pub trait MaybeIntoPath {
-    fn maybe_into_path(self) -> Option<syn::Path>;
-}
-
-impl MaybeIntoPath for PatIdent {
-    fn maybe_into_path(self) -> Option<syn::Path> {
-        Some(self.ident.into())
-    }
-}
-
-impl MaybeIntoPath for Pat {
-    fn maybe_into_path(self) -> Option<syn::Path> {
-        match self {
-            Pat::Ident(pi) => pi.maybe_into_path(),
-            _ => None,
-        }
-    }
-}
-
-pub trait RefPat {
-    /// Return the reference to ident if any
-    fn pat(&self) -> &Pat;
-}
-
-pub trait MaybePatIdent {
-    fn maybe_patident(&self) -> Option<&syn::PatIdent>;
-}
-
-impl MaybePatIdent for FnArg {
-    fn maybe_patident(&self) -> Option<&syn::PatIdent> {
-        match self {
-            FnArg::Typed(PatType { pat, .. }) => match pat.as_ref() {
-                Pat::Ident(ident) => Some(ident),
-                _ => None,
-            },
-            _ => None,
-        }
-    }
-}
-
-impl MaybePatIdent for Pat {
-    fn maybe_patident(&self) -> Option<&syn::PatIdent> {
-        match self {
-            Pat::Ident(ident) => Some(ident),
-            _ => None,
-        }
-    }
-}
-
-pub trait MaybePatType {
-    fn maybe_pat_type(&self) -> Option<&syn::PatType>;
-}
-
-impl MaybePatType for FnArg {
-    fn maybe_pat_type(&self) -> Option<&syn::PatType> {
-        match self {
-            FnArg::Typed(pt) => Some(pt),
-            _ => None,
-        }
-    }
-}
-
-pub trait MaybePatTypeMut {
-    fn maybe_pat_type_mut(&mut self) -> Option<&mut syn::PatType>;
-}
-
-impl MaybePatTypeMut for FnArg {
-    fn maybe_pat_type_mut(&mut self) -> Option<&mut syn::PatType> {
-        match self {
-            FnArg::Typed(pt) => Some(pt),
-            _ => None,
-        }
-    }
-}
-
-pub trait MaybePat {
-    fn maybe_pat(&self) -> Option<&syn::Pat>;
-}
-
-impl MaybePat for FnArg {
-    fn maybe_pat(&self) -> Option<&syn::Pat> {
-        match self {
-            FnArg::Typed(PatType { pat, .. }) => Some(pat.as_ref()),
-            _ => None,
-        }
-    }
-}
-
-pub trait RemoveMutability {
-    fn remove_mutability(&mut self);
-}
-
-impl RemoveMutability for FnArg {
-    fn remove_mutability(&mut self) {
-        if let FnArg::Typed(PatType { pat, .. }) = self {
-            if let Pat::Ident(ident) = pat.as_mut() {
-                ident.mutability = None
-            }
-        };
-    }
-}
-
-pub trait IntoPat {
-    fn into_pat(self) -> Pat;
-}
-
-impl IntoPat for Ident {
-    fn into_pat(self) -> Pat {
-        Pat::Ident(syn::PatIdent {
-            attrs: vec![],
-            by_ref: None,
-            mutability: None,
-            ident: self,
-            subpat: None,
-        })
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/apply_arguments.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/apply_arguments.rs
deleted file mode 100644
index 715e3a6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/apply_arguments.rs
+++ /dev/null
@@ -1,299 +0,0 @@
-use quote::{format_ident, ToTokens};
-use syn::{parse_quote, FnArg, Generics, Ident, ItemFn, Lifetime, Signature, Type, TypeReference};
-
-use crate::{
-    parse::{arguments::ArgumentsInfo, future::MaybeFutureImplType},
-    refident::{MaybeIdent, MaybePat, MaybePatIdent, RemoveMutability},
-};
-
-pub(crate) trait ApplyArguments {
-    type Output: Sized;
-    type Context;
-
-    fn apply_arguments(
-        &mut self,
-        arguments: &mut ArgumentsInfo,
-        ctx: &mut Self::Context,
-    ) -> Self::Output;
-}
-
-impl ApplyArguments for FnArg {
-    type Output = Option<Lifetime>;
-    type Context = usize;
-
-    fn apply_arguments(
-        &mut self,
-        arguments: &mut ArgumentsInfo,
-        anoymous_id: &mut usize,
-    ) -> Self::Output {
-        if self
-            .maybe_pat()
-            .map(|id| arguments.is_future(id))
-            .unwrap_or_default()
-        {
-            self.impl_future_arg(anoymous_id)
-        } else {
-            None
-        }
-    }
-}
-
-fn move_generic_list(data: &mut Generics, other: Generics) {
-    data.lt_token = data.lt_token.or(other.lt_token);
-    data.params = other.params;
-    data.gt_token = data.gt_token.or(other.gt_token);
-}
-
-fn extend_generics_with_lifetimes<'a, 'b>(
-    generics: impl Iterator<Item = &'a syn::GenericParam>,
-    lifetimes: impl Iterator<Item = &'b syn::Lifetime>,
-) -> Generics {
-    let all = lifetimes
-        .map(|lt| lt as &dyn ToTokens)
-        .chain(generics.map(|gp| gp as &dyn ToTokens));
-    parse_quote! {
-                <#(#all),*>
-    }
-}
-
-impl ApplyArguments for Signature {
-    type Output = ();
-    type Context = ();
-
-    fn apply_arguments(&mut self, arguments: &mut ArgumentsInfo, _: &mut ()) {
-        let mut anonymous_lt = 0_usize;
-        let new_lifetimes = self
-            .inputs
-            .iter_mut()
-            .filter_map(|arg| arg.apply_arguments(arguments, &mut anonymous_lt))
-            .collect::<Vec<_>>();
-        if !new_lifetimes.is_empty() || !self.generics.params.is_empty() {
-            let new_generics =
-                extend_generics_with_lifetimes(self.generics.params.iter(), new_lifetimes.iter());
-            move_generic_list(&mut self.generics, new_generics);
-        }
-    }
-}
-
-impl ApplyArguments for ItemFn {
-    type Output = ();
-    type Context = ();
-
-    fn apply_arguments(&mut self, arguments: &mut ArgumentsInfo, _: &mut ()) {
-        let args = self.sig.inputs.iter().cloned().collect::<Vec<_>>();
-        self.sig.apply_arguments(arguments, &mut ());
-        let rebound_awaited_args = args
-            .iter()
-            .filter_map(MaybePat::maybe_pat)
-            .filter(|p| arguments.is_future_await(p))
-            .filter_map(MaybePatIdent::maybe_patident)
-            .map(|p| {
-                let a = &p.ident;
-                quote::quote! { let #p = #a.await; }
-            });
-        let orig_block_impl = self.block.clone();
-        self.block = parse_quote! {
-            {
-                #(#rebound_awaited_args)*
-                #orig_block_impl
-            }
-        };
-    }
-}
-
-pub(crate) trait ImplFutureArg {
-    fn impl_future_arg(&mut self, anonymous_lt: &mut usize) -> Option<Lifetime>;
-}
-
-impl ImplFutureArg for FnArg {
-    fn impl_future_arg(&mut self, anonymous_lt: &mut usize) -> Option<Lifetime> {
-        let lifetime_id = self
-            .maybe_ident()
-            .map(|id| format_ident!("_{}", id))
-            .unwrap_or_else(|| {
-                *anonymous_lt += 1;
-                format_ident!("_anonymous_lt_{}", anonymous_lt)
-            });
-        match self.as_mut_future_impl_type() {
-            Some(ty) => {
-                let lifetime = update_type_with_lifetime(ty, lifetime_id);
-                *ty = parse_quote! {
-                    impl std::future::Future<Output = #ty>
-                };
-                self.remove_mutability();
-                lifetime
-            }
-            None => None,
-        }
-    }
-}
-
-fn update_type_with_lifetime(ty: &mut Type, ident: Ident) -> Option<Lifetime> {
-    if let Type::Reference(ty_ref @ TypeReference { lifetime: None, .. }) = ty {
-        let lifetime = Some(syn::Lifetime {
-            apostrophe: ident.span(),
-            ident,
-        });
-        ty_ref.lifetime.clone_from(&lifetime);
-        lifetime
-    } else {
-        None
-    }
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use syn::ItemFn;
-
-    #[rstest]
-    #[case("fn simple(a: u32) {}")]
-    #[case("fn more(a: u32, b: &str) {}")]
-    #[case("fn gen<S: AsRef<str>>(a: u32, b: S) {}")]
-    #[case("fn attr(#[case] a: u32, #[values(1,2)] b: i32) {}")]
-    fn no_change(#[case] item_fn: &str) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let orig = item_fn.clone();
-        let mut args = ArgumentsInfo::default();
-
-        item_fn.sig.apply_arguments(&mut args, &mut ());
-
-        assert_eq!(orig, item_fn)
-    }
-
-    #[rstest]
-    #[case::simple(
-        "fn f(a: u32) {}",
-        &["a"],
-        "fn f(a: impl std::future::Future<Output = u32>) {}"
-    )]
-    #[case::more_than_one(
-        "fn f(a: u32, b: String, c: std::collection::HashMap<usize, String>) {}",
-        &["a", "b", "c"],
-        r#"fn f(a: impl std::future::Future<Output = u32>, 
-                b: impl std::future::Future<Output = String>, 
-                c: impl std::future::Future<Output = std::collection::HashMap<usize, String>>) {}"#,
-    )]
-    #[case::just_one(
-        "fn f(a: u32, b: String) {}",
-        &["b"],
-        r#"fn f(a: u32, 
-                b: impl std::future::Future<Output = String>) {}"#
-    )]
-    #[case::generics(
-        "fn f<S: AsRef<str>>(a: S) {}",
-        &["a"],
-        "fn f<S: AsRef<str>>(a: impl std::future::Future<Output = S>) {}"
-    )]
-    #[case::remove_mut(
-        "fn f(mut a: u32) {}",
-        &["a"],
-        r#"fn f(a: impl std::future::Future<Output = u32>) {}"#
-    )]
-    fn replace_future_basic_type(
-        #[case] item_fn: &str,
-        #[case] futures: &[&str],
-        #[case] expected: &str,
-    ) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let expected: ItemFn = expected.ast();
-
-        let mut arguments = ArgumentsInfo::default();
-        futures
-            .into_iter()
-            .for_each(|&f| arguments.add_future(pat(f)));
-
-        item_fn.sig.apply_arguments(&mut arguments, &mut ());
-
-        assert_eq!(expected, item_fn)
-    }
-
-    #[rstest]
-    #[case::base(
-        "fn f(ident_name: &u32) {}",
-        &["ident_name"],
-        "fn f<'_ident_name>(ident_name: impl std::future::Future<Output = &'_ident_name u32>) {}"
-    )]
-    #[case::lifetime_already_exists(
-        "fn f<'b>(a: &'b u32) {}",
-        &["a"],
-        "fn f<'b>(a: impl std::future::Future<Output = &'b u32>) {}"
-    )]
-    #[case::some_other_generics(
-        "fn f<'b, IT: Iterator<Item=String + 'b>>(a: &u32, it: IT) {}",
-        &["a"],
-        "fn f<'_a, 'b, IT: Iterator<Item=String + 'b>>(a: impl std::future::Future<Output = &'_a u32>, it: IT) {}"
-    )]
-    fn replace_reference_type(
-        #[case] item_fn: &str,
-        #[case] futures: &[&str],
-        #[case] expected: &str,
-    ) {
-        let mut item_fn: ItemFn = item_fn.ast();
-        let expected: ItemFn = expected.ast();
-
-        let mut arguments = ArgumentsInfo::default();
-        futures
-            .into_iter()
-            .for_each(|&f| arguments.add_future(pat(f)));
-
-        item_fn.sig.apply_arguments(&mut arguments, &mut ());
-
-        assert_eq!(expected, item_fn)
-    }
-
-    mod await_future_args {
-        use rstest_test::{assert_in, assert_not_in};
-
-        use crate::parse::arguments::FutureArg;
-
-        use super::*;
-
-        #[test]
-        fn with_global_await() {
-            let mut item_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {} "#.ast();
-            let mut arguments: ArgumentsInfo = Default::default();
-            arguments.set_global_await(true);
-            arguments.add_future(pat("a"));
-            arguments.add_future(pat("b"));
-
-            item_fn.apply_arguments(&mut arguments, &mut ());
-
-            let code = item_fn.block.display_code();
-
-            assert_in!(code, await_argument_code_string("a"));
-            assert_in!(code, await_argument_code_string("b"));
-            assert_not_in!(code, await_argument_code_string("c"));
-        }
-
-        #[test]
-        fn with_selective_await() {
-            let mut item_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {} "#.ast();
-            let mut arguments: ArgumentsInfo = Default::default();
-            arguments.set_future(pat("a"), FutureArg::Define);
-            arguments.set_future(pat("b"), FutureArg::Await);
-
-            item_fn.apply_arguments(&mut arguments, &mut ());
-
-            let code = item_fn.block.display_code();
-
-            assert_not_in!(code, await_argument_code_string("a"));
-            assert_in!(code, await_argument_code_string("b"));
-            assert_not_in!(code, await_argument_code_string("c"));
-        }
-
-        #[test]
-        fn with_mut_await() {
-            let mut item_fn: ItemFn = r#"fn test(mut a: i32) {} "#.ast();
-            let mut arguments: ArgumentsInfo = Default::default();
-
-            arguments.set_future(pat("a").with_mut(), FutureArg::Await);
-
-            item_fn.apply_arguments(&mut arguments, &mut ());
-
-            let code = item_fn.block.display_code();
-            assert_in!(code, mut_await_argument_code_string("a"));
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/crate_resolver.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/crate_resolver.rs
deleted file mode 100644
index 39095d70..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/crate_resolver.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use syn::parse_quote;
-
-pub fn crate_name() -> syn::Path {
-    cfg_if::cfg_if! {
-    if #[cfg(feature = "crate-name")] {
-        use proc_macro_crate::FoundCrate;
-        use quote::format_ident;
-
-        match proc_macro_crate::crate_name("rstest").expect("rstest is present in `Cargo.toml` qed")
-        {
-            FoundCrate::Itself => parse_quote! { rstest },
-            FoundCrate::Name(name) => {
-                let myself = format_ident!("{name}");
-                parse_quote! { #myself }
-            }
-        }
-    } else {
-        parse_quote! { rstest }
-    }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/fixture.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/fixture.rs
deleted file mode 100644
index 74451a8e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/fixture.rs
+++ /dev/null
@@ -1,599 +0,0 @@
-use proc_macro2::{Span, TokenStream};
-use syn::token::Async;
-use syn::{parse_quote, FnArg, Generics, Ident, ItemFn, ReturnType};
-
-use quote::quote;
-
-use super::apply_arguments::ApplyArguments;
-use super::{inject, render_exec_call};
-use crate::refident::MaybeIdent;
-use crate::resolver::{self, Resolver};
-use crate::{parse::fixture::FixtureInfo, utils::generics_clean_up};
-
-fn wrap_return_type_as_static_ref(rt: ReturnType) -> ReturnType {
-    match rt {
-        syn::ReturnType::Type(_, t) => parse_quote! {
-           -> &'static #t
-        },
-        o => o,
-    }
-}
-
-fn wrap_call_impl_with_call_once_impl(call_impl: TokenStream, rt: &ReturnType) -> TokenStream {
-    match rt {
-        syn::ReturnType::Type(_, t) => parse_quote! {
-            static CELL: std::sync::OnceLock<#t> =
-                std::sync::OnceLock::new();
-            CELL.get_or_init(|| #call_impl )
-        },
-        _ => parse_quote! {
-            static CELL: std::sync::Once = std::sync::Once::new();
-            CELL.call_once(|| #call_impl );
-        },
-    }
-}
-
-pub(crate) fn render(mut fixture: ItemFn, info: FixtureInfo) -> TokenStream {
-    let mut arguments = info.arguments.clone();
-    fixture.apply_arguments(&mut arguments, &mut ());
-    let name = &fixture.sig.ident;
-    let asyncness = &fixture.sig.asyncness.clone();
-    let inner_args = info
-        .arguments
-        .replace_fn_args_with_related_inner_pat(fixture.sig.inputs.iter().cloned())
-        .collect::<Vec<_>>();
-    let args_ident = inner_args
-        .iter()
-        .filter_map(MaybeIdent::maybe_ident)
-        .cloned()
-        .collect::<Vec<_>>();
-    let orig_attrs = &fixture.attrs;
-    let generics = &fixture.sig.generics;
-    let mut default_output = info
-        .attributes
-        .extract_default_type()
-        .unwrap_or_else(|| fixture.sig.output.clone());
-    let default_generics =
-        generics_clean_up(&fixture.sig.generics, std::iter::empty(), &default_output);
-    let default_where_clause = &default_generics.where_clause;
-    let where_clause = &fixture.sig.generics.where_clause;
-    let mut output = fixture.sig.output.clone();
-    let visibility = &fixture.vis;
-    let resolver = (
-        resolver::fixtures::get(&info.arguments, info.data.fixtures()),
-        resolver::values::get(info.data.values()),
-    );
-    let generics_idents = generics
-        .type_params()
-        .map(|tp| &tp.ident)
-        .cloned()
-        .collect::<Vec<_>>();
-    let inject = inject::resolve_arguments(inner_args.iter(), &resolver, &generics_idents);
-
-    let partials = (1..=inner_args.len()).map(|n| {
-        render_partial_impl(
-            &inner_args,
-            &fixture.sig.output,
-            &fixture.sig.generics,
-            fixture.sig.asyncness.as_ref(),
-            n,
-            &resolver,
-            &info,
-        )
-    });
-
-    let args = args_ident
-        .iter()
-        .map(|arg| parse_quote! { #arg })
-        .collect::<Vec<_>>();
-    let call_get = render_exec_call(parse_quote! { Self::get }, &args, asyncness.is_some());
-    let mut call_impl = render_exec_call(parse_quote! { #name }, &args, asyncness.is_some());
-
-    if info.arguments.is_once() {
-        call_impl = wrap_call_impl_with_call_once_impl(call_impl, &output);
-        output = wrap_return_type_as_static_ref(output);
-        default_output = wrap_return_type_as_static_ref(default_output);
-    }
-
-    quote! {
-        #[allow(non_camel_case_types)]
-        #visibility struct #name {}
-
-        impl #name {
-            #(#orig_attrs)*
-            #[allow(unused_mut)]
-            pub #asyncness fn get #generics (#(#inner_args),*) #output #where_clause {
-                #call_impl
-            }
-
-            pub #asyncness fn default #default_generics () #default_output #default_where_clause {
-                #inject
-                #call_get
-            }
-
-            #(#partials)*
-        }
-
-        #[allow(dead_code)]
-        #fixture
-    }
-}
-
-fn render_partial_impl(
-    args: &[FnArg],
-    output: &ReturnType,
-    generics: &Generics,
-    asyncness: Option<&Async>,
-    n: usize,
-    resolver: &impl Resolver,
-    info: &FixtureInfo,
-) -> TokenStream {
-    let mut output = info
-        .attributes
-        .extract_partial_type(n)
-        .unwrap_or_else(|| output.clone());
-
-    if info.arguments.is_once() {
-        output = wrap_return_type_as_static_ref(output);
-    }
-
-    let generics = generics_clean_up(generics, args.iter().take(n), &output);
-    let where_clause = &generics.where_clause;
-    let genercs_idents = generics
-        .type_params()
-        .map(|tp| &tp.ident)
-        .cloned()
-        .collect::<Vec<_>>();
-    let inject = inject::resolve_arguments(args.iter().skip(n), resolver, &genercs_idents);
-
-    let sign_args = args.iter().take(n);
-    let fixture_args = args
-        .iter()
-        .filter_map(MaybeIdent::maybe_ident)
-        .map(|arg| parse_quote! {#arg})
-        .collect::<Vec<_>>();
-    let name = Ident::new(&format!("partial_{n}"), Span::call_site());
-
-    let call_get = render_exec_call(
-        parse_quote! { Self::get },
-        &fixture_args,
-        asyncness.is_some(),
-    );
-
-    quote! {
-        #[allow(unused_mut)]
-        pub #asyncness fn #name #generics (#(#sign_args),*) #output #where_clause {
-            #inject
-            #call_get
-        }
-    }
-}
-
-#[cfg(test)]
-mod should {
-    use rstest_test::{assert_in, assert_not_in};
-    use syn::{
-        parse::{Parse, ParseStream},
-        parse2, parse_str, ItemImpl, ItemStruct, Result,
-    };
-
-    use crate::parse::{
-        arguments::{ArgumentsInfo, FutureArg},
-        Attribute, Attributes,
-    };
-
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use rstest_reuse::*;
-
-    #[derive(Clone)]
-    struct FixtureOutput {
-        orig: ItemFn,
-        fixture: ItemStruct,
-        core_impl: ItemImpl,
-    }
-
-    impl Parse for FixtureOutput {
-        fn parse(input: ParseStream) -> Result<Self> {
-            Ok(FixtureOutput {
-                fixture: input.parse()?,
-                core_impl: input.parse()?,
-                orig: input.parse()?,
-            })
-        }
-    }
-
-    fn parse_fixture<S: AsRef<str>>(code: S) -> (ItemFn, FixtureOutput) {
-        let item_fn = parse_str::<ItemFn>(code.as_ref()).unwrap();
-
-        let tokens = render(item_fn.clone(), Default::default());
-        (item_fn, parse2(tokens).unwrap())
-    }
-
-    fn test_maintains_function_visibility(code: &str) {
-        let (item_fn, out) = parse_fixture(code);
-
-        assert_eq!(item_fn.vis, out.fixture.vis);
-        assert_eq!(item_fn.vis, out.orig.vis);
-    }
-
-    fn select_method<S: AsRef<str>>(impl_code: ItemImpl, name: S) -> Option<syn::ImplItemFn> {
-        impl_code
-            .items
-            .into_iter()
-            .filter_map(|ii| match ii {
-                syn::ImplItem::Fn(f) => Some(f),
-                _ => None,
-            })
-            .find(|f| f.sig.ident == name.as_ref())
-    }
-
-    #[test]
-    fn maintains_pub_visibility() {
-        test_maintains_function_visibility(r#"pub fn test() { }"#);
-    }
-
-    #[test]
-    fn maintains_no_pub_visibility() {
-        test_maintains_function_visibility(r#"fn test() { }"#);
-    }
-
-    #[test]
-    fn implement_a_get_method_with_input_fixture_signature() {
-        let (item_fn, out) = parse_fixture(
-            r#"
-                    pub fn test<R: AsRef<str>, B>(mut s: String, v: &u32, a: &mut [i32], r: R) -> (u32, B, String, &str)
-                            where B: Borrow<u32>
-                    { }
-                    "#,
-        );
-
-        let mut signature = select_method(out.core_impl, "get").unwrap().sig;
-
-        signature.ident = item_fn.sig.ident.clone();
-
-        assert_eq!(item_fn.sig, signature);
-    }
-
-    #[test]
-    fn return_a_static_reference_if_once_attribute() {
-        let item_fn = parse_str::<ItemFn>(r#"
-                pub fn test<R: AsRef<str>, B>(mut s: String, v: &u32, a: &mut [i32], r: R) -> (u32, B, String, &str)
-                            where B: Borrow<u32>
-                    { }    
-        "#).unwrap();
-        let info = FixtureInfo::default().with_once();
-
-        let out: FixtureOutput = parse2(render(item_fn.clone(), info)).unwrap();
-
-        let signature = select_method(out.core_impl, "get").unwrap().sig;
-
-        assert_eq!(signature.output, "-> &'static (u32, B, String, &str)".ast())
-    }
-
-    #[template]
-    #[rstest(
-        method => ["default", "get", "partial_1", "partial_2", "partial_3"])
-    ]
-    #[case::async_fn(true)]
-    #[case::not_async_fn(false)]
-    fn async_fixture_cases(#[case] is_async: bool, method: &str) {}
-
-    #[apply(async_fixture_cases)]
-    fn fixture_method_should_be_async_if_fixture_function_is_async(
-        #[case] is_async: bool,
-        method: &str,
-    ) {
-        let prefix = if is_async { "async" } else { "" };
-        let (_, out) = parse_fixture(&format!(
-            r#"
-                    pub {} fn test(mut s: String, v: &u32, a: &mut [i32]) -> u32
-                            where B: Borrow<u32>
-                    {{ }}
-                    "#,
-            prefix
-        ));
-
-        let signature = select_method(out.core_impl, method).unwrap().sig;
-
-        assert_eq!(is_async, signature.asyncness.is_some());
-    }
-
-    #[apply(async_fixture_cases)]
-    fn fixture_method_should_use_await_if_fixture_function_is_async(
-        #[case] is_async: bool,
-        method: &str,
-    ) {
-        let prefix = if is_async { "async" } else { "" };
-        let (_, out) = parse_fixture(&format!(
-            r#"
-                    pub {} fn test(mut s: String, v: &u32, a: &mut [i32]) -> u32
-                    {{ }}
-                    "#,
-            prefix
-        ));
-
-        let body = select_method(out.core_impl, method).unwrap().block;
-        let last_statement = body.stmts.last().unwrap();
-        let is_await = match last_statement {
-            syn::Stmt::Expr(syn::Expr::Await(_), _) => true,
-            _ => false,
-        };
-
-        assert_eq!(is_async, is_await);
-    }
-
-    #[test]
-    fn implement_a_default_method_with_input_cleaned_fixture_signature_and_no_args() {
-        let (item_fn, out) = parse_fixture(
-            r#"
-                    pub fn test<R: AsRef<str>, B, F, H: Iterator<Item=u32>>(mut s: String, v: &u32, a: &mut [i32], r: R) -> (H, B, String, &str)
-                        where F: ToString,
-                        B: Borrow<u32>
-
-                    { }
-                    "#,
-        );
-
-        let default_decl = select_method(out.core_impl, "default").unwrap().sig;
-
-        let expected = parse_str::<ItemFn>(
-            r#"
-                    pub fn default<B, H: Iterator<Item=u32>>() -> (H, B, String, &str)
-                            where B: Borrow<u32>
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        assert_eq!(expected.sig.generics, default_decl.generics);
-        assert_eq!(item_fn.sig.output, default_decl.output);
-        assert!(default_decl.inputs.is_empty());
-    }
-
-    #[test]
-    fn use_default_return_type_if_any() {
-        let item_fn = parse_str::<ItemFn>(
-            r#"
-                    pub fn test<R: AsRef<str>, B, F, H: Iterator<Item=u32>>() -> (H, B)
-                            where F: ToString,
-                            B: Borrow<u32>
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        let tokens = render(
-            item_fn.clone(),
-            FixtureInfo {
-                attributes: Attributes {
-                    attributes: vec![Attribute::Type(
-                        parse_str("default").unwrap(),
-                        parse_str("(impl Iterator<Item=u32>, B)").unwrap(),
-                    )],
-                }
-                .into(),
-                ..Default::default()
-            },
-        );
-        let out: FixtureOutput = parse2(tokens).unwrap();
-
-        let expected = parse_str::<syn::ItemFn>(
-            r#"
-                    pub fn default<B>() -> (impl Iterator<Item=u32>, B)
-                            where B: Borrow<u32>
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        let default_decl = select_method(out.core_impl, "default").unwrap().sig;
-
-        assert_eq!(expected.sig, default_decl);
-    }
-
-    #[test]
-    fn implement_partial_methods() {
-        let (item_fn, out) = parse_fixture(
-            r#"
-                    pub fn test(mut s: String, v: &u32, a: &mut [i32]) -> usize
-                    { }
-                    "#,
-        );
-
-        let partials = (1..=3)
-            .map(|n| {
-                select_method(out.core_impl.clone(), format!("partial_{}", n))
-                    .unwrap()
-                    .sig
-            })
-            .collect::<Vec<_>>();
-
-        // All 3 methods found
-
-        assert!(select_method(out.core_impl, "partial_4").is_none());
-
-        let expected_1 = parse_str::<ItemFn>(
-            r#"
-                    pub fn partial_1(mut s: String) -> usize
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        assert_eq!(expected_1.sig, partials[0]);
-        for p in partials {
-            assert_eq!(item_fn.sig.output, p.output);
-        }
-    }
-
-    #[rstest]
-    #[case::base("fn test<S: AsRef<str>, U: AsRef<u32>, F: ToString>(mut s: S, v: U) -> F {}",
-        vec![
-            "fn default<F: ToString>() -> F {}",
-            "fn partial_1<S: AsRef<str>, F: ToString>(mut s: S) -> F {}",
-            "fn partial_2<S: AsRef<str>, U: AsRef<u32>, F: ToString>(mut s: S, v: U) -> F {}",
-        ]
-    )]
-    #[case::associated_type("fn test<T: IntoIterator>(mut i: T) where T::Item: Copy {}",
-        vec![
-            "fn default() {}",
-            "fn partial_1<T: IntoIterator>(mut i: T) where T::Item: Copy {}",
-        ]
-    )]
-    #[case::not_remove_const_generics("fn test<const N:usize>(v: [u32; N]) -> [i32; N] {}",
-        vec![
-            "fn default<const N:usize>() -> [i32; N] {}",
-            "fn partial_1<const N:usize>(v: [u32; N]) -> [i32; N] {}",
-        ]
-    )]
-    #[case::remove_const_generics("fn test<const N:usize>(a: i32, v: [u32; N]) {}",
-        vec![
-            "fn default() {}",
-            "fn partial_1(a:i32) {}",
-            "fn partial_2<const N:usize>(a:i32, v: [u32; N]) {}",
-        ]
-    )]
-
-    fn clean_generics(#[case] code: &str, #[case] expected: Vec<&str>) {
-        let (item_fn, out) = parse_fixture(code);
-        let n_args = item_fn.sig.inputs.iter().count();
-
-        let mut signatures = vec![select_method(out.core_impl.clone(), "default").unwrap().sig];
-        signatures.extend((1..=n_args).map(|n| {
-            select_method(out.core_impl.clone(), format!("partial_{}", n))
-                .unwrap()
-                .sig
-        }));
-
-        let expected = expected
-            .into_iter()
-            .map(parse_str::<ItemFn>)
-            .map(|f| f.unwrap().sig)
-            .collect::<Vec<_>>();
-
-        assert_eq!(expected, signatures);
-    }
-
-    #[test]
-    fn use_partial_return_type_if_any() {
-        let item_fn = parse_str::<ItemFn>(
-            r#"
-                    pub fn test<R: AsRef<str>, B, F, H: Iterator<Item=u32>>(h: H, b: B) -> (H, B)
-                            where F: ToString,
-                            B: Borrow<u32>
-                    { }
-                     "#,
-        )
-        .unwrap();
-
-        let tokens = render(
-            item_fn.clone(),
-            FixtureInfo {
-                attributes: Attributes {
-                    attributes: vec![Attribute::Type(
-                        parse_str("partial_1").unwrap(),
-                        parse_str("(H, impl Iterator<Item=u32>)").unwrap(),
-                    )],
-                }
-                .into(),
-                ..Default::default()
-            },
-        );
-        let out: FixtureOutput = parse2(tokens).unwrap();
-
-        let expected = parse_str::<syn::ItemFn>(
-            r#"
-                    pub fn partial_1<H: Iterator<Item=u32>>(h: H) -> (H, impl Iterator<Item=u32>)
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        let partial = select_method(out.core_impl, "partial_1").unwrap();
-
-        assert_eq!(expected.sig, partial.sig);
-    }
-
-    #[test]
-    fn add_future_boilerplate_if_requested() {
-        let item_fn: ItemFn =
-            r#"async fn test(async_ref_u32: &u32, async_u32: u32,simple: u32) { }"#.ast();
-
-        let mut arguments = ArgumentsInfo::default();
-        arguments.add_future(pat("async_ref_u32"));
-        arguments.add_future(pat("async_u32"));
-
-        let tokens = render(
-            item_fn.clone(),
-            FixtureInfo {
-                arguments,
-                ..Default::default()
-            },
-        );
-        let out: FixtureOutput = parse2(tokens).unwrap();
-
-        let expected = parse_str::<syn::ItemFn>(
-            r#"
-                    async fn get<'_async_ref_u32>(
-                        async_ref_u32: impl std::future::Future<Output = &'_async_ref_u32 u32>, 
-                        async_u32: impl std::future::Future<Output = u32>, 
-                        simple: u32
-                    )
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        let rendered = select_method(out.core_impl, "get").unwrap();
-
-        assert_eq!(expected.sig, rendered.sig);
-    }
-
-    #[test]
-    fn use_global_await() {
-        let item_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {} "#.ast();
-        let mut arguments: ArgumentsInfo = Default::default();
-        arguments.set_global_await(true);
-        arguments.add_future(pat("a"));
-        arguments.add_future(pat("b"));
-
-        let tokens = render(
-            item_fn.clone(),
-            FixtureInfo {
-                arguments,
-                ..Default::default()
-            },
-        );
-        let out: FixtureOutput = parse2(tokens).unwrap();
-
-        let code = out.orig.display_code();
-
-        assert_in!(code, await_argument_code_string("a"));
-        assert_in!(code, await_argument_code_string("b"));
-        assert_not_in!(code, await_argument_code_string("c"));
-    }
-
-    #[test]
-    fn use_selective_await() {
-        let item_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {} "#.ast();
-        let mut arguments: ArgumentsInfo = Default::default();
-        arguments.set_future(pat("a"), FutureArg::Define);
-        arguments.set_future(pat("b"), FutureArg::Await);
-
-        let tokens = render(
-            item_fn.clone(),
-            FixtureInfo {
-                arguments,
-                ..Default::default()
-            },
-        );
-        let out: FixtureOutput = parse2(tokens).unwrap();
-
-        let code = out.orig.display_code();
-
-        assert_not_in!(code, await_argument_code_string("a"));
-        assert_in!(code, await_argument_code_string("b"));
-        assert_not_in!(code, await_argument_code_string("c"));
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/inject.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/inject.rs
deleted file mode 100644
index d3946f0d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/inject.rs
+++ /dev/null
@@ -1,211 +0,0 @@
-use std::borrow::Cow;
-
-use proc_macro2::TokenStream;
-use quote::quote;
-use syn::{parse_quote, Expr, FnArg, Ident, Pat, Stmt, Type};
-
-use crate::{
-    refident::{IntoPat, MaybeIdent, MaybePat, MaybeType},
-    render::crate_resolver::crate_name,
-    resolver::Resolver,
-    utils::{fn_arg_mutability, IsLiteralExpression},
-};
-
-pub(crate) fn resolve_arguments<'a>(
-    args: impl Iterator<Item = &'a FnArg>,
-    resolver: &impl Resolver,
-    generic_types: &[Ident],
-) -> TokenStream {
-    let define_vars = args.map(|arg| ArgumentResolver::new(resolver, generic_types).resolve(arg));
-    quote! {
-        #(#define_vars)*
-    }
-}
-
-struct ArgumentResolver<'resolver, 'idents, 'f, R>
-where
-    R: Resolver + 'resolver,
-{
-    resolver: &'resolver R,
-    generic_types_names: &'idents [Ident],
-    magic_conversion: &'f dyn Fn(Cow<Expr>, &Type) -> Expr,
-}
-
-impl<'resolver, 'idents, 'f, R> ArgumentResolver<'resolver, 'idents, 'f, R>
-where
-    R: Resolver + 'resolver,
-{
-    fn new(resolver: &'resolver R, generic_types_names: &'idents [Ident]) -> Self {
-        Self {
-            resolver,
-            generic_types_names,
-            magic_conversion: &handling_magic_conversion_code,
-        }
-    }
-
-    fn resolve(&self, arg: &FnArg) -> Option<Stmt> {
-        let pat = arg.maybe_pat()?;
-        let mutability = fn_arg_mutability(arg);
-        let unused_mut: Option<syn::Attribute> = mutability
-            .as_ref()
-            .map(|_| parse_quote! {#[allow(unused_mut)]});
-        let arg_type = arg.maybe_type()?;
-        let fixture_name = self.fixture_name(pat);
-
-        let mut fixture = self
-            .resolver
-            .resolve(pat)
-            .or_else(|| self.resolver.resolve(&fixture_name.clone().into_pat()))
-            .unwrap_or_else(|| default_fixture_resolve(&fixture_name));
-
-        if fixture.is_literal() && self.type_can_be_get_from_literal_str(arg_type) {
-            fixture = Cow::Owned((self.magic_conversion)(fixture, arg_type));
-        }
-        Some(parse_quote! {
-            #unused_mut
-            let #pat = #fixture;
-        })
-    }
-
-    fn fixture_name(&self, ident: &Pat) -> Ident {
-        let ident = ident
-            .maybe_ident()
-            .cloned()
-            .expect("BUG: Here all arguments should be PatIdent types");
-        let id_str = ident.to_string();
-        if id_str.starts_with('_') && !id_str.starts_with("__") {
-            Ident::new(&id_str[1..], ident.span())
-        } else {
-            ident
-        }
-    }
-
-    fn type_can_be_get_from_literal_str(&self, t: &Type) -> bool {
-        // Check valid type to apply magic conversion
-        match t {
-            Type::ImplTrait(_)
-            | Type::TraitObject(_)
-            | Type::Infer(_)
-            | Type::Group(_)
-            | Type::Macro(_)
-            | Type::Never(_)
-            | Type::Paren(_)
-            | Type::Verbatim(_)
-            | Type::Slice(_) => return false,
-            _ => {}
-        }
-        match t.maybe_ident() {
-            Some(id) => !self.generic_types_names.contains(id),
-            None => true,
-        }
-    }
-}
-
-fn default_fixture_resolve(ident: &Ident) -> Cow<Expr> {
-    Cow::Owned(parse_quote! { #ident::default() })
-}
-
-fn handling_magic_conversion_code(fixture: Cow<Expr>, arg_type: &Type) -> Expr {
-    let rstest_path = crate_name();
-    parse_quote! {
-        {
-            use #rstest_path::magic_conversion::*;
-            (&&&Magic::<#arg_type>(std::marker::PhantomData)).magic_conversion(#fixture)
-        }
-    }
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::{
-        test::{assert_eq, *},
-        utils::fn_args,
-    };
-
-    #[rstest]
-    #[case::as_is("fix: String", "let fix = fix::default();")]
-    #[case::without_underscore("_fix: String", "let _fix = fix::default();")]
-    #[case::do_not_remove_inner_underscores("f_i_x: String", "let f_i_x = f_i_x::default();")]
-    #[case::do_not_remove_double_underscore("__fix: String", "let __fix = __fix::default();")]
-    #[case::preserve_mut_but_annotate_as_allow_unused_mut(
-        "mut fix: String",
-        "#[allow(unused_mut)] let mut fix = fix::default();"
-    )]
-    fn call_fixture(#[case] arg_str: &str, #[case] expected: &str) {
-        let arg = arg_str.ast();
-
-        let injected = ArgumentResolver::new(&EmptyResolver {}, &[])
-            .resolve(&arg)
-            .unwrap();
-
-        assert_eq!(injected, expected.ast());
-    }
-
-    #[rstest]
-    #[case::as_is("fix: String", ("fix", expr("bar()")), "let fix = bar();")]
-    #[case::with_allow_unused_mut("mut fix: String", ("fix", expr("bar()")), "#[allow(unused_mut)] let mut fix = bar();")]
-    #[case::without_underscore("_fix: String", ("fix", expr("bar()")), "let _fix = bar();")]
-    #[case::without_remove_underscore_if_value("_orig: S", ("_orig", expr("S{}")), r#"let _orig = S{};"#)]
-    fn call_given_fixture(
-        #[case] arg_str: &str,
-        #[case] rule: (&str, Expr),
-        #[case] expected: &str,
-    ) {
-        let arg = arg_str.ast();
-        let mut resolver = std::collections::HashMap::new();
-        resolver.insert(pat(rule.0), &rule.1);
-
-        let injected = ArgumentResolver::new(&resolver, &[]).resolve(&arg).unwrap();
-
-        assert_eq!(injected, expected.ast());
-    }
-
-    fn _mock_conversion_code(fixture: Cow<Expr>, arg_type: &Type) -> Expr {
-        parse_quote! {
-            #fixture as #arg_type
-        }
-    }
-
-    #[rstest]
-    #[case::implement_it(
-        "fn test(arg: MyType){}",
-        0,
-        r#"let arg = "value to convert" as MyType;"#
-    )]
-    #[case::discard_impl(
-        "fn test(arg: impl AsRef<str>){}",
-        0,
-        r#"let arg = "value to convert";"#
-    )]
-    #[case::discard_generic_type(
-        "fn test<S: AsRef<str>>(arg: S){}",
-        0,
-        r#"let arg = "value to convert";"#
-    )]
-    fn handle_magic_conversion(#[case] fn_str: &str, #[case] n_arg: usize, #[case] expected: &str) {
-        let function = fn_str.ast();
-        let arg = fn_args(&function).nth(n_arg).unwrap();
-        let generics = function
-            .sig
-            .generics
-            .type_params()
-            .map(|tp| &tp.ident)
-            .cloned()
-            .collect::<Vec<_>>();
-
-        let mut resolver = std::collections::HashMap::new();
-        let expr = expr(r#""value to convert""#);
-        resolver.insert(arg.maybe_pat().unwrap().clone(), &expr);
-
-        let ag = ArgumentResolver {
-            resolver: &resolver,
-            generic_types_names: &generics,
-            magic_conversion: &_mock_conversion_code,
-        };
-
-        let injected = ag.resolve(&arg).unwrap();
-
-        assert_eq!(injected, expected.ast());
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/mod.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/mod.rs
deleted file mode 100644
index 275abef..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/mod.rs
+++ /dev/null
@@ -1,450 +0,0 @@
-pub mod crate_resolver;
-pub(crate) mod fixture;
-mod test;
-mod wrapper;
-
-use std::collections::HashMap;
-
-use syn::token::Async;
-
-use proc_macro2::{Span, TokenStream};
-use syn::{parse_quote, Attribute, Expr, FnArg, Ident, ItemFn, Pat, Path, ReturnType, Stmt};
-
-use quote::{format_ident, quote};
-
-use crate::refident::MaybePat;
-use crate::utils::{attr_ends_with, sanitize_ident};
-use crate::{
-    parse::{
-        rstest::{RsTestAttributes, RsTestInfo},
-        testcase::TestCase,
-        vlist::ValueList,
-    },
-    utils::attr_is,
-};
-use crate::{
-    refident::MaybeIdent,
-    resolver::{self, Resolver},
-};
-use wrapper::WrapByModule;
-
-pub(crate) use fixture::render as fixture;
-
-use self::apply_arguments::ApplyArguments;
-use self::crate_resolver::crate_name;
-pub(crate) mod apply_arguments;
-pub(crate) mod inject;
-
-pub(crate) fn single(mut test: ItemFn, mut info: RsTestInfo) -> TokenStream {
-    test.apply_arguments(&mut info.arguments, &mut ());
-
-    let resolver = resolver::fixtures::get(&info.arguments, info.data.fixtures());
-
-    let args = test.sig.inputs.iter().cloned().collect::<Vec<_>>();
-    let attrs = std::mem::take(&mut test.attrs);
-    let asyncness = test.sig.asyncness;
-
-    single_test_case(
-        &test.sig.ident,
-        &test.sig.ident,
-        &args,
-        &attrs,
-        &test.sig.output,
-        asyncness,
-        Some(&test),
-        resolver,
-        &info,
-        &test.sig.generics,
-    )
-}
-
-pub(crate) fn parametrize(mut test: ItemFn, info: RsTestInfo) -> TokenStream {
-    let mut arguments_info = info.arguments.clone();
-    test.apply_arguments(&mut arguments_info, &mut ());
-
-    let resolver_fixtures = resolver::fixtures::get(&info.arguments, info.data.fixtures());
-
-    let rendered_cases = cases_data(&info, test.sig.ident.span())
-        .map(|(name, attrs, resolver)| {
-            TestCaseRender::new(name, attrs, (resolver, &resolver_fixtures))
-        })
-        .map(|case| case.render(&test, &info))
-        .collect();
-
-    test_group(test, rendered_cases)
-}
-
-impl ValueList {
-    fn render(
-        &self,
-        test: &ItemFn,
-        resolver: &dyn Resolver,
-        attrs: &[syn::Attribute],
-        info: &RsTestInfo,
-    ) -> TokenStream {
-        let span = test.sig.ident.span();
-        let test_cases = self
-            .argument_data(resolver, info)
-            .map(|(name, r)| TestCaseRender::new(Ident::new(&name, span), attrs, r))
-            .map(|test_case| test_case.render(test, info));
-
-        quote! { #(#test_cases)* }
-    }
-
-    fn argument_data<'a>(
-        &'a self,
-        resolver: &'a dyn Resolver,
-        info: &'a RsTestInfo,
-    ) -> impl Iterator<Item = (String, Box<(&'a dyn Resolver, (Pat, Expr))>)> + 'a {
-        let max_len = self.values.len();
-        self.values.iter().enumerate().map(move |(index, value)| {
-            let description = sanitize_ident(&value.description());
-            let arg = info.arguments.inner_pat(&self.arg);
-
-            let arg_name = arg
-                .maybe_ident()
-                .expect("BUG: Here all arguments should be PatIdent types")
-                .to_string();
-
-            let name = format!(
-                "{}_{:0len$}_{description:.64}",
-                arg_name,
-                index + 1,
-                len = max_len.display_len()
-            );
-            let resolver_this = (arg.clone(), value.expr.clone());
-            (name, Box::new((resolver, resolver_this)))
-        })
-    }
-}
-
-fn _matrix_recursive<'a>(
-    test: &ItemFn,
-    list_values: &'a [&'a ValueList],
-    resolver: &dyn Resolver,
-    attrs: &'a [syn::Attribute],
-    info: &RsTestInfo,
-) -> TokenStream {
-    if list_values.is_empty() {
-        return Default::default();
-    }
-    let vlist = list_values[0];
-    let list_values = &list_values[1..];
-
-    if list_values.is_empty() {
-        let mut attrs = attrs.to_vec();
-        attrs.push(parse_quote!(
-            #[allow(non_snake_case)]
-        ));
-        vlist.render(test, resolver, &attrs, info)
-    } else {
-        let span = test.sig.ident.span();
-        let modules = vlist
-            .argument_data(resolver, info)
-            .map(move |(name, resolver)| {
-                _matrix_recursive(test, list_values, &resolver, attrs, info)
-                    .wrap_by_mod(&Ident::new(&name, span))
-            });
-
-        quote! { #(
-            #[allow(non_snake_case)]
-            #modules
-        )* }
-    }
-}
-
-pub(crate) fn matrix(mut test: ItemFn, mut info: RsTestInfo) -> TokenStream {
-    test.apply_arguments(&mut info.arguments, &mut ());
-    let span = test.sig.ident.span();
-
-    let cases = cases_data(&info, span).collect::<Vec<_>>();
-
-    let resolver = resolver::fixtures::get(&info.arguments, info.data.fixtures());
-    let rendered_cases = if cases.is_empty() {
-        let list_values = info.data.list_values().collect::<Vec<_>>();
-        _matrix_recursive(&test, &list_values, &resolver, &[], &info)
-    } else {
-        cases
-            .into_iter()
-            .map(|(case_name, attrs, case_resolver)| {
-                let list_values = info.data.list_values().collect::<Vec<_>>();
-                _matrix_recursive(
-                    &test,
-                    &list_values,
-                    &(case_resolver, &resolver),
-                    attrs,
-                    &info,
-                )
-                .wrap_by_mod(&case_name)
-            })
-            .collect()
-    };
-
-    test_group(test, rendered_cases)
-}
-
-fn resolve_default_test_attr(is_async: bool) -> TokenStream {
-    if is_async {
-        quote! { #[async_std::test] }
-    } else {
-        quote! { #[test] }
-    }
-}
-
-fn render_exec_call(fn_path: Path, args: &[Expr], is_async: bool) -> TokenStream {
-    if is_async {
-        quote! {#fn_path(#(#args),*).await}
-    } else {
-        quote! {#fn_path(#(#args),*)}
-    }
-}
-
-fn render_test_call(
-    fn_path: Path,
-    args: &[Expr],
-    timeout: Option<Expr>,
-    is_async: bool,
-) -> TokenStream {
-    let timeout = timeout.map(|x| quote! {#x}).or_else(|| {
-        std::env::var("RSTEST_TIMEOUT")
-            .ok()
-            .map(|to| quote! { std::time::Duration::from_secs( (#to).parse().unwrap()) })
-    });
-    let rstest_path = crate_name();
-    match (timeout, is_async) {
-        (Some(to_expr), true) => quote! {
-            use #rstest_path::timeout::*;
-            execute_with_timeout_async(move || #fn_path(#(#args),*), #to_expr).await
-        },
-        (Some(to_expr), false) => quote! {
-            use #rstest_path::timeout::*;
-            execute_with_timeout_sync(move || #fn_path(#(#args),*), #to_expr)
-        },
-        _ => render_exec_call(fn_path, args, is_async),
-    }
-}
-
-fn generics_types_ident(generics: &syn::Generics) -> impl Iterator<Item = &'_ Ident> {
-    generics.type_params().map(|tp| &tp.ident)
-}
-
-/// Render a single test case:
-///
-/// * `name` - Test case name
-/// * `testfn_name` - The name of test function to call
-/// * `args` - The arguments of the test function
-/// * `attrs` - The expected test attributes
-/// * `output` - The expected test return type
-/// * `asyncness` - The `async` fn token
-/// * `test_impl` - If you want embed test function (should be the one called by `testfn_name`)
-/// * `resolver` - The resolver used to resolve injected values
-/// * `info` - `RsTestInfo` that's expose the requested test behavior
-/// * `generic_types` - The generic types used in signature
-///
-// Ok I need some refactoring here but now that not a real issue
-#[allow(clippy::too_many_arguments)]
-fn single_test_case(
-    name: &Ident,
-    testfn_name: &Ident,
-    args: &[FnArg],
-    attrs: &[Attribute],
-    output: &ReturnType,
-    asyncness: Option<Async>,
-    test_impl: Option<&ItemFn>,
-    resolver: impl Resolver,
-    info: &RsTestInfo,
-    generics: &syn::Generics,
-) -> TokenStream {
-    let (attrs, trace_me): (Vec<_>, Vec<_>) =
-        attrs.iter().cloned().partition(|a| !attr_is(a, "trace"));
-    let mut attributes = info.attributes.clone();
-    if !trace_me.is_empty() {
-        attributes.add_trace(format_ident!("trace"));
-    }
-
-    let generics_types = generics_types_ident(generics).cloned().collect::<Vec<_>>();
-    let args = info
-        .arguments
-        .replace_fn_args_with_related_inner_pat(args.iter().cloned())
-        .collect::<Vec<_>>();
-
-    let (injectable_args, ignored_args): (Vec<_>, Vec<_>) =
-        args.iter().partition(|arg| match arg.maybe_pat() {
-            Some(pat) => !info.arguments.is_ignore(pat),
-            None => true,
-        });
-
-    let inject = inject::resolve_arguments(injectable_args.into_iter(), &resolver, &generics_types);
-
-    let args = args
-        .iter()
-        .filter_map(MaybePat::maybe_pat)
-        .cloned()
-        .collect::<Vec<_>>();
-    let trace_args = trace_arguments(args.iter(), &attributes);
-
-    let is_async = asyncness.is_some();
-    let (attrs, timeouts): (Vec<_>, Vec<_>) =
-        attrs.iter().cloned().partition(|a| !attr_is(a, "timeout"));
-
-    let timeout = timeouts
-        .into_iter()
-        .last()
-        .map(|attribute| attribute.parse_args::<Expr>().unwrap());
-
-    // If no injected attribute provided use the default one
-    let test_attr = if attrs
-        .iter()
-        .any(|a| attr_ends_with(a, &parse_quote! {test}))
-    {
-        None
-    } else {
-        Some(resolve_default_test_attr(is_async))
-    };
-
-    let args = args
-        .iter()
-        .map(|arg| (arg, info.arguments.is_by_refs(arg)))
-        .filter_map(|(a, by_refs)| a.maybe_ident().map(|id| (id, by_refs)))
-        .map(|(arg, by_ref)| {
-            if by_ref {
-                parse_quote! { &#arg }
-            } else {
-                parse_quote! { #arg }
-            }
-        })
-        .collect::<Vec<_>>();
-
-    let execute = render_test_call(testfn_name.clone().into(), &args, timeout, is_async);
-    let lifetimes = generics.lifetimes();
-
-    quote! {
-        #test_attr
-        #(#attrs)*
-        #asyncness fn #name<#(#lifetimes,)*>(#(#ignored_args,)*) #output {
-            #test_impl
-            #inject
-            #trace_args
-            #execute
-        }
-    }
-}
-
-fn trace_arguments<'a>(
-    args: impl Iterator<Item = &'a Pat>,
-    attributes: &RsTestAttributes,
-) -> Option<TokenStream> {
-    let mut statements = args
-        .filter(|&arg| attributes.trace_me(arg))
-        .map(|arg| {
-            let s: Stmt = parse_quote! {
-                println!("{} = {:?}", stringify!(#arg), #arg);
-            };
-            s
-        })
-        .peekable();
-    if statements.peek().is_some() {
-        Some(quote! {
-            println!("{:-^40}", " TEST ARGUMENTS ");
-            #(#statements)*
-            println!("{:-^40}", " TEST START ");
-        })
-    } else {
-        None
-    }
-}
-
-struct TestCaseRender<'a> {
-    name: Ident,
-    attrs: &'a [syn::Attribute],
-    resolver: Box<dyn Resolver + 'a>,
-}
-
-impl<'a> TestCaseRender<'a> {
-    pub fn new<R: Resolver + 'a>(name: Ident, attrs: &'a [syn::Attribute], resolver: R) -> Self {
-        TestCaseRender {
-            name,
-            attrs,
-            resolver: Box::new(resolver),
-        }
-    }
-
-    fn render(self, testfn: &ItemFn, info: &RsTestInfo) -> TokenStream {
-        let args = testfn.sig.inputs.iter().cloned().collect::<Vec<_>>();
-        let mut attrs = testfn.attrs.clone();
-        attrs.extend(self.attrs.iter().cloned());
-        let asyncness = testfn.sig.asyncness;
-
-        single_test_case(
-            &self.name,
-            &testfn.sig.ident,
-            &args,
-            &attrs,
-            &testfn.sig.output,
-            asyncness,
-            None,
-            self.resolver,
-            info,
-            &testfn.sig.generics,
-        )
-    }
-}
-
-fn test_group(mut test: ItemFn, rendered_cases: TokenStream) -> TokenStream {
-    let fname = &test.sig.ident;
-    test.attrs = vec![];
-
-    quote! {
-        #[cfg(test)]
-        #test
-
-        #[cfg(test)]
-        mod #fname {
-            use super::*;
-
-            #rendered_cases
-        }
-    }
-}
-
-trait DisplayLen {
-    fn display_len(&self) -> usize;
-}
-
-impl<D: std::fmt::Display> DisplayLen for D {
-    fn display_len(&self) -> usize {
-        format!("{self}").len()
-    }
-}
-
-fn format_case_name(case: &TestCase, index: usize, display_len: usize) -> String {
-    let description = case
-        .description
-        .as_ref()
-        .map(|d| format!("_{d}"))
-        .unwrap_or_default();
-    format!("case_{index:0display_len$}{description}")
-}
-
-fn cases_data(
-    info: &RsTestInfo,
-    name_span: Span,
-) -> impl Iterator<Item = (Ident, &[syn::Attribute], HashMap<Pat, &syn::Expr>)> {
-    let display_len = info.data.cases().count().display_len();
-    info.data.cases().enumerate().map({
-        move |(n, case)| {
-            let resolver_case = info
-                .data
-                .case_args()
-                .cloned()
-                .map(|arg| info.arguments.inner_pat(&arg).clone())
-                .zip(case.args.iter())
-                .collect::<HashMap<_, _>>();
-            (
-                Ident::new(&format_case_name(case, n + 1, display_len), name_span),
-                case.attrs.as_slice(),
-                resolver_case,
-            )
-        }
-    })
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/test.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/test.rs
deleted file mode 100644
index f4086757..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/test.rs
+++ /dev/null
@@ -1,1871 +0,0 @@
-#![cfg(test)]
-
-use syn::{
-    parse::{Parse, ParseStream, Result},
-    parse2, parse_str,
-    visit::Visit,
-    ItemFn, ItemMod, LocalInit,
-};
-
-use super::*;
-use crate::test::{assert_eq, fixture, *};
-use crate::utils::*;
-
-trait SetAsync {
-    fn set_async(&mut self, is_async: bool);
-}
-
-impl SetAsync for ItemFn {
-    fn set_async(&mut self, is_async: bool) {
-        self.sig.asyncness = if is_async {
-            Some(parse_quote! { async })
-        } else {
-            None
-        };
-    }
-}
-
-fn trace_argument_code_string(arg_name: &str) -> String {
-    let arg_name = ident(arg_name);
-    let statement: Stmt = parse_quote! {
-        println!("{} = {:?}", stringify!(#arg_name) ,#arg_name);
-    };
-    statement.display_code()
-}
-
-mod single_test_should {
-    use rstest_test::{assert_in, assert_not_in};
-
-    use crate::{
-        parse::arguments::{ArgumentsInfo, FutureArg},
-        test::{assert_eq, *},
-    };
-
-    use super::*;
-
-    #[test]
-    fn add_return_type_if_any() {
-        let input_fn: ItemFn = "fn function(fix: String) -> Result<i32, String> { Ok(42) }".ast();
-
-        let result: ItemFn = single(input_fn.clone(), Default::default()).ast();
-
-        assert_eq!(result.sig.output, input_fn.sig.output);
-    }
-
-    fn extract_inner_test_function(outer: &ItemFn) -> ItemFn {
-        let first_stmt = outer.block.stmts.get(0).unwrap();
-
-        parse_quote! {
-            #first_stmt
-        }
-    }
-
-    #[test]
-    fn include_given_function() {
-        let input_fn: ItemFn = r#"
-                pub fn test<R: AsRef<str>, B>(mut s: String, v: &u32, a: &mut [i32], r: R) -> (u32, B, String, &str)
-                        where B: Borrow<u32>
-                {
-                    let some = 42;
-                    assert_eq!(42, some);
-                }
-                "#.ast();
-
-        let result: ItemFn = single(input_fn.clone(), Default::default()).ast();
-
-        let inner_fn = extract_inner_test_function(&result);
-        let inner_fn_impl: Stmt = inner_fn.block.stmts.last().cloned().unwrap();
-
-        assert_eq!(inner_fn.sig, input_fn.sig);
-        assert_eq!(inner_fn_impl.display_code(), input_fn.block.display_code());
-    }
-
-    #[test]
-    fn not_remove_lifetimes() {
-        let input_fn: ItemFn = r#"
-                pub fn test<'a, 'b, 'c: 'a + 'b>(a: A<'a>, b: A<'b>, c: A<'c>) -> A<'c>
-                {
-                }
-                "#
-        .ast();
-
-        let result: ItemFn = single(input_fn.clone(), Default::default()).ast();
-
-        assert_eq!(3, result.sig.generics.lifetimes().count());
-    }
-
-    #[rstest]
-    fn not_copy_any_attributes(
-        #[values(
-            "#[test]",
-            "#[very::complicated::path]",
-            "#[test]#[should_panic]",
-            "#[should_panic]#[test]",
-            "#[a]#[b]#[c]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let mut input_fn: ItemFn = r#"pub fn test(_s: String){}"#.ast();
-        input_fn.attrs = attributes;
-
-        let result: ItemFn = single(input_fn.clone(), Default::default()).ast();
-        let first_stmt = result.block.stmts.get(0).unwrap();
-
-        let inner_fn: ItemFn = parse_quote! {
-            #first_stmt
-        };
-
-        assert!(inner_fn.attrs.is_empty());
-    }
-
-    #[rstest]
-    #[case::sync(false)]
-    #[case::async_fn(true)]
-    fn use_injected_test_attribute_to_mark_test_functions_if_any(
-        #[case] is_async: bool,
-        #[values(
-            "#[test]",
-            "#[other::test]",
-            "#[very::complicated::path::test]",
-            "#[prev]#[test]",
-            "#[test]#[after]",
-            "#[prev]#[other::test]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let mut input_fn: ItemFn = r#"fn test(_s: String) {} "#.ast();
-        input_fn.set_async(is_async);
-        input_fn.attrs = attributes.clone();
-
-        let result: ItemFn = single(input_fn.clone(), Default::default()).ast();
-
-        assert_eq!(result.attrs, attributes);
-    }
-
-    #[test]
-    fn use_global_await() {
-        let input_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {} "#.ast();
-        let mut info: RsTestInfo = Default::default();
-        info.arguments.set_global_await(true);
-        info.arguments.add_future(pat("a"));
-        info.arguments.add_future(pat("b"));
-
-        let item_fn: ItemFn = single(input_fn.clone(), info).ast();
-
-        assert_in!(
-            item_fn.block.display_code(),
-            await_argument_code_string("a")
-        );
-        assert_in!(
-            item_fn.block.display_code(),
-            await_argument_code_string("b")
-        );
-        assert_not_in!(
-            item_fn.block.display_code(),
-            await_argument_code_string("c")
-        );
-    }
-
-    #[test]
-    fn use_selective_await() {
-        let input_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {} "#.ast();
-        let mut info: RsTestInfo = Default::default();
-        info.arguments.set_future(pat("a"), FutureArg::Define);
-        info.arguments.set_future(pat("b"), FutureArg::Await);
-
-        let item_fn: ItemFn = single(input_fn.clone(), info).ast();
-
-        assert_not_in!(
-            item_fn.block.display_code(),
-            await_argument_code_string("a",)
-        );
-        assert_in!(
-            item_fn.block.display_code(),
-            await_argument_code_string("b")
-        );
-        assert_not_in!(
-            item_fn.block.display_code(),
-            await_argument_code_string("c")
-        );
-    }
-
-    #[test]
-    fn use_ref_if_any() {
-        let input_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {} "#.ast();
-        let mut info: RsTestInfo = Default::default();
-        info.arguments.set_by_ref(pat("a"));
-        info.arguments.set_by_ref(pat("c"));
-
-        let item_fn: ItemFn = single(input_fn.clone(), info).ast();
-
-        assert_in!(
-            item_fn.block.stmts.last().display_code(),
-            ref_argument_code_string("a")
-        );
-        assert_not_in!(
-            item_fn.block.stmts.last().display_code(),
-            ref_argument_code_string("b")
-        );
-        assert_in!(
-            item_fn.block.stmts.last().display_code(),
-            ref_argument_code_string("c")
-        );
-    }
-
-    #[test]
-    fn trace_arguments_values() {
-        let input_fn: ItemFn = r#"#[trace]fn test(s: String, a:i32) {} "#.ast();
-
-        let item_fn: ItemFn = single(input_fn.clone(), Default::default()).ast();
-
-        assert_in!(
-            item_fn.block.display_code(),
-            trace_argument_code_string("s")
-        );
-        assert_in!(
-            item_fn.block.display_code(),
-            trace_argument_code_string("a")
-        );
-    }
-
-    #[test]
-    fn trace_not_all_arguments_values() {
-        let input_fn: ItemFn =
-            r#"#[trace] fn test(a_trace: i32, b_no_trace:i32, c_no_trace:i32, d_trace:i32) {} "#
-                .ast();
-
-        let mut attributes = RsTestAttributes::default();
-        attributes.add_notraces(vec![pat("b_no_trace"), pat("c_no_trace")]);
-
-        let item_fn: ItemFn = single(
-            input_fn.clone(),
-            RsTestInfo {
-                attributes,
-                ..Default::default()
-            },
-        )
-        .ast();
-
-        assert_in!(
-            item_fn.block.display_code(),
-            trace_argument_code_string("a_trace")
-        );
-        assert_not_in!(
-            item_fn.block.display_code(),
-            trace_argument_code_string("b_no_trace")
-        );
-        assert_not_in!(
-            item_fn.block.display_code(),
-            trace_argument_code_string("c_no_trace")
-        );
-        assert_in!(
-            item_fn.block.display_code(),
-            trace_argument_code_string("d_trace")
-        );
-    }
-
-    #[rstest]
-    #[case::sync("", parse_quote! { #[test] })]
-    #[case::async_fn("async", parse_quote! { #[async_std::test] })]
-    fn add_default_test_attribute(
-        #[case] prefix: &str,
-        #[case] test_attribute: Attribute,
-        #[values(
-            "",
-            "#[no_one]",
-            "#[should_panic]",
-            "#[should_panic]#[other]",
-            "#[a::b::c]#[should_panic]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let mut input_fn: ItemFn = format!(r#"{} fn test(_s: String) {{}} "#, prefix).ast();
-        input_fn.attrs = attributes.clone();
-
-        let result: ItemFn = single(input_fn.clone(), Default::default()).ast();
-
-        assert_eq!(result.attrs[0], test_attribute);
-        assert_eq!(&result.attrs[1..], attributes.as_slice());
-    }
-
-    #[rstest]
-    #[case::sync(false, false)]
-    #[case::async_fn(true, true)]
-    fn use_await_for_no_async_test_function(#[case] is_async: bool, #[case] use_await: bool) {
-        let mut input_fn: ItemFn = r#"fn test(_s: String) {} "#.ast();
-        input_fn.set_async(is_async);
-
-        let result: ItemFn = single(input_fn.clone(), Default::default()).ast();
-
-        let last_stmt = result.block.stmts.last().unwrap();
-
-        assert_eq!(use_await, last_stmt.is_await());
-    }
-    #[test]
-    fn add_future_boilerplate_if_requested() {
-        let item_fn: ItemFn = r#"
-                    async fn test(async_ref_u32: &u32, async_u32: u32,simple: u32)
-                    { }
-                     "#
-        .ast();
-
-        let mut arguments = ArgumentsInfo::default();
-        arguments.add_future(pat("async_ref_u32"));
-        arguments.add_future(pat("async_u32"));
-
-        let info = RsTestInfo {
-            arguments,
-            ..Default::default()
-        };
-
-        let result: ItemFn = single(item_fn.clone(), info).ast();
-        let inner_fn = extract_inner_test_function(&result);
-
-        let expected = parse_str::<syn::ItemFn>(
-            r#"async fn test<'_async_ref_u32>(
-                        async_ref_u32: impl std::future::Future<Output = &'_async_ref_u32 u32>, 
-                        async_u32: impl std::future::Future<Output = u32>, 
-                        simple: u32
-                    )
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        assert_eq!(inner_fn.sig, expected.sig);
-    }
-}
-
-struct TestsGroup {
-    requested_test: ItemFn,
-    module: ItemMod,
-}
-
-impl Parse for TestsGroup {
-    fn parse(input: ParseStream) -> Result<Self> {
-        Ok(Self {
-            requested_test: input.parse()?,
-            module: input.parse()?,
-        })
-    }
-}
-
-trait QueryAttrs {
-    #[allow(dead_code)]
-    fn has_attr(&self, attr: &syn::Path) -> bool;
-    fn has_attr_that_ends_with(&self, attr: &syn::PathSegment) -> bool;
-}
-
-impl QueryAttrs for ItemFn {
-    fn has_attr(&self, attr: &syn::Path) -> bool {
-        self.attrs.iter().find(|a| a.path() == attr).is_some()
-    }
-
-    fn has_attr_that_ends_with(&self, name: &syn::PathSegment) -> bool {
-        self.attrs
-            .iter()
-            .find(|a| attr_ends_with(a, name))
-            .is_some()
-    }
-}
-
-/// To extract all test functions
-struct TestFunctions(Vec<ItemFn>);
-
-fn is_test_fn(item_fn: &ItemFn) -> bool {
-    item_fn.has_attr_that_ends_with(&parse_quote! { test })
-}
-
-impl TestFunctions {
-    fn is_test_fn(item_fn: &ItemFn) -> bool {
-        is_test_fn(item_fn)
-    }
-}
-
-impl<'ast> Visit<'ast> for TestFunctions {
-    //noinspection RsTypeCheck
-    fn visit_item_fn(&mut self, item_fn: &'ast ItemFn) {
-        if Self::is_test_fn(item_fn) {
-            self.0.push(item_fn.clone())
-        }
-    }
-}
-
-trait Named {
-    fn name(&self) -> String;
-}
-
-impl Named for Ident {
-    fn name(&self) -> String {
-        self.to_string()
-    }
-}
-
-impl Named for ItemFn {
-    fn name(&self) -> String {
-        self.sig.ident.name()
-    }
-}
-
-impl Named for ItemMod {
-    fn name(&self) -> String {
-        self.ident.name()
-    }
-}
-
-trait Names {
-    fn names(&self) -> Vec<String>;
-}
-
-impl<T: Named> Names for Vec<T> {
-    fn names(&self) -> Vec<String> {
-        self.iter().map(Named::name).collect()
-    }
-}
-
-trait ModuleInspector {
-    fn get_all_tests(&self) -> Vec<ItemFn>;
-    fn get_tests(&self) -> Vec<ItemFn>;
-    fn get_modules(&self) -> Vec<ItemMod>;
-}
-
-impl ModuleInspector for ItemMod {
-    fn get_tests(&self) -> Vec<ItemFn> {
-        self.content
-            .as_ref()
-            .map(|(_, items)| {
-                items
-                    .iter()
-                    .filter_map(|it| match it {
-                        syn::Item::Fn(item_fn) if is_test_fn(item_fn) => Some(item_fn.clone()),
-                        _ => None,
-                    })
-                    .collect()
-            })
-            .unwrap_or_default()
-    }
-
-    fn get_all_tests(&self) -> Vec<ItemFn> {
-        let mut f = TestFunctions(vec![]);
-        f.visit_item_mod(&self);
-        f.0
-    }
-
-    fn get_modules(&self) -> Vec<ItemMod> {
-        self.content
-            .as_ref()
-            .map(|(_, items)| {
-                items
-                    .iter()
-                    .filter_map(|it| match it {
-                        syn::Item::Mod(item_mod) => Some(item_mod.clone()),
-                        _ => None,
-                    })
-                    .collect()
-            })
-            .unwrap_or_default()
-    }
-}
-
-impl ModuleInspector for TestsGroup {
-    fn get_all_tests(&self) -> Vec<ItemFn> {
-        self.module.get_all_tests()
-    }
-
-    fn get_tests(&self) -> Vec<ItemFn> {
-        self.module.get_tests()
-    }
-
-    fn get_modules(&self) -> Vec<ItemMod> {
-        self.module.get_modules()
-    }
-}
-
-#[derive(Default, Debug)]
-struct Assignments(HashMap<String, syn::Expr>);
-
-impl<'ast> Visit<'ast> for Assignments {
-    //noinspection RsTypeCheck
-    fn visit_local(&mut self, assign: &syn::Local) {
-        match &assign {
-            syn::Local {
-                pat: syn::Pat::Ident(pat),
-                init: Some(LocalInit { expr, .. }),
-                ..
-            } => {
-                self.0.insert(pat.ident.to_string(), expr.as_ref().clone());
-            }
-            _ => {}
-        }
-    }
-}
-
-impl Assignments {
-    pub fn collect_assignments(item_fn: &ItemFn) -> Self {
-        let mut collect = Self::default();
-        collect.visit_item_fn(item_fn);
-        collect
-    }
-}
-
-impl From<TokenStream> for TestsGroup {
-    fn from(tokens: TokenStream) -> Self {
-        syn::parse2::<TestsGroup>(tokens).unwrap()
-    }
-}
-
-mod cases_should {
-
-    use rstest_test::{assert_in, assert_not_in};
-
-    use crate::parse::{
-        arguments::{ArgumentsInfo, FutureArg},
-        rstest::{RsTestData, RsTestItem},
-    };
-
-    use super::{assert_eq, *};
-
-    fn into_rstest_data(item_fn: &ItemFn) -> RsTestData {
-        RsTestData {
-            items: fn_args_pats(item_fn)
-                .cloned()
-                .map(RsTestItem::CaseArgName)
-                .collect(),
-        }
-    }
-
-    struct TestCaseBuilder {
-        item_fn: ItemFn,
-        info: RsTestInfo,
-    }
-
-    impl TestCaseBuilder {
-        fn new(item_fn: ItemFn) -> Self {
-            let info: RsTestInfo = into_rstest_data(&item_fn).into();
-            Self { item_fn, info }
-        }
-
-        fn from<S: AsRef<str>>(s: S) -> Self {
-            Self::new(s.as_ref().ast())
-        }
-
-        fn set_async(mut self, is_async: bool) -> Self {
-            self.item_fn.set_async(is_async);
-            self
-        }
-
-        fn push_case<T: Into<TestCase>>(mut self, case: T) -> Self {
-            self.info.push_case(case.into());
-            self
-        }
-
-        fn extend<T: Into<TestCase>>(mut self, cases: impl Iterator<Item = T>) -> Self {
-            self.info.extend(cases.map(Into::into));
-            self
-        }
-
-        fn take(self) -> (ItemFn, RsTestInfo) {
-            (self.item_fn, self.info)
-        }
-
-        fn add_notrace(mut self, pats: Vec<Pat>) -> Self {
-            self.info.attributes.add_notraces(pats);
-            self
-        }
-    }
-
-    fn one_simple_case() -> (ItemFn, RsTestInfo) {
-        TestCaseBuilder::from(r#"fn test(mut fix: String) { println!("user code") }"#)
-            .push_case(r#"String::from("3")"#)
-            .take()
-    }
-
-    fn some_simple_cases(cases: i32) -> (ItemFn, RsTestInfo) {
-        TestCaseBuilder::from(r#"fn test(mut fix: String) { println!("user code") }"#)
-            .extend((0..cases).map(|_| r#"String::from("3")"#))
-            .take()
-    }
-
-    #[test]
-    fn create_a_module_named_as_test_function() {
-        let (item_fn, info) =
-            TestCaseBuilder::from("fn should_be_the_module_name(mut fix: String) {}").take();
-
-        let tokens = parametrize(item_fn, info);
-
-        let output = TestsGroup::from(tokens);
-
-        assert_eq!(output.module.ident, "should_be_the_module_name");
-    }
-
-    #[test]
-    fn copy_user_function() {
-        let (item_fn, info) = TestCaseBuilder::from(
-            r#"fn should_be_the_module_name(mut fix: String) { println!("user code") }"#,
-        )
-        .take();
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let mut output = TestsGroup::from(tokens);
-        let test_impl: Stmt = output.requested_test.block.stmts.last().cloned().unwrap();
-
-        output.requested_test.attrs = vec![];
-        assert_eq!(output.requested_test.sig, item_fn.sig);
-        assert_eq!(test_impl.display_code(), item_fn.block.display_code());
-    }
-
-    #[test]
-    fn should_not_copy_should_panic_attribute() {
-        let (item_fn, info) = TestCaseBuilder::from(
-            r#"#[should_panic] fn with_should_panic(mut fix: String) { println!("user code") }"#,
-        )
-        .take();
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let output = TestsGroup::from(tokens);
-
-        assert!(!format!("{:?}", output.requested_test.attrs).contains("should_panic"));
-    }
-
-    #[test]
-    fn should_mark_test_with_given_attributes() {
-        let (item_fn, info) =
-            TestCaseBuilder::from(r#"#[should_panic] #[other(value)] fn test(s: String){}"#)
-                .push_case(r#"String::from("3")"#)
-                .take();
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        // Sanity check
-        assert!(tests.len() > 0);
-
-        for t in tests {
-            assert_eq!(item_fn.attrs, &t.attrs[1..]);
-        }
-    }
-
-    #[rstest]
-    #[case::empty("")]
-    #[case::some_attrs("#[a]#[b::c]#[should_panic]")]
-    fn should_add_attributes_given_in_the_test_case(
-        #[case] fnattrs: &str,
-        #[values("", "#[should_panic]", "#[first]#[second(arg)]")] case_attrs: &str,
-    ) {
-        let given_attrs = attrs(fnattrs);
-        let case_attrs = attrs(case_attrs);
-        let (mut item_fn, info) = TestCaseBuilder::from(r#"fn test(v: i32){}"#)
-            .push_case(TestCase::from("42").with_attrs(case_attrs.clone()))
-            .take();
-
-        item_fn.attrs = given_attrs.clone();
-
-        let tokens = parametrize(item_fn, info);
-
-        let test_attrs = &TestsGroup::from(tokens).get_all_tests()[0].attrs[1..];
-
-        let l = given_attrs.len();
-
-        assert_eq!(case_attrs.as_slice(), &test_attrs[l..]);
-        assert_eq!(given_attrs.as_slice(), &test_attrs[..l]);
-    }
-
-    #[test]
-    fn mark_user_function_as_test() {
-        let (item_fn, info) = TestCaseBuilder::from(
-            r#"fn should_be_the_module_name(mut fix: String) { println!("user code") }"#,
-        )
-        .take();
-        let tokens = parametrize(item_fn, info);
-
-        let output = TestsGroup::from(tokens);
-
-        assert_eq!(
-            output.requested_test.attrs,
-            vec![parse_quote! {#[cfg(test)]}]
-        );
-    }
-
-    #[test]
-    fn mark_module_as_test() {
-        let (item_fn, info) = TestCaseBuilder::from(
-            r#"fn should_be_the_module_name(mut fix: String) { println!("user code") }"#,
-        )
-        .take();
-        let tokens = parametrize(item_fn, info);
-
-        let output = TestsGroup::from(tokens);
-
-        assert_eq!(output.module.attrs, vec![parse_quote! {#[cfg(test)]}]);
-    }
-
-    #[test]
-    fn add_a_test_case() {
-        let (item_fn, info) = one_simple_case();
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert_eq!(1, tests.len());
-        assert!(&tests[0].sig.ident.to_string().starts_with("case_"))
-    }
-
-    #[test]
-    fn add_return_type_if_any() {
-        let (item_fn, info) =
-            TestCaseBuilder::from("fn function(fix: String) -> Result<i32, String> { Ok(42) }")
-                .push_case(r#"String::from("3")"#)
-                .take();
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert_eq!(tests[0].sig.output, item_fn.sig.output);
-    }
-
-    #[test]
-    fn not_copy_user_function() {
-        let t_name = "test_name";
-        let (item_fn, info) = TestCaseBuilder::from(format!(
-            "fn {}(fix: String) -> Result<i32, String> {{ Ok(42) }}",
-            t_name
-        ))
-        .push_case(r#"String::from("3")"#)
-        .take();
-
-        let tokens = parametrize(item_fn, info);
-
-        let test = &TestsGroup::from(tokens).get_all_tests()[0];
-        let inner_functions = extract_inner_functions(&test.block);
-
-        assert_eq!(0, inner_functions.filter(|f| f.sig.ident == t_name).count());
-    }
-
-    #[test]
-    fn starts_case_number_from_1() {
-        let (item_fn, info) = one_simple_case();
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert!(
-            &tests[0].sig.ident.to_string().starts_with("case_1"),
-            "Should starts with case_1 but is {}",
-            tests[0].sig.ident.to_string()
-        )
-    }
-
-    #[test]
-    fn add_all_test_cases() {
-        let (item_fn, info) = some_simple_cases(5);
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        let valid_names = tests
-            .iter()
-            .filter(|it| it.sig.ident.to_string().starts_with("case_"));
-        assert_eq!(5, valid_names.count())
-    }
-
-    #[test]
-    fn left_pad_case_number_by_zeros() {
-        let (item_fn, info) = some_simple_cases(1000);
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        let first_name = tests[0].sig.ident.to_string();
-        let last_name = tests[999].sig.ident.to_string();
-
-        assert!(
-            first_name.ends_with("_0001"),
-            "Should ends by _0001 but is {}",
-            first_name
-        );
-        assert!(
-            last_name.ends_with("_1000"),
-            "Should ends by _1000 but is {}",
-            last_name
-        );
-
-        let valid_names = tests
-            .iter()
-            .filter(|it| it.sig.ident.to_string().len() == first_name.len());
-        assert_eq!(1000, valid_names.count())
-    }
-
-    #[test]
-    fn use_description_if_any() {
-        let (item_fn, mut info) = one_simple_case();
-        let description = "show_this_description";
-
-        if let &mut RsTestItem::TestCase(ref mut case) = &mut info.data.items[1] {
-            case.description = Some(parse_str::<Ident>(description).unwrap());
-        } else {
-            panic!("Test case should be the second one");
-        }
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert!(tests[0]
-            .sig
-            .ident
-            .to_string()
-            .ends_with(&format!("_{}", description)));
-    }
-
-    #[rstest]
-    #[case::sync(false)]
-    #[case::async_fn(true)]
-    fn use_injected_test_attribute_to_mark_test_functions_if_any(
-        #[case] is_async: bool,
-        #[values(
-            "#[test]",
-            "#[other::test]",
-            "#[very::complicated::path::test]",
-            "#[prev]#[test]",
-            "#[test]#[after]",
-            "#[prev]#[other::test]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let (mut item_fn, info) = TestCaseBuilder::from(r#"fn test(s: String){}"#)
-            .push_case(r#"String::from("3")"#)
-            .set_async(is_async)
-            .take();
-        item_fn.attrs = attributes.clone();
-        item_fn.set_async(is_async);
-
-        let tokens = parametrize(item_fn.clone(), info);
-
-        let test = &TestsGroup::from(tokens).get_all_tests()[0];
-
-        assert_eq!(attributes, test.attrs);
-    }
-
-    #[rstest]
-    #[case::sync(false, parse_quote! { #[test] })]
-    #[case::async_fn(true, parse_quote! { #[async_std::test] })]
-    fn add_default_test_attribute(
-        #[case] is_async: bool,
-        #[case] test_attribute: Attribute,
-        #[values(
-            "",
-            "#[no_one]",
-            "#[should_panic]",
-            "#[should_panic]#[other]",
-            "#[a::b::c]#[should_panic]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let (mut item_fn, info) = TestCaseBuilder::from(
-            r#"fn should_be_the_module_name(mut fix: String) { println!("user code") }"#,
-        )
-        .push_case("42")
-        .set_async(is_async)
-        .take();
-        item_fn.attrs = attributes.clone();
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert_eq!(tests[0].attrs[0], test_attribute);
-        assert_eq!(&tests[0].attrs[1..], attributes.as_slice());
-    }
-
-    #[test]
-    fn add_future_boilerplate_if_requested() {
-        let (item_fn, mut info) = TestCaseBuilder::from(
-            r#"async fn test(async_ref_u32: &u32, async_u32: u32,simple: u32) { }"#,
-        )
-        .take();
-
-        let mut arguments = ArgumentsInfo::default();
-        arguments.add_future(pat("async_ref_u32"));
-        arguments.add_future(pat("async_u32"));
-
-        info.arguments = arguments;
-
-        let tokens = parametrize(item_fn.clone(), info);
-        let test_function = TestsGroup::from(tokens).requested_test;
-
-        let expected = parse_str::<syn::ItemFn>(
-            r#"async fn test<'_async_ref_u32>(
-                        async_ref_u32: impl std::future::Future<Output = &'_async_ref_u32 u32>, 
-                        async_u32: impl std::future::Future<Output = u32>, 
-                        simple: u32
-                    )
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        assert_eq!(test_function.sig, expected.sig);
-    }
-
-    #[rstest]
-    #[case::sync(false, false)]
-    #[case::async_fn(true, true)]
-    fn use_await_for_async_test_function(#[case] is_async: bool, #[case] use_await: bool) {
-        let (item_fn, info) =
-            TestCaseBuilder::from(r#"fn test(mut fix: String) { println!("user code") }"#)
-                .set_async(is_async)
-                .push_case(r#"String::from("3")"#)
-                .take();
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        let last_stmt = tests[0].block.stmts.last().unwrap();
-
-        assert_eq!(use_await, last_stmt.is_await());
-    }
-
-    #[test]
-    fn trace_arguments_value() {
-        let (item_fn, info) =
-            TestCaseBuilder::from(r#"#[trace] fn test(a_trace_me: i32, b_trace_me: i32) {}"#)
-                .push_case(TestCase::from_iter(vec!["1", "2"]))
-                .push_case(TestCase::from_iter(vec!["3", "4"]))
-                .take();
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert!(tests.len() > 0);
-        for test in tests {
-            for name in &["a_trace_me", "b_trace_me"] {
-                assert_in!(test.block.display_code(), trace_argument_code_string(name));
-            }
-        }
-    }
-
-    #[test]
-    fn trace_just_some_arguments_value() {
-        let (item_fn, info) =
-            TestCaseBuilder::from(r#"#[trace] fn test(a_trace_me: i32, b_no_trace_me: i32, c_no_trace_me: i32, d_trace_me: i32) {}"#)
-                .push_case(TestCase::from_iter(vec!["1", "2", "1", "2"]))
-                .push_case(TestCase::from_iter(vec!["3", "4", "3", "4"]))
-                .add_notrace(to_pats!(["b_no_trace_me", "c_no_trace_me"]))
-                .take();
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert!(tests.len() > 0);
-        for test in tests {
-            for should_be_present in &["a_trace_me", "d_trace_me"] {
-                assert_in!(
-                    test.block.display_code(),
-                    trace_argument_code_string(should_be_present)
-                );
-            }
-            for should_not_be_present in &["b_trace_me", "c_trace_me"] {
-                assert_not_in!(
-                    test.block.display_code(),
-                    trace_argument_code_string(should_not_be_present)
-                );
-            }
-        }
-    }
-
-    #[test]
-    fn trace_just_one_case() {
-        let (item_fn, info) =
-            TestCaseBuilder::from(r#"fn test(a_no_trace_me: i32, b_trace_me: i32) {}"#)
-                .push_case(TestCase::from_iter(vec!["1", "2"]))
-                .push_case(TestCase::from_iter(vec!["3", "4"]).with_attrs(attrs("#[trace]")))
-                .add_notrace(to_pats!(["a_no_trace_me"]))
-                .take();
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert_not_in!(
-            tests[0].block.display_code(),
-            trace_argument_code_string("b_trace_me")
-        );
-        assert_in!(
-            tests[1].block.display_code(),
-            trace_argument_code_string("b_trace_me")
-        );
-        assert_not_in!(
-            tests[1].block.display_code(),
-            trace_argument_code_string("a_no_trace_me")
-        );
-    }
-
-    #[test]
-    fn use_global_await() {
-        let (item_fn, mut info) = TestCaseBuilder::from(r#"fn test(a: i32, b:i32, c:i32) {}"#)
-            .push_case(TestCase::from_iter(vec!["1", "2", "3"]))
-            .push_case(TestCase::from_iter(vec!["1", "2", "3"]))
-            .take();
-        info.arguments.set_global_await(true);
-        info.arguments.add_future(pat("a"));
-        info.arguments.add_future(pat("b"));
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens);
-
-        let code = tests.requested_test.block.display_code();
-
-        assert_in!(code, await_argument_code_string("a"));
-        assert_in!(code, await_argument_code_string("b"));
-        assert_not_in!(code, await_argument_code_string("c"));
-    }
-
-    #[test]
-    fn use_selective_await() {
-        let (item_fn, mut info) = TestCaseBuilder::from(r#"fn test(a: i32, b:i32, c:i32) {}"#)
-            .push_case(TestCase::from_iter(vec!["1", "2", "3"]))
-            .push_case(TestCase::from_iter(vec!["1", "2", "3"]))
-            .take();
-        info.arguments.set_future(pat("a"), FutureArg::Define);
-        info.arguments.set_future(pat("b"), FutureArg::Await);
-
-        let tokens = parametrize(item_fn, info);
-
-        let tests = TestsGroup::from(tokens);
-
-        let code = tests.requested_test.block.display_code();
-
-        assert_not_in!(code, await_argument_code_string("a"));
-        assert_in!(code, await_argument_code_string("b"));
-        assert_not_in!(code, await_argument_code_string("c"));
-    }
-}
-
-mod matrix_cases_should {
-    use rstest_test::{assert_in, assert_not_in};
-
-    use crate::parse::{
-        arguments::{ArgumentsInfo, FutureArg},
-        rstest::RsTestData,
-    };
-
-    /// Should test matrix tests render without take in account MatrixInfo to RsTestInfo
-    /// transformation
-    use super::{assert_eq, *};
-
-    fn into_rstest_data(item_fn: &ItemFn) -> RsTestData {
-        RsTestData {
-            items: fn_args_pats(item_fn)
-                .cloned()
-                .map(|it| {
-                    ValueList {
-                        arg: it,
-                        values: vec![],
-                    }
-                    .into()
-                })
-                .collect(),
-        }
-    }
-
-    #[test]
-    fn create_a_module_named_as_test_function() {
-        let item_fn = "fn should_be_the_module_name(mut fix: String) {}".ast();
-        let data = into_rstest_data(&item_fn);
-
-        let tokens = matrix(item_fn.clone(), data.into());
-
-        let output = TestsGroup::from(tokens);
-
-        assert_eq!(output.module.ident, "should_be_the_module_name");
-    }
-
-    #[test]
-    fn copy_user_function() {
-        let item_fn =
-            r#"fn should_be_the_module_name(mut fix: String) { println!("user code") }"#.ast();
-        let data = into_rstest_data(&item_fn);
-
-        let tokens = matrix(item_fn.clone(), data.into());
-
-        let mut output = TestsGroup::from(tokens);
-        let test_impl: Stmt = output.requested_test.block.stmts.last().cloned().unwrap();
-
-        output.requested_test.attrs = vec![];
-        assert_eq!(output.requested_test.sig, item_fn.sig);
-        assert_eq!(test_impl.display_code(), item_fn.block.display_code());
-    }
-
-    #[test]
-    fn not_copy_user_function() {
-        let t_name = "test_name";
-        let item_fn: ItemFn = format!(
-            "fn {}(fix: String) -> Result<i32, String> {{ Ok(42) }}",
-            t_name
-        )
-        .ast();
-        let info = RsTestInfo {
-            data: RsTestData {
-                items: vec![values_list("fix", &["1"]).into()].into(),
-            },
-            ..Default::default()
-        };
-
-        let tokens = matrix(item_fn, info);
-
-        let test = &TestsGroup::from(tokens).get_all_tests()[0];
-        let inner_functions = extract_inner_functions(&test.block);
-
-        assert_eq!(0, inner_functions.filter(|f| f.sig.ident == t_name).count());
-    }
-
-    #[test]
-    fn not_copy_should_panic_attribute() {
-        let item_fn =
-            r#"#[should_panic] fn with_should_panic(mut fix: String) { println!("user code") }"#
-                .ast();
-        let info = RsTestInfo {
-            data: RsTestData {
-                items: vec![values_list("fix", &["1"]).into()].into(),
-            },
-            ..Default::default()
-        };
-
-        let tokens = matrix(item_fn, info);
-
-        let output = TestsGroup::from(tokens);
-
-        assert!(!format!("{:?}", output.requested_test.attrs).contains("should_panic"));
-    }
-
-    #[test]
-    fn should_mark_test_with_given_attributes() {
-        let item_fn: ItemFn = r#"#[should_panic] #[other(value)] fn test(_s: String){}"#.ast();
-
-        let info = RsTestInfo {
-            data: RsTestData {
-                items: vec![values_list("fix", &["1"]).into()].into(),
-            },
-            ..Default::default()
-        };
-        let tokens = matrix(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        // Sanity check
-        assert!(tests.len() > 0);
-
-        for t in tests {
-            let end = t.attrs.len() - 1;
-            assert_eq!(item_fn.attrs, &t.attrs[1..end]);
-        }
-    }
-
-    #[test]
-    fn add_return_type_if_any() {
-        let item_fn: ItemFn = "fn function(fix: String) -> Result<i32, String> { Ok(42) }".ast();
-        let info = RsTestInfo {
-            data: RsTestData {
-                items: vec![values_list("fix", &["1", "2", "3"]).into()].into(),
-            },
-            ..Default::default()
-        };
-
-        let tokens = matrix(item_fn.clone(), info);
-
-        let tests = TestsGroup::from(tokens).get_tests();
-
-        assert_eq!(tests[0].sig.output, item_fn.sig.output);
-        assert_eq!(tests[1].sig.output, item_fn.sig.output);
-        assert_eq!(tests[2].sig.output, item_fn.sig.output);
-    }
-
-    #[test]
-    fn mark_user_function_as_test() {
-        let item_fn =
-            r#"fn should_be_the_module_name(mut fix: String) { println!("user code") }"#.ast();
-        let data = into_rstest_data(&item_fn);
-
-        let tokens = matrix(item_fn.clone(), data.into());
-
-        let output = TestsGroup::from(tokens);
-
-        let expected = parse2::<ItemFn>(quote! {
-            #[cfg(test)]
-            fn some() {}
-        })
-        .unwrap()
-        .attrs;
-
-        assert_eq!(expected, output.requested_test.attrs);
-    }
-
-    #[test]
-    fn mark_module_as_test() {
-        let item_fn =
-            r#"fn should_be_the_module_name(mut fix: String) { println!("user code") }"#.ast();
-        let data = into_rstest_data(&item_fn);
-
-        let tokens = matrix(item_fn.clone(), data.into());
-
-        let output = TestsGroup::from(tokens);
-
-        let expected = parse2::<ItemMod>(quote! {
-            #[cfg(test)]
-            mod some {}
-        })
-        .unwrap()
-        .attrs;
-
-        assert_eq!(expected, output.module.attrs);
-    }
-
-    #[test]
-    fn with_just_one_arg() {
-        let arg_name = "fix";
-        let info = RsTestInfo {
-            data: RsTestData {
-                items: vec![values_list(arg_name, &["1", "2", "3"]).into()].into(),
-            },
-            ..Default::default()
-        };
-
-        let item_fn = format!(r#"fn test({}: u32) {{ println!("user code") }}"#, arg_name).ast();
-
-        let tokens = matrix(item_fn, info);
-
-        let tests = TestsGroup::from(tokens).get_tests();
-
-        assert_eq!(3, tests.len());
-        assert!(&tests[0].sig.ident.to_string().starts_with("fix_"))
-    }
-
-    #[rstest]
-    #[case::sync(false)]
-    #[case::async_fn(true)]
-    fn use_injected_test_attribute_to_mark_test_functions_if_any(
-        #[case] is_async: bool,
-        #[values(
-            "#[test]",
-            "#[other::test]",
-            "#[very::complicated::path::test]",
-            "#[prev]#[test]",
-            "#[test]#[after]",
-            "#[prev]#[other::test]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let filter = attrs("#[allow(non_snake_case)]");
-        let data = RsTestData {
-            items: vec![values_list("v", &["1", "2", "3"]).into()].into(),
-        };
-        let mut item_fn: ItemFn = r#"fn test(v: u32) {{ println!("user code") }}"#.ast();
-        item_fn.set_async(is_async);
-        item_fn.attrs = attributes.clone();
-
-        let tokens = matrix(item_fn, data.into());
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        // Sanity check
-        assert!(tests.len() > 0);
-
-        for test in tests {
-            let filtered: Vec<_> = test
-                .attrs
-                .into_iter()
-                .filter(|a| !filter.contains(a))
-                .collect();
-            assert_eq!(attributes, filtered);
-        }
-    }
-
-    #[rstest]
-    #[case::sync(false, parse_quote! { #[test] })]
-    #[case::async_fn(true, parse_quote! { #[async_std::test] })]
-    fn add_default_test_attribute(
-        #[case] is_async: bool,
-        #[case] test_attribute: Attribute,
-        #[values(
-            "",
-            "#[no_one]",
-            "#[should_panic]",
-            "#[should_panic]#[other]",
-            "#[a::b::c]#[should_panic]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let data = RsTestData {
-            items: vec![values_list("v", &["1", "2", "3"]).into()].into(),
-        };
-
-        let mut item_fn: ItemFn = r#"fn test(v: u32) {{ println!("user code") }}"#.ast();
-        item_fn.set_async(is_async);
-        item_fn.attrs = attributes.clone();
-
-        let tokens = matrix(item_fn, data.into());
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        // Sanity check
-        assert!(tests.len() > 0);
-
-        for test in tests {
-            assert_eq!(test.attrs[0], test_attribute);
-            assert_eq!(&test.attrs[1..test.attrs.len() - 1], attributes.as_slice());
-        }
-    }
-
-    #[test]
-    fn add_future_boilerplate_if_requested() {
-        let item_fn = r#"async fn test(async_ref_u32: &u32, async_u32: u32,simple: u32) { }"#.ast();
-
-        let mut arguments = ArgumentsInfo::default();
-        arguments.add_future(pat("async_ref_u32"));
-        arguments.add_future(pat("async_u32"));
-
-        let info = RsTestInfo {
-            arguments,
-            ..Default::default()
-        };
-
-        let tokens = matrix(item_fn, info);
-
-        let test_function = TestsGroup::from(tokens).requested_test;
-
-        let expected = parse_str::<syn::ItemFn>(
-            r#"async fn test<'_async_ref_u32>(
-                        async_ref_u32: impl std::future::Future<Output = &'_async_ref_u32 u32>, 
-                        async_u32: impl std::future::Future<Output = u32>, 
-                        simple: u32
-                    )
-                    { }
-                    "#,
-        )
-        .unwrap();
-
-        assert_eq!(test_function.sig, expected.sig);
-    }
-
-    #[rstest]
-    fn add_allow_non_snake_case(
-        #[values(
-            "",
-            "#[no_one]",
-            "#[should_panic]",
-            "#[should_panic]#[other]",
-            "#[a::b::c]#[should_panic]"
-        )]
-        attributes: &str,
-    ) {
-        let attributes = attrs(attributes);
-        let non_snake_case = &attrs("#[allow(non_snake_case)]")[0];
-        let data = RsTestData {
-            items: vec![values_list("v", &["1", "2", "3"]).into()].into(),
-        };
-
-        let mut item_fn: ItemFn = r#"fn test(v: u32) {{ println!("user code") }}"#.ast();
-        item_fn.attrs = attributes.clone();
-
-        let tokens = matrix(item_fn, data.into());
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        // Sanity check
-        assert!(tests.len() > 0);
-
-        for test in tests {
-            assert_eq!(test.attrs.last().unwrap(), non_snake_case);
-            assert_eq!(&test.attrs[1..test.attrs.len() - 1], attributes.as_slice());
-        }
-    }
-
-    #[rstest]
-    #[case::sync(false, false)]
-    #[case::async_fn(true, true)]
-    fn use_await_for_async_test_function(#[case] is_async: bool, #[case] use_await: bool) {
-        let data = RsTestData {
-            items: vec![values_list("v", &["1", "2", "3"]).into()].into(),
-        };
-
-        let mut item_fn: ItemFn = r#"fn test(v: u32) {{ println!("user code") }}"#.ast();
-        item_fn.set_async(is_async);
-
-        let tokens = matrix(item_fn, data.into());
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        // Sanity check
-        assert!(tests.len() > 0);
-
-        for test in tests {
-            let last_stmt = test.block.stmts.last().unwrap();
-            assert_eq!(use_await, last_stmt.is_await());
-        }
-    }
-
-    #[test]
-    fn trace_arguments_value() {
-        let data = RsTestData {
-            items: vec![
-                values_list("a_trace_me", &["1", "2"]).into(),
-                values_list("b_trace_me", &["3", "4"]).into(),
-            ]
-            .into(),
-        };
-        let item_fn: ItemFn = r#"#[trace] fn test(a_trace_me: u32, b_trace_me: u32) {}"#.ast();
-
-        let tokens = matrix(item_fn, data.into());
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert!(tests.len() > 0);
-        for test in tests {
-            for name in &["a_trace_me", "b_trace_me"] {
-                assert_in!(test.block.display_code(), trace_argument_code_string(name));
-            }
-        }
-    }
-
-    #[test]
-    fn trace_just_some_arguments_value() {
-        let data = RsTestData {
-            items: vec![
-                values_list("a_trace_me", &["1", "2"]).into(),
-                values_list("b_no_trace_me", &["3", "4"]).into(),
-                values_list("c_no_trace_me", &["5", "6"]).into(),
-                values_list("d_trace_me", &["7", "8"]).into(),
-            ]
-            .into(),
-        };
-        let mut attributes: RsTestAttributes = Default::default();
-        attributes.add_notraces(vec![pat("b_no_trace_me"), pat("c_no_trace_me")]);
-        let item_fn: ItemFn = r#"#[trace] fn test(a_trace_me: u32, b_no_trace_me: u32, c_no_trace_me: u32, d_trace_me: u32) {}"#.ast();
-
-        let tokens = matrix(
-            item_fn,
-            RsTestInfo {
-                data,
-                attributes,
-                ..Default::default()
-            },
-        );
-
-        let tests = TestsGroup::from(tokens).get_all_tests();
-
-        assert!(tests.len() > 0);
-        for test in tests {
-            for should_be_present in &["a_trace_me", "d_trace_me"] {
-                assert_in!(
-                    test.block.display_code(),
-                    trace_argument_code_string(should_be_present)
-                );
-            }
-            for should_not_be_present in &["b_no_trace_me", "c_no_trace_me"] {
-                assert_not_in!(
-                    test.block.display_code(),
-                    trace_argument_code_string(should_not_be_present)
-                );
-            }
-        }
-    }
-
-    #[test]
-    fn use_global_await() {
-        let item_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {}"#.ast();
-        let data = RsTestData {
-            items: vec![
-                values_list("a", &["1"]).into(),
-                values_list("b", &["2"]).into(),
-                values_list("c", &["3"]).into(),
-            ]
-            .into(),
-        };
-        let mut info = RsTestInfo {
-            data,
-            attributes: Default::default(),
-            arguments: Default::default(),
-        };
-        info.arguments.set_global_await(true);
-        info.arguments.add_future(pat("a"));
-        info.arguments.add_future(pat("b"));
-
-        let tokens = matrix(item_fn, info);
-
-        let tests = TestsGroup::from(tokens);
-
-        let code = tests.requested_test.block.display_code();
-
-        assert_in!(code, await_argument_code_string("a"));
-        assert_in!(code, await_argument_code_string("b"));
-        assert_not_in!(code, await_argument_code_string("c"));
-    }
-
-    #[test]
-    fn use_selective_await() {
-        let item_fn: ItemFn = r#"fn test(a: i32, b:i32, c:i32) {}"#.ast();
-        let data = RsTestData {
-            items: vec![
-                values_list("a", &["1"]).into(),
-                values_list("b", &["2"]).into(),
-                values_list("c", &["3"]).into(),
-            ]
-            .into(),
-        };
-        let mut info = RsTestInfo {
-            data,
-            attributes: Default::default(),
-            arguments: Default::default(),
-        };
-
-        info.arguments.set_future(pat("a"), FutureArg::Define);
-        info.arguments.set_future(pat("b"), FutureArg::Await);
-
-        let tokens = matrix(item_fn, info);
-
-        let tests = TestsGroup::from(tokens);
-
-        let code = tests.requested_test.block.display_code();
-
-        assert_not_in!(code, await_argument_code_string("a"));
-        assert_in!(code, await_argument_code_string("b"));
-        assert_not_in!(code, await_argument_code_string("c"));
-    }
-
-    mod two_args_should {
-        /// Should test matrix tests render without take in account MatrixInfo to RsTestInfo
-        /// transformation
-        use super::{assert_eq, *};
-
-        fn fixture<'a>() -> (Vec<&'a str>, ItemFn, RsTestInfo) {
-            let names = vec!["first", "second"];
-            (
-                names.clone(),
-                format!(
-                    r#"fn test({}: u32, {}: u32) {{ println!("user code") }}"#,
-                    names[0], names[1]
-                )
-                .ast(),
-                RsTestInfo {
-                    data: RsTestData {
-                        items: vec![
-                            values_list(names[0], &["1", "2", "3"]).into(),
-                            values_list(names[1], &["1", "2"]).into(),
-                        ],
-                    },
-                    ..Default::default()
-                },
-            )
-        }
-
-        #[test]
-        fn contain_a_module_for_each_first_arg() {
-            let (names, item_fn, info) = fixture();
-
-            let tokens = matrix(item_fn, info);
-
-            let modules = TestsGroup::from(tokens).module.get_modules().names();
-
-            let expected = (1..=3)
-                .map(|i| format!("{}_{}", names[0], i))
-                .collect::<Vec<_>>();
-
-            assert_eq!(expected.len(), modules.len());
-            for (e, m) in expected.into_iter().zip(modules.into_iter()) {
-                assert_in!(m, e);
-            }
-        }
-
-        #[test]
-        fn annotate_modules_with_allow_non_snake_name() {
-            let (_, item_fn, info) = fixture();
-            let non_snake_case = &attrs("#[allow(non_snake_case)]")[0];
-
-            let tokens = matrix(item_fn, info);
-
-            let modules = TestsGroup::from(tokens).module.get_modules();
-
-            for module in modules {
-                assert!(module.attrs.contains(&non_snake_case));
-            }
-        }
-
-        #[test]
-        fn create_all_tests() {
-            let (_, item_fn, info) = fixture();
-
-            let tokens = matrix(item_fn, info);
-
-            let tests = TestsGroup::from(tokens).module.get_all_tests().names();
-
-            assert_eq!(6, tests.len());
-        }
-
-        #[test]
-        fn create_all_modules_with_the_same_functions() {
-            let (_, item_fn, info) = fixture();
-
-            let tokens = matrix(item_fn, info);
-
-            let tests = TestsGroup::from(tokens)
-                .module
-                .get_modules()
-                .into_iter()
-                .map(|m| m.get_tests().names())
-                .collect::<Vec<_>>();
-
-            assert_eq!(tests[0], tests[1]);
-            assert_eq!(tests[1], tests[2]);
-        }
-
-        #[test]
-        fn test_name_should_contain_argument_name() {
-            let (names, item_fn, info) = fixture();
-
-            let tokens = matrix(item_fn, info);
-
-            let tests = TestsGroup::from(tokens).module.get_modules()[0]
-                .get_tests()
-                .names();
-
-            let expected = (1..=2)
-                .map(|i| format!("{}_{}", names[1], i))
-                .collect::<Vec<_>>();
-
-            assert_eq!(expected.len(), tests.len());
-            for (e, m) in expected.into_iter().zip(tests.into_iter()) {
-                assert_in!(m, e);
-            }
-        }
-    }
-
-    #[test]
-    fn three_args_should_create_all_function_4_mods_at_the_first_level_and_3_at_the_second() {
-        let (first, second, third) = ("first", "second", "third");
-        let info = RsTestInfo {
-            data: RsTestData {
-                items: vec![
-                    values_list(first, &["1", "2", "3", "4"]).into(),
-                    values_list(second, &["1", "2", "3"]).into(),
-                    values_list(third, &["1", "2"]).into(),
-                ],
-            },
-            ..Default::default()
-        };
-        let item_fn = format!(
-            r#"fn test({}: u32, {}: u32, {}: u32) {{ println!("user code") }}"#,
-            first, second, third
-        )
-        .ast();
-
-        let tokens = matrix(item_fn, info);
-
-        let tg = TestsGroup::from(tokens);
-
-        assert_eq!(24, tg.module.get_all_tests().len());
-        assert_eq!(4, tg.module.get_modules().len());
-        assert_eq!(3, tg.module.get_modules()[0].get_modules().len());
-        assert_eq!(3, tg.module.get_modules()[3].get_modules().len());
-        assert_eq!(
-            2,
-            tg.module.get_modules()[0].get_modules()[0]
-                .get_tests()
-                .len()
-        );
-        assert_eq!(
-            2,
-            tg.module.get_modules()[3].get_modules()[1]
-                .get_tests()
-                .len()
-        );
-    }
-
-    #[test]
-    fn pad_case_index() {
-        let item_fn: ItemFn =
-            r#"fn test(first: u32, second: u32, third: u32) { println!("user code") }"#.ast();
-        let values = (1..=100).map(|i| i.to_string()).collect::<Vec<_>>();
-        let info = RsTestInfo {
-            data: RsTestData {
-                items: vec![
-                    values_list("first", values.as_ref()).into(),
-                    values_list("second", values[..10].as_ref()).into(),
-                    values_list("third", values[..2].as_ref()).into(),
-                ],
-            },
-            ..Default::default()
-        };
-
-        let tokens = matrix(item_fn.clone(), info);
-
-        let tg = TestsGroup::from(tokens);
-
-        let mods = tg.get_modules().names();
-
-        assert_in!(mods[0], "first_001");
-        assert_in!(mods[99], "first_100");
-
-        let mods = tg.get_modules()[0].get_modules().names();
-
-        assert_in!(mods[0], "second_01");
-        assert_in!(mods[9], "second_10");
-
-        let functions = tg.get_modules()[0].get_modules()[1].get_tests().names();
-
-        assert_in!(functions[0], "third_1");
-        assert_in!(functions[1], "third_2");
-    }
-}
-
-mod complete_should {
-    use crate::parse::rstest::RsTestData;
-
-    use super::{assert_eq, *};
-
-    fn rendered_case(fn_name: &str) -> TestsGroup {
-        let item_fn: ItemFn = format!(
-            r#"         #[first]
-                        #[second(arg)]
-                        fn {}(
-                            fix: u32,
-                            a: f64, b: f32,
-                            x: i32, y: i32) {{}}"#,
-            fn_name
-        )
-        .ast();
-        let data = RsTestData {
-            items: vec![
-                fixture("fix", &["2"]).into(),
-                ident("a").into(),
-                ident("b").into(),
-                vec!["1f64", "2f32"]
-                    .into_iter()
-                    .collect::<TestCase>()
-                    .into(),
-                TestCase {
-                    description: Some(ident("description")),
-                    ..vec!["3f64", "4f32"].into_iter().collect::<TestCase>()
-                }
-                .with_attrs(attrs("#[third]#[forth(other)]"))
-                .into(),
-                values_list("x", &["12", "-2"]).into(),
-                values_list("y", &["-3", "42"]).into(),
-            ],
-        };
-
-        matrix(item_fn.clone(), data.into()).into()
-    }
-
-    fn test_case() -> TestsGroup {
-        rendered_case("test_function")
-    }
-
-    #[test]
-    fn use_function_name_as_outer_module() {
-        let rendered = rendered_case("should_be_the_outer_module_name");
-
-        assert_eq!(rendered.module.ident, "should_be_the_outer_module_name")
-    }
-
-    #[test]
-    fn have_one_module_for_each_parametrized_case() {
-        let rendered = test_case();
-
-        assert_eq!(
-            vec!["case_1", "case_2_description"],
-            rendered
-                .get_modules()
-                .iter()
-                .map(|m| m.ident.to_string())
-                .collect::<Vec<_>>()
-        );
-    }
-
-    #[test]
-    fn implement_exactly_8_tests() {
-        let rendered = test_case();
-
-        assert_eq!(8, rendered.get_all_tests().len());
-    }
-
-    #[test]
-    fn implement_exactly_4_tests_in_each_module() {
-        let modules = test_case().module.get_modules();
-
-        assert_eq!(4, modules[0].get_all_tests().len());
-        assert_eq!(4, modules[1].get_all_tests().len());
-    }
-
-    #[test]
-    fn assign_same_case_value_for_each_test() {
-        let modules = test_case().module.get_modules();
-
-        for f in modules[0].get_all_tests() {
-            let assignments = Assignments::collect_assignments(&f);
-            assert_eq!(assignments.0["a"], expr("1f64"));
-            assert_eq!(assignments.0["b"], expr("2f32"));
-        }
-
-        for f in modules[1].get_all_tests() {
-            let assignments = Assignments::collect_assignments(&f);
-            assert_eq!(assignments.0["a"], expr("3f64"));
-            assert_eq!(assignments.0["b"], expr("4f32"));
-        }
-    }
-
-    #[test]
-    fn assign_all_case_combination_in_tests() {
-        let modules = test_case().module.get_modules();
-
-        let cases = vec![("12", "-3"), ("12", "42"), ("-2", "-3"), ("-2", "42")];
-        for module in modules {
-            for ((x, y), f) in cases.iter().zip(module.get_all_tests().iter()) {
-                let assignments = Assignments::collect_assignments(f);
-                assert_eq!(assignments.0["x"], expr(x));
-                assert_eq!(assignments.0["y"], expr(y));
-            }
-        }
-    }
-
-    #[test]
-    fn mark_test_with_given_attributes() {
-        let modules = test_case().module.get_modules();
-        let attrs = attrs("#[first]#[second(arg)]");
-
-        for f in modules[0].get_all_tests() {
-            let end = f.attrs.len() - 1;
-            assert_eq!(attrs, &f.attrs[1..end]);
-        }
-        for f in modules[1].get_all_tests() {
-            assert_eq!(attrs, &f.attrs[1..3]);
-        }
-    }
-    #[test]
-    fn should_add_attributes_given_in_the_test_case() {
-        let modules = test_case().module.get_modules();
-        let attrs = attrs("#[third]#[forth(other)]");
-
-        for f in modules[1].get_all_tests() {
-            assert_eq!(attrs, &f.attrs[3..5]);
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/wrapper.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/wrapper.rs
deleted file mode 100644
index a513a7c6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/wrapper.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-use proc_macro2::TokenStream;
-use quote::{quote, ToTokens};
-use syn::Ident;
-
-pub(crate) trait WrapByModule {
-    fn wrap_by_mod(&self, mod_name: &Ident) -> TokenStream;
-}
-
-impl<T: ToTokens> WrapByModule for T {
-    fn wrap_by_mod(&self, mod_name: &Ident) -> TokenStream {
-        quote! {
-            mod #mod_name {
-                use super::*;
-
-                #self
-            }
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/resolver.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/resolver.rs
deleted file mode 100644
index a9bd59d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/resolver.rs
+++ /dev/null
@@ -1,230 +0,0 @@
-/// Define `Resolver` trait and implement it on some hashmaps and also define the `Resolver` tuple
-/// composition. Provide also some utility functions related to how to create a `Resolver` and
-/// resolving render.
-///
-use std::borrow::Cow;
-use std::collections::HashMap;
-
-use syn::{parse_quote, Expr, Pat};
-
-use crate::parse::Fixture;
-
-pub(crate) mod fixtures {
-    use quote::format_ident;
-
-    use crate::parse::arguments::ArgumentsInfo;
-
-    use super::*;
-
-    pub(crate) fn get<'a>(
-        arguments: &ArgumentsInfo,
-        fixtures: impl Iterator<Item = &'a Fixture>,
-    ) -> impl Resolver + 'a {
-        fixtures
-            .map(|f| {
-                (
-                    arguments.inner_pat(&f.arg).clone(),
-                    extract_resolve_expression(f),
-                )
-            })
-            .collect::<HashMap<_, Expr>>()
-    }
-
-    fn extract_resolve_expression(fixture: &Fixture) -> syn::Expr {
-        let resolve = fixture.resolve.clone();
-        let positional = &fixture.positional.0;
-        let f_name = match positional.len() {
-            0 => format_ident!("default"),
-            l => format_ident!("partial_{}", l),
-        };
-        parse_quote! { #resolve::#f_name(#(#positional), *) }
-    }
-
-    #[cfg(test)]
-    mod should {
-        use super::*;
-        use crate::test::{assert_eq, *};
-
-        #[rstest]
-        #[case(&[], "default()")]
-        #[case(&["my_expression"], "partial_1(my_expression)")]
-        #[case(&["first", "other"], "partial_2(first, other)")]
-        fn resolve_by_use_the_given_name(
-            #[case] args: &[&str],
-            #[case] expected: &str,
-            #[values(None, Some("minnie"), Some("__destruct_1"))] inner_pat: Option<&str>,
-        ) {
-            let data = vec![fixture("pippo", args)];
-            let mut arguments: ArgumentsInfo = Default::default();
-            let mut request = pat("pippo");
-            if let Some(inner) = inner_pat {
-                arguments.set_inner_pat(pat("pippo"), pat(inner));
-                request = pat(inner);
-            }
-
-            let resolver = get(&arguments, data.iter());
-
-            let resolved = resolver.resolve(&request).unwrap().into_owned();
-
-            assert_eq!(resolved, format!("pippo::{}", expected).ast());
-        }
-
-        #[rstest]
-        #[case(&[], "default()")]
-        #[case(&["my_expression"], "partial_1(my_expression)")]
-        #[case(&["first", "other"], "partial_2(first, other)")]
-        fn resolve_by_use_the_resolve_field(
-            #[case] args: &[&str],
-            #[case] expected: &str,
-            #[values("pluto", "minnie::pluto")] resolver_path: &str,
-            #[values(None, Some("minnie"), Some("__destruct_1"))] inner_pat: Option<&str>,
-        ) {
-            let data = vec![fixture("pippo", args).with_resolve(resolver_path)];
-            let mut arguments: ArgumentsInfo = Default::default();
-            let mut request = pat("pippo");
-            if let Some(inner) = inner_pat {
-                arguments.set_inner_pat(pat("pippo"), pat(inner));
-                request = pat(inner);
-            }
-            let resolver = get(&arguments, data.iter());
-
-            let resolved = resolver.resolve(&request).unwrap().into_owned();
-
-            assert_eq!(resolved, format!("{}::{}", resolver_path, expected).ast());
-        }
-    }
-}
-
-pub(crate) mod values {
-    use super::*;
-    use crate::parse::fixture::ArgumentValue;
-
-    pub(crate) fn get<'a>(values: impl Iterator<Item = &'a ArgumentValue>) -> impl Resolver + 'a {
-        values
-            .map(|av| (av.arg.clone(), &av.expr))
-            .collect::<HashMap<_, &'a Expr>>()
-    }
-
-    #[cfg(test)]
-    mod should {
-        use super::*;
-        use crate::test::{assert_eq, *};
-
-        #[test]
-        fn resolve_by_use_the_given_name() {
-            let data = vec![
-                arg_value("pippo", "42"),
-                arg_value("donaldduck", "vec![1,2]"),
-            ];
-            let resolver = get(data.iter());
-
-            assert_eq!(
-                resolver.resolve(&pat("pippo")).unwrap().into_owned(),
-                "42".ast()
-            );
-            assert_eq!(
-                resolver.resolve(&pat("donaldduck")).unwrap().into_owned(),
-                "vec![1,2]".ast()
-            );
-        }
-    }
-}
-
-/// A trait that `resolve` the given ident to expression code to assign the value.
-pub(crate) trait Resolver {
-    fn resolve(&self, arg: &Pat) -> Option<Cow<Expr>>;
-}
-
-impl<'a> Resolver for HashMap<Pat, &'a Expr> {
-    fn resolve(&self, arg: &Pat) -> Option<Cow<Expr>> {
-        self.get(arg)
-            .or_else(|| self.get(&pat_invert_mutability(arg)))
-            .map(|&c| Cow::Borrowed(c))
-    }
-}
-
-impl Resolver for HashMap<Pat, Expr> {
-    fn resolve(&self, arg: &Pat) -> Option<Cow<Expr>> {
-        self.get(arg).map(Cow::Borrowed)
-    }
-}
-
-impl<R1: Resolver, R2: Resolver> Resolver for (R1, R2) {
-    fn resolve(&self, arg: &Pat) -> Option<Cow<Expr>> {
-        self.0.resolve(arg).or_else(|| self.1.resolve(arg))
-    }
-}
-
-impl<R: Resolver + ?Sized> Resolver for &R {
-    fn resolve(&self, arg: &Pat) -> Option<Cow<Expr>> {
-        (*self).resolve(arg)
-    }
-}
-
-impl<R: Resolver + ?Sized> Resolver for Box<R> {
-    fn resolve(&self, arg: &Pat) -> Option<Cow<Expr>> {
-        (**self).resolve(arg)
-    }
-}
-
-impl Resolver for (Pat, Expr) {
-    fn resolve(&self, arg: &Pat) -> Option<Cow<Expr>> {
-        if arg == &self.0 {
-            Some(Cow::Borrowed(&self.1))
-        } else {
-            None
-        }
-    }
-}
-
-pub(crate) fn pat_invert_mutability(p: &Pat) -> Pat {
-    match p.clone() {
-        Pat::Ident(mut ident) => {
-            ident.mutability = match ident.mutability {
-                Some(_) => None,
-                None => Some(syn::parse_quote! { mut }),
-            };
-            syn::Pat::Ident(ident)
-        }
-        p => p,
-    }
-}
-
-#[cfg(test)]
-mod should {
-    use super::*;
-    use crate::test::{assert_eq, *};
-    use syn::parse_str;
-
-    #[test]
-    fn return_the_given_expression() {
-        let ast = parse_str("fn function(mut foo: String) {}").unwrap();
-        let arg = first_arg_pat(&ast);
-        let expected = expr("bar()");
-        let mut resolver = HashMap::new();
-
-        resolver.insert(pat("foo").with_mut(), &expected);
-
-        assert_eq!(expected, (&resolver).resolve(&arg).unwrap().into_owned())
-    }
-
-    #[test]
-    fn return_the_given_expression_also_if_not_mut_searched() {
-        let ast = parse_str("fn function(foo: String) {}").unwrap();
-        let arg = first_arg_pat(&ast);
-        let expected = expr("bar()");
-        let mut resolver = HashMap::new();
-
-        resolver.insert(pat("foo").with_mut(), &expected);
-
-        assert_eq!(expected, (&resolver).resolve(&arg).unwrap().into_owned())
-    }
-
-    #[test]
-    fn return_none_for_unknown_argument() {
-        let ast = "fn function(mut fix: String) {}".ast();
-        let arg = first_arg_pat(&ast);
-
-        assert!(EmptyResolver.resolve(&arg).is_none())
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/test.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/test.rs
deleted file mode 100644
index 672c5c5c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/test.rs
+++ /dev/null
@@ -1,380 +0,0 @@
-#![macro_use]
-
-/// Unit testing utility module. Collect a bunch of functions&macro and impls to simplify unit
-/// testing bolilerplate.
-///
-use std::borrow::Cow;
-
-pub(crate) use pretty_assertions::assert_eq;
-use proc_macro2::TokenTree;
-use quote::quote;
-pub(crate) use rstest::{fixture, rstest};
-use syn::{parse::Parse, parse2, parse_quote, parse_str, Error, Expr, Ident, Pat, Stmt};
-use utils::fn_args_pats;
-
-use super::*;
-use crate::parse::{
-    fixture::{FixtureData, FixtureItem},
-    rstest::{RsTestData, RsTestItem},
-    testcase::TestCase,
-    vlist::ValueList,
-    Attribute, Fixture, Positional,
-};
-use crate::resolver::Resolver;
-use parse::fixture::ArgumentValue;
-
-macro_rules! to_args {
-    ($e:expr) => {{
-        $e.iter().map(expr).collect::<Vec<_>>()
-    }};
-}
-
-macro_rules! to_fnargs {
-    ($e:expr) => {{
-        $e.iter().map(fn_arg).collect::<Vec<_>>()
-    }};
-}
-
-macro_rules! to_exprs {
-    ($e:expr) => {
-        $e.iter().map(|s| expr(s)).collect::<Vec<_>>()
-    };
-}
-
-macro_rules! to_strs {
-    ($e:expr) => {
-        $e.iter().map(ToString::to_string).collect::<Vec<_>>()
-    };
-}
-
-macro_rules! to_idents {
-    ($e:expr) => {
-        $e.iter().map(|s| ident(s)).collect::<Vec<_>>()
-    };
-}
-
-macro_rules! to_pats {
-    ($e:expr) => {
-        $e.iter().map(|s| pat(s)).collect::<Vec<_>>()
-    };
-}
-
-struct Outer<T>(T);
-impl<T: Parse> Parse for Outer<T> {
-    fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
-        let outer: Ident = input.parse()?;
-        if outer == "outer" {
-            let content;
-            let _ = syn::parenthesized!(content in input);
-            content.parse().map(Outer)
-        } else {
-            Err(Error::new(outer.span(), "Expected 'outer'"))
-        }
-    }
-}
-
-pub(crate) fn parse_meta<T: syn::parse::Parse, S: AsRef<str>>(test_case: S) -> T {
-    let to_parse = format!(
-        r#"
-        #[outer({})]
-        fn to_parse() {{}}
-        "#,
-        test_case.as_ref()
-    );
-
-    let item_fn = parse_str::<ItemFn>(&to_parse).expect(&format!("Cannot parse '{}'", to_parse));
-
-    let tokens = quote!(
-        #item_fn
-    );
-
-    let tt = tokens.into_iter().skip(1).next().unwrap();
-
-    if let TokenTree::Group(g) = tt {
-        let ts = g.stream();
-        parse2::<Outer<T>>(ts).unwrap().0
-    } else {
-        panic!("Cannot find group in {:#?}", tt)
-    }
-}
-
-pub(crate) trait ToAst {
-    fn ast<T: Parse>(self) -> T;
-}
-
-impl ToAst for &str {
-    fn ast<T: Parse>(self) -> T {
-        parse_str(self).unwrap()
-    }
-}
-
-impl ToAst for String {
-    fn ast<T: Parse>(self) -> T {
-        parse_str(&self).unwrap()
-    }
-}
-
-impl ToAst for proc_macro2::TokenStream {
-    fn ast<T: Parse>(self) -> T {
-        parse2(self).unwrap()
-    }
-}
-
-pub(crate) fn ident(s: impl AsRef<str>) -> Ident {
-    s.as_ref().ast()
-}
-
-pub(crate) fn path(s: impl AsRef<str>) -> syn::Path {
-    s.as_ref().ast()
-}
-
-pub(crate) fn pat(s: impl AsRef<str>) -> syn::Pat {
-    syn::parse::Parser::parse_str(Pat::parse_single, s.as_ref()).unwrap()
-}
-
-pub trait PatBuilder {
-    fn with_mut(self) -> Self;
-}
-
-impl PatBuilder for syn::Pat {
-    fn with_mut(self) -> Self {
-        match self {
-            Pat::Ident(mut ident) => {
-                ident.mutability = Some("mut".ast());
-                syn::Pat::Ident(ident)
-            }
-            _ => unimplemented!("Unsupported pattern: {:?}", self,),
-        }
-    }
-}
-
-pub(crate) fn expr(s: impl AsRef<str>) -> syn::Expr {
-    s.as_ref().ast()
-}
-
-pub(crate) fn fn_arg(s: impl AsRef<str>) -> syn::FnArg {
-    s.as_ref().ast()
-}
-
-pub(crate) fn attr(s: impl AsRef<str>) -> syn::Attribute {
-    let a = attrs(s);
-    assert_eq!(1, a.len());
-    a.into_iter().next().unwrap()
-}
-
-pub(crate) fn attrs(s: impl AsRef<str>) -> Vec<syn::Attribute> {
-    parse_str::<ItemFn>(&format!(
-        r#"{}
-           fn _no_name_() {{}}   
-        "#,
-        s.as_ref()
-    ))
-    .unwrap()
-    .attrs
-}
-
-pub(crate) fn fixture(name: impl AsRef<str>, args: &[&str]) -> Fixture {
-    let name = name.as_ref().to_owned();
-    Fixture::new(pat(&name), path(&name), Positional(to_exprs!(args)))
-}
-
-pub(crate) fn arg_value(name: impl AsRef<str>, value: impl AsRef<str>) -> ArgumentValue {
-    ArgumentValue::new(pat(name), expr(value))
-}
-
-pub(crate) fn values_list<S: AsRef<str>>(arg: &str, values: &[S]) -> ValueList {
-    ValueList {
-        arg: pat(arg),
-        values: values.into_iter().map(|s| expr(s).into()).collect(),
-    }
-}
-
-pub(crate) fn first_arg_pat(ast: &ItemFn) -> &Pat {
-    fn_args_pats(&ast).next().unwrap()
-}
-
-pub(crate) fn extract_inner_functions(block: &syn::Block) -> impl Iterator<Item = &syn::ItemFn> {
-    block.stmts.iter().filter_map(|s| match s {
-        syn::Stmt::Item(syn::Item::Fn(f)) => Some(f),
-        _ => None,
-    })
-}
-
-pub(crate) fn literal_expressions_str() -> Vec<&'static str> {
-    vec![
-        "42",
-        "42isize",
-        "1.0",
-        "-1",
-        "-1.0",
-        "true",
-        "1_000_000u64",
-        "0b10100101u8",
-        r#""42""#,
-        "b'H'",
-    ]
-}
-
-pub(crate) trait ExtractArgs {
-    fn args(&self) -> Vec<Expr>;
-}
-
-impl ExtractArgs for TestCase {
-    fn args(&self) -> Vec<Expr> {
-        self.args.iter().cloned().collect()
-    }
-}
-
-impl ExtractArgs for ValueList {
-    fn args(&self) -> Vec<Expr> {
-        self.values.iter().map(|v| v.expr.clone()).collect()
-    }
-}
-
-impl Attribute {
-    pub fn attr<S: AsRef<str>>(s: S) -> Self {
-        Attribute::Attr(ident(s))
-    }
-
-    pub fn tagged<SI: AsRef<str>, SA: AsRef<str>>(tag: SI, attrs: Vec<SA>) -> Self {
-        Attribute::Tagged(ident(tag), attrs.into_iter().map(pat).collect())
-    }
-
-    pub fn typed<S: AsRef<str>, T: AsRef<str>>(tag: S, inner: T) -> Self {
-        Attribute::Type(ident(tag), parse_str(inner.as_ref()).unwrap())
-    }
-}
-
-impl RsTestInfo {
-    pub fn push_case(&mut self, case: TestCase) {
-        self.data.items.push(RsTestItem::TestCase(case));
-    }
-
-    pub fn extend(&mut self, cases: impl Iterator<Item = TestCase>) {
-        self.data.items.extend(cases.map(RsTestItem::TestCase));
-    }
-}
-
-impl Fixture {
-    pub fn with_resolve(mut self, resolve_path: &str) -> Self {
-        self.resolve = path(resolve_path);
-        self
-    }
-}
-
-impl TestCase {
-    pub fn with_description(mut self, description: &str) -> Self {
-        self.description = Some(ident(description));
-        self
-    }
-
-    pub fn with_attrs(mut self, attrs: Vec<syn::Attribute>) -> Self {
-        self.attrs = attrs;
-        self
-    }
-}
-
-impl<A: AsRef<str>> FromIterator<A> for TestCase {
-    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
-        TestCase {
-            args: iter.into_iter().map(expr).collect(),
-            attrs: Default::default(),
-            description: None,
-        }
-    }
-}
-
-impl<'a> From<&'a str> for TestCase {
-    fn from(argument: &'a str) -> Self {
-        std::iter::once(argument).collect()
-    }
-}
-
-impl From<Vec<RsTestItem>> for RsTestData {
-    fn from(items: Vec<RsTestItem>) -> Self {
-        Self { items }
-    }
-}
-
-impl From<RsTestData> for RsTestInfo {
-    fn from(data: RsTestData) -> Self {
-        Self {
-            data,
-            ..Default::default()
-        }
-    }
-}
-
-impl From<Vec<Expr>> for Positional {
-    fn from(data: Vec<Expr>) -> Self {
-        Positional(data)
-    }
-}
-
-impl From<Vec<FixtureItem>> for FixtureData {
-    fn from(fixtures: Vec<FixtureItem>) -> Self {
-        Self { items: fixtures }
-    }
-}
-
-pub(crate) struct EmptyResolver;
-
-impl<'a> Resolver for EmptyResolver {
-    fn resolve(&self, _pat: &Pat) -> Option<Cow<Expr>> {
-        None
-    }
-}
-
-pub(crate) trait IsAwait {
-    fn is_await(&self) -> bool;
-}
-
-impl IsAwait for Stmt {
-    fn is_await(&self) -> bool {
-        match self {
-            Stmt::Expr(Expr::Await(_), _) => true,
-            _ => false,
-        }
-    }
-}
-
-pub(crate) trait DisplayCode {
-    fn display_code(&self) -> String;
-}
-
-impl<T: ToTokens> DisplayCode for T {
-    fn display_code(&self) -> String {
-        self.to_token_stream().to_string()
-    }
-}
-
-impl crate::parse::fixture::FixtureInfo {
-    pub(crate) fn with_once(mut self) -> Self {
-        self.arguments.set_once(Some(attr("#[once]")));
-        self
-    }
-}
-
-pub(crate) fn await_argument_code_string(arg_name: &str) -> String {
-    let arg_name = ident(arg_name);
-    let statement: Stmt = parse_quote! {
-        let #arg_name = #arg_name.await;
-    };
-    statement.display_code()
-}
-
-pub(crate) fn ref_argument_code_string(arg_name: &str) -> String {
-    let arg_name = ident(arg_name);
-    let statement: Expr = parse_quote! {
-        &#arg_name
-    };
-    statement.display_code()
-}
-
-pub(crate) fn mut_await_argument_code_string(arg_name: &str) -> String {
-    let arg_name = ident(arg_name);
-    let statement: Stmt = parse_quote! {
-        let mut #arg_name = #arg_name.await;
-    };
-    statement.display_code()
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/utils.rs b/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/utils.rs
deleted file mode 100644
index e97da22..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/utils.rs
+++ /dev/null
@@ -1,424 +0,0 @@
-/// Contains some unsorted functions used across others modules
-///
-use quote::format_ident;
-use std::collections::{HashMap, HashSet};
-use unicode_ident::is_xid_continue;
-
-use crate::refident::{MaybeIdent, MaybePat};
-use syn::{Attribute, Expr, FnArg, Generics, Ident, ItemFn, Pat, ReturnType, Type, WherePredicate};
-
-/// Return an iterator over fn arguments items.
-///
-pub(crate) fn fn_args_pats(test: &ItemFn) -> impl Iterator<Item = &Pat> {
-    fn_args(test).filter_map(MaybePat::maybe_pat)
-}
-
-pub(crate) fn compare_pat(a: &Pat, b: &Pat) -> bool {
-    match (a, b) {
-        (Pat::Ident(a), Pat::Ident(b)) => a.ident == b.ident,
-        (Pat::Tuple(a), Pat::Tuple(b)) => a.elems == b.elems,
-        (Pat::TupleStruct(a), Pat::TupleStruct(b)) => a.path == b.path && a.elems == b.elems,
-        (Pat::Struct(a), Pat::Struct(b)) => a.path == b.path && a.fields == b.fields,
-        _ => false,
-    }
-}
-
-/// Return if function declaration has an ident
-///
-pub(crate) fn fn_args_has_pat(fn_decl: &ItemFn, pat: &Pat) -> bool {
-    fn_args_pats(fn_decl).any(|id| compare_pat(id, pat))
-}
-
-/// Return an iterator over fn arguments.
-///
-pub(crate) fn fn_args(item_fn: &ItemFn) -> impl Iterator<Item = &FnArg> {
-    item_fn.sig.inputs.iter()
-}
-
-pub(crate) fn attr_ends_with(attr: &Attribute, segment: &syn::PathSegment) -> bool {
-    attr.path().segments.iter().last() == Some(segment)
-}
-
-pub(crate) fn attr_starts_with(attr: &Attribute, segment: &syn::PathSegment) -> bool {
-    attr.path().segments.iter().next() == Some(segment)
-}
-
-pub(crate) fn attr_is(attr: &Attribute, name: &str) -> bool {
-    attr.path().is_ident(&format_ident!("{}", name))
-}
-
-pub(crate) fn attr_in(attr: &Attribute, names: &[&str]) -> bool {
-    names
-        .iter()
-        .any(|name| attr.path().is_ident(&format_ident!("{}", name)))
-}
-
-pub(crate) trait IsLiteralExpression {
-    fn is_literal(&self) -> bool;
-}
-
-impl<E: AsRef<Expr>> IsLiteralExpression for E {
-    fn is_literal(&self) -> bool {
-        matches!(
-            self.as_ref(),
-            Expr::Lit(syn::ExprLit {
-                lit: syn::Lit::Str(_),
-                ..
-            })
-        )
-    }
-}
-
-// Recoursive search id by reference till find one in ends
-fn _is_used(
-    visited: &mut HashSet<Ident>,
-    id: &Ident,
-    references: &HashMap<Ident, HashSet<Ident>>,
-    ends: &HashSet<Ident>,
-) -> bool {
-    if visited.contains(id) {
-        return false;
-    }
-    visited.insert(id.clone());
-    if ends.contains(id) {
-        return true;
-    }
-    if references.contains_key(id) {
-        for referred in references.get(id).unwrap() {
-            if _is_used(visited, referred, references, ends) {
-                return true;
-            }
-        }
-    }
-    false
-}
-
-// Recoursive search id by reference till find one in ends
-fn is_used(id: &Ident, references: &HashMap<Ident, HashSet<Ident>>, ends: &HashSet<Ident>) -> bool {
-    let mut visited = Default::default();
-    _is_used(&mut visited, id, references, ends)
-}
-
-impl MaybeIdent for syn::WherePredicate {
-    fn maybe_ident(&self) -> Option<&Ident> {
-        match self {
-            WherePredicate::Type(syn::PredicateType { bounded_ty: t, .. }) => {
-                first_type_path_segment_ident(t)
-            }
-            WherePredicate::Lifetime(syn::PredicateLifetime { lifetime, .. }) => {
-                Some(&lifetime.ident)
-            }
-            _ => None,
-        }
-    }
-}
-
-#[derive(Default)]
-struct SearchSimpleTypeName(HashSet<Ident>);
-
-impl SearchSimpleTypeName {
-    fn take(self) -> HashSet<Ident> {
-        self.0
-    }
-
-    fn visit_inputs<'a>(&mut self, inputs: impl Iterator<Item = &'a FnArg>) {
-        use syn::visit::Visit;
-        inputs.for_each(|fn_arg| self.visit_fn_arg(fn_arg));
-    }
-    fn visit_output(&mut self, output: &ReturnType) {
-        use syn::visit::Visit;
-        self.visit_return_type(output);
-    }
-
-    fn collect_from_type_param(tp: &syn::TypeParam) -> Self {
-        let mut s: Self = Default::default();
-        use syn::visit::Visit;
-        s.visit_type_param(tp);
-        s
-    }
-
-    fn collect_from_where_predicate(wp: &syn::WherePredicate) -> Self {
-        let mut s: Self = Default::default();
-        use syn::visit::Visit;
-        s.visit_where_predicate(wp);
-        s
-    }
-}
-
-impl<'ast> syn::visit::Visit<'ast> for SearchSimpleTypeName {
-    fn visit_path(&mut self, p: &'ast syn::Path) {
-        if let Some(id) = p.get_ident() {
-            self.0.insert(id.clone());
-        }
-        syn::visit::visit_path(self, p)
-    }
-
-    fn visit_lifetime(&mut self, i: &'ast syn::Lifetime) {
-        self.0.insert(i.ident.clone());
-        syn::visit::visit_lifetime(self, i)
-    }
-}
-
-// Take generics definitions and where clauses and return the
-// a map from simple types (lifetime names or type with just names)
-// to a set of all simple types that use it as some costrain.
-fn extract_references_map(generics: &Generics) -> HashMap<Ident, HashSet<Ident>> {
-    let mut references = HashMap::<Ident, HashSet<Ident>>::default();
-    // Extracts references from types param
-    generics.type_params().for_each(|tp| {
-        SearchSimpleTypeName::collect_from_type_param(tp)
-            .take()
-            .into_iter()
-            .for_each(|id| {
-                references.entry(id).or_default().insert(tp.ident.clone());
-            });
-    });
-    // Extracts references from where clauses
-    generics
-        .where_clause
-        .iter()
-        .flat_map(|wc| wc.predicates.iter())
-        .filter_map(|wp| wp.maybe_ident().map(|id| (id, wp)))
-        .for_each(|(ref_ident, wp)| {
-            SearchSimpleTypeName::collect_from_where_predicate(wp)
-                .take()
-                .into_iter()
-                .for_each(|id| {
-                    references.entry(id).or_default().insert(ref_ident.clone());
-                });
-        });
-    references
-}
-
-// Return a hash set that contains all types and lifetimes referenced
-// in input/output expressed by a single ident.
-fn references_ident_types<'a>(
-    generics: &Generics,
-    inputs: impl Iterator<Item = &'a FnArg>,
-    output: &ReturnType,
-) -> HashSet<Ident> {
-    let mut used: SearchSimpleTypeName = Default::default();
-    used.visit_output(output);
-    used.visit_inputs(inputs);
-    let references = extract_references_map(generics);
-    let mut used = used.take();
-    let input_output = used.clone();
-    // Extend the input output collected ref with the transitive ones:
-    used.extend(
-        generics
-            .params
-            .iter()
-            .filter_map(MaybeIdent::maybe_ident)
-            .filter(|&id| is_used(id, &references, &input_output))
-            .cloned(),
-    );
-    used
-}
-
-fn filtered_predicates(mut wc: syn::WhereClause, valids: &HashSet<Ident>) -> syn::WhereClause {
-    wc.predicates = wc
-        .predicates
-        .clone()
-        .into_iter()
-        .filter(|wp| {
-            wp.maybe_ident()
-                .map(|t| valids.contains(t))
-                .unwrap_or_default()
-        })
-        .collect();
-    wc
-}
-
-fn filtered_generics<'a>(
-    params: impl Iterator<Item = syn::GenericParam> + 'a,
-    valids: &'a HashSet<Ident>,
-) -> impl Iterator<Item = syn::GenericParam> + 'a {
-    params.filter(move |p| match p.maybe_ident() {
-        Some(id) => valids.contains(id),
-        None => false,
-    })
-}
-
-//noinspection RsTypeCheck
-pub(crate) fn generics_clean_up<'a>(
-    original: &Generics,
-    inputs: impl Iterator<Item = &'a FnArg>,
-    output: &ReturnType,
-) -> syn::Generics {
-    let used = references_ident_types(original, inputs, output);
-    let mut result: Generics = original.clone();
-    result.params = filtered_generics(result.params.into_iter(), &used).collect();
-    result.where_clause = result.where_clause.map(|wc| filtered_predicates(wc, &used));
-    result
-}
-
-// If type is not self and doesn't starts with :: return the first ident
-// of its path segment: only if is a simple path.
-// If type is a simple ident just return the this ident. That is useful to
-// find the base type for associate type indication
-fn first_type_path_segment_ident(t: &Type) -> Option<&Ident> {
-    match t {
-        Type::Path(tp) if tp.qself.is_none() && tp.path.leading_colon.is_none() => tp
-            .path
-            .segments
-            .iter()
-            .next()
-            .and_then(|ps| match ps.arguments {
-                syn::PathArguments::None => Some(&ps.ident),
-                _ => None,
-            }),
-        _ => None,
-    }
-}
-
-pub(crate) fn fn_arg_mutability(arg: &FnArg) -> Option<syn::token::Mut> {
-    match arg {
-        FnArg::Typed(syn::PatType { pat, .. }) => match pat.as_ref() {
-            syn::Pat::Ident(syn::PatIdent { mutability, .. }) => *mutability,
-            _ => None,
-        },
-        _ => None,
-    }
-}
-
-pub(crate) fn sanitize_ident(name: &str) -> String {
-    name.chars()
-        .filter(|c| !c.is_whitespace())
-        .map(|c| match c {
-            '"' | '\'' => "__".to_owned(),
-            ':' | '(' | ')' | '{' | '}' | '[' | ']' | ',' | '.' | '*' | '+' | '/' | '-' | '%'
-            | '^' | '!' | '&' | '|' => "_".to_owned(),
-            _ => c.to_string(),
-        })
-        .collect::<String>()
-        .chars()
-        .filter(|&c| is_xid_continue(c))
-        .collect()
-}
-
-#[cfg(test)]
-mod test {
-    use syn::parse_quote;
-
-    use super::*;
-    use crate::test::{assert_eq, *};
-
-    #[test]
-    fn fn_args_has_pat_should() {
-        let item_fn = parse_quote! {
-            fn the_function(first: u32, second: u32) {}
-        };
-
-        assert!(fn_args_has_pat(&item_fn, &pat("first")));
-        assert!(!fn_args_has_pat(&item_fn, &pat("third")));
-    }
-
-    #[rstest]
-    #[case::base("fn foo<A, B, C>(a: A) -> B {}", &["A", "B"])]
-    #[case::use_const_in_array("fn foo<A, const B: usize, C>(a: A) -> [u32; B] {}", &["A", "B", "u32"])]
-    #[case::in_type_args("fn foo<A, const B: usize, C>(a: A) -> SomeType<B> {}", &["A", "B"])]
-    #[case::in_type_args("fn foo<A, const B: usize, C>(a: SomeType<A>, b: SomeType<B>) {}", &["A", "B"])]
-    #[case::pointers("fn foo<A, B, C>(a: *const A, b: &B) {}", &["A", "B"])]
-    #[case::lifetime("fn foo<'a, A, B, C>(a: A, b: &'a B) {}", &["a", "A", "B"])]
-    #[case::transitive_lifetime("fn foo<'a, A, B, C>(a: A, b: B) where B: Iterator<Item=A> + 'a {}", &["a", "A", "B"])]
-    #[case::associated("fn foo<'a, A:Copy, C>(b: impl Iterator<Item=A> + 'a) {}", &["a", "A"])]
-    #[case::transitive_in_defs("fn foo<A:Copy, B: Iterator<Item=A>>(b: B) {}", &["A", "B"])]
-    #[case::transitive_in_where("fn foo<A:Copy, B>(b: B) where B: Iterator<Item=A> {}", &["A", "B"])]
-    #[case::transitive_const("fn foo<const A: usize, B, C>(b: B) where B: Some<A> {}", &["A", "B"])]
-    #[case::transitive_lifetime("fn foo<'a, A, B, C>(a: A, b: B) where B: Iterator<Item=A> + 'a {}", &["a", "A", "B"])]
-    #[case::transitive_lifetime(r#"fn foo<'a, 'b, 'c, 'd, A, B, C>
-        (a: A, b: B) 
-        where B: Iterator<Item=A> + 'c, 
-        'c: 'a + 'b {}"#, &["a", "b", "c", "A", "B"])]
-    fn references_ident_types_should(#[case] f: &str, #[case] expected: &[&str]) {
-        let f: ItemFn = f.ast();
-        let used = references_ident_types(&f.sig.generics, f.sig.inputs.iter(), &f.sig.output);
-
-        let expected = to_idents!(expected)
-            .into_iter()
-            .collect::<std::collections::HashSet<_>>();
-
-        assert_eq!(expected, used);
-    }
-
-    #[rstest]
-    #[case::remove_not_in_output(
-        r#"fn test<R: AsRef<str>, B, F, H: Iterator<Item=u32>>() -> (H, B, String, &str)
-                        where F: ToString,
-                        B: Borrow<u32>
-                        {}"#,
-        r#"fn test<B, H: Iterator<Item=u32>>() -> (H, B, String, &str)
-                        where B: Borrow<u32>
-                {}"#
-    )]
-    #[case::not_remove_used_in_arguments(
-        r#"fn test<R: AsRef<str>, B, F, H: Iterator<Item=u32>>
-                    (h: H, it: impl Iterator<Item=R>, j: &[B])
-                    where F: ToString,
-                    B: Borrow<u32>
-                {}"#,
-        r#"fn test<R: AsRef<str>, B, H: Iterator<Item=u32>>
-                    (h: H, it: impl Iterator<Item=R>, j: &[B])
-                    where
-                    B: Borrow<u32>
-                {}"#
-    )]
-    #[case::dont_remove_transitive(
-        r#"fn test<A, B, C, D, const F: usize, O>(a: A) where 
-            B: AsRef<C>,
-            A: Iterator<Item=[B; F]>,
-            D: ArsRef<O> {}"#,
-        r#"fn test<A, B, C, const F: usize>(a: A) where 
-            B: AsRef<C>,
-            A: Iterator<Item=[B; F]> {}"#
-    )]
-    #[case::remove_unused_lifetime(
-        "fn test<'a, 'b, 'c, 'd, 'e, 'f, 'g, A>(a: &'a uint32, b: impl AsRef<A> + 'b) where 'b: 'c + 'd, A: Copy + 'e, 'f: 'g {}",
-        "fn test<'a, 'b, 'c, 'd, 'e, A>(a: &'a uint32, b: impl AsRef<A> + 'b) where 'b: 'c + 'd, A: Copy + 'e {}"
-    )]
-    #[case::remove_unused_const(
-        r#"fn test<const A: usize, const B: usize, const C: usize, const D: usize, T, O>
-            (a: [u32; A], b: SomeType<B>, c: T) where 
-            T: Iterator<Item=[i32; C]>,
-            O: AsRef<D> 
-            {}"#,
-        r#"fn test<const A: usize, const B: usize, const C: usize, T>
-            (a: [u32; A], b: SomeType<B>, c: T) where 
-            T: Iterator<Item=[i32; C]>
-            {}"#
-    )]
-    fn generics_cleaner(#[case] code: &str, #[case] expected: &str) {
-        // Should remove all generics parameters that are not present in output
-        let item_fn: ItemFn = code.ast();
-
-        let expected: ItemFn = expected.ast();
-
-        let cleaned = generics_clean_up(
-            &item_fn.sig.generics,
-            item_fn.sig.inputs.iter(),
-            &item_fn.sig.output,
-        );
-
-        assert_eq!(expected.sig.generics, cleaned);
-    }
-
-    #[rstest]
-    #[case("1", "1")]
-    #[case(r#""1""#, "__1__")]
-    #[case(r#"Some::SomeElse"#, "Some__SomeElse")]
-    #[case(r#""minnie".to_owned()"#, "__minnie___to_owned__")]
-    #[case(
-        r#"vec![1 ,   2, 
-    3]"#,
-        "vec__1_2_3_"
-    )]
-    #[case(
-        r#"some_macro!("first", {second}, [third])"#,
-        "some_macro____first____second___third__"
-    )]
-    #[case(r#"'x'"#, "__x__")]
-    #[case::ops(r#"a*b+c/d-e%f^g"#, "a_b_c_d_e_f_g")]
-    fn sanitaze_ident_name(#[case] expression: impl AsRef<str>, #[case] expected: impl AsRef<str>) {
-        assert_eq!(expected.as_ref(), sanitize_ident(expression.as_ref()));
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.cargo_vcs_info.json
deleted file mode 100644
index dd48ed55..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "eeca449cca83e24150e46739e797aa82e9142809"
-  },
-  "path_in_vcs": ""
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/FUNDING.yml b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/FUNDING.yml
deleted file mode 100644
index 90ee18e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/FUNDING.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-github: [djc]
-patreon: dochtman
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/dependabot.yml b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/dependabot.yml
deleted file mode 100644
index 93a4164..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/dependabot.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-version: 2
-updates:
-- package-ecosystem: cargo
-  directory: "/"
-  schedule:
-    interval: daily
-    time: "04:00"
-  open-pull-requests-limit: 10
-  ignore:
-  - dependency-name: semver
-    versions:
-    - "> 1.0, < 2"
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/workflows/rust.yml b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/workflows/rust.yml
deleted file mode 100644
index 6a1c4f8..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.github/workflows/rust.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-name: CI
-
-on:
-  push:
-    branches: ['master']
-  pull_request:
-
-jobs:
-  test:
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
-        rust: [stable, beta, nightly, 1.32.0]
-        exclude:
-          - os: macos-latest
-            rust: beta
-          - os: windows-latest
-            rust: beta
-          - os: macos-latest
-            rust: nightly
-          - os: windows-latest
-            rust: nightly
-          - os: macos-latest
-            rust: 1.32.0
-          - os: windows-latest
-            rust: 1.32.0
-
-    runs-on: ${{ matrix.os }}
-
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{ matrix.rust }}
-          override: true
-      - uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --all-features --all-targets
-      - uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --all-features
-
-  lint:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-          components: rustfmt, clippy
-      - uses: actions-rs/cargo@v1
-        with:
-          command: fmt
-          args: --all -- --check
-      - uses: actions-rs/cargo@v1
-        if: always()
-        with:
-          command: clippy
-          args: --all-targets --all-features -- -D warnings
-
-  audit:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v2
-    - uses: EmbarkStudios/cargo-deny-action@v1
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.gitignore b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.gitignore
deleted file mode 100644
index 22d9b57..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-target
-Cargo.lock
-.DS_Store
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/Cargo.toml
deleted file mode 100644
index 7d3e880b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/Cargo.toml
+++ /dev/null
@@ -1,44 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2018"
-rust-version = "1.32"
-name = "rustc_version"
-version = "0.4.1"
-build = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = "A library for querying the version of a installed rustc compiler"
-documentation = "https://docs.rs/rustc_version/"
-readme = "README.md"
-keywords = [
-    "version",
-    "rustc",
-]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/djc/rustc-version-rs"
-
-[lib]
-name = "rustc_version"
-path = "src/lib.rs"
-
-[[test]]
-name = "all"
-path = "tests/all.rs"
-
-[dependencies.semver]
-version = "1.0"
-
-[dev-dependencies.doc-comment]
-version = "0.3"
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/Cargo.toml.orig
deleted file mode 100644
index 0c7c487..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/Cargo.toml.orig
+++ /dev/null
@@ -1,17 +0,0 @@
-[package]
-name = "rustc_version"
-version = "0.4.1"
-rust-version = "1.32"
-license = "MIT OR Apache-2.0"
-description = "A library for querying the version of a installed rustc compiler"
-readme = "README.md"
-documentation = "https://docs.rs/rustc_version/"
-repository = "https://github.com/djc/rustc-version-rs"
-keywords = ["version", "rustc"]
-edition = "2018"
-
-[dependencies]
-semver = "1.0"
-
-[dev-dependencies]
-doc-comment = "0.3"
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/LICENSE-APACHE
deleted file mode 100644
index 16fe87b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-	http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/LICENSE-MIT
deleted file mode 100644
index 40b8817..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/LICENSE-MIT
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2016 The Rust Project Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/README.md b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/README.md
deleted file mode 100644
index 9f3f7a5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-rustc-version-rs
-================
-
-[![Documentation](https://docs.rs/rustc_version/badge.svg)](https://docs.rs/rustc_version/)
-[![Crates.io](https://img.shields.io/crates/v/rustc_version.svg)](https://crates.io/crates/rustc_version)
-[![Build status](https://github.com/djc/rustc-version-rs/workflows/CI/badge.svg)](https://github.com/djc/rustc-version-rs/actions?query=workflow%3ACI)
-
-A library for querying the version of a `rustc` compiler.
-
-This can be used by build scripts or other tools dealing with Rust sources
-to make decisions based on the version of the compiler. Current MSRV is 1.32.0.
-
-If this is of interest, also consider looking at these other crates:
-
-* [autocfg](https://crates.io/crates/autocfg/), which helps with feature detection instead of depending on compiler versions
-* [rustversion](https://github.com/dtolnay/rustversion) provides a procedural macro with no other dependencies
-
-# Getting Started
-
-[rustc-version-rs is available on crates.io](https://crates.io/crates/rustc_version).
-It is recommended to look there for the newest released version, as well as links to the newest builds of the docs.
-
-At the point of the last update of this README, the latest published version could be used like this:
-
-Add the following dependency to your Cargo manifest...
-
-```toml
-[build-dependencies]
-rustc_version = "0.2"
-```
-
-... and see the [docs](https://docs.rs/rustc_version) for how to use it.
-
-# Example
-
-```rust
-// This could be a cargo build script
-
-use rustc_version::{version, version_meta, Channel, Version};
-
-fn main() {
-    // Assert we haven't travelled back in time
-    assert!(version().unwrap().major >= 1);
-
-    // Set cfg flags depending on release channel
-    match version_meta().unwrap().channel {
-        Channel::Stable => {
-            println!("cargo:rustc-cfg=RUSTC_IS_STABLE");
-        }
-        Channel::Beta => {
-            println!("cargo:rustc-cfg=RUSTC_IS_BETA");
-        }
-        Channel::Nightly => {
-            println!("cargo:rustc-cfg=RUSTC_IS_NIGHTLY");
-        }
-        Channel::Dev => {
-            println!("cargo:rustc-cfg=RUSTC_IS_DEV");
-        }
-    }
-
-    // Check for a minimum version
-    if version().unwrap() >= Version::parse("1.4.0").unwrap() {
-        println!("cargo:rustc-cfg=compiler_has_important_bugfix");
-    }
-}
-```
-
-## License
-
-Licensed under either of
-
- * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
-
-at your option.
-
-### Contribution
-
-Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
-additional terms or conditions.
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/deny.toml b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/deny.toml
deleted file mode 100644
index ab17368..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/deny.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-[licenses]
-allow = ["MIT"]
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/src/lib.rs
deleted file mode 100644
index d14c768..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/src/lib.rs
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright 2016 rustc-version-rs developers
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![warn(missing_docs)]
-
-//! Simple library for getting the version information of a `rustc`
-//! compiler.
-//!
-//! This can be used by build scripts or other tools dealing with Rust sources
-//! to make decisions based on the version of the compiler.
-//!
-//! It calls `$RUSTC --version -v` and parses the output, falling
-//! back to `rustc` if `$RUSTC` is not set.
-//!
-//! # Example
-//!
-//! ```rust
-//! // This could be a cargo build script
-//!
-//! use rustc_version::{version, version_meta, Channel, Version};
-//!
-//! // Assert we haven't travelled back in time
-//! assert!(version().unwrap().major >= 1);
-//!
-//! // Set cfg flags depending on release channel
-//! match version_meta().unwrap().channel {
-//!     Channel::Stable => {
-//!         println!("cargo:rustc-cfg=RUSTC_IS_STABLE");
-//!     }
-//!     Channel::Beta => {
-//!         println!("cargo:rustc-cfg=RUSTC_IS_BETA");
-//!     }
-//!     Channel::Nightly => {
-//!         println!("cargo:rustc-cfg=RUSTC_IS_NIGHTLY");
-//!     }
-//!     Channel::Dev => {
-//!         println!("cargo:rustc-cfg=RUSTC_IS_DEV");
-//!     }
-//! }
-//!
-//! // Check for a minimum version
-//! if version().unwrap() >= Version::parse("1.4.0").unwrap() {
-//!     println!("cargo:rustc-cfg=compiler_has_important_bugfix");
-//! }
-//! ```
-
-#[cfg(test)]
-#[macro_use]
-extern crate doc_comment;
-
-#[cfg(test)]
-doctest!("../README.md");
-
-use std::collections::HashMap;
-use std::process::Command;
-use std::{env, error, fmt, io, num, str};
-use std::{ffi::OsString, str::FromStr};
-
-// Convenience re-export to allow version comparison without needing to add
-// semver crate.
-pub use semver::Version;
-
-use Error::*;
-
-/// Release channel of the compiler.
-#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
-pub enum Channel {
-    /// Development release channel
-    Dev,
-    /// Nightly release channel
-    Nightly,
-    /// Beta release channel
-    Beta,
-    /// Stable release channel
-    Stable,
-}
-
-/// LLVM version
-///
-/// LLVM's version numbering scheme is not semver compatible until version 4.0
-///
-/// rustc [just prints the major and minor versions], so other parts of the version are not included.
-///
-/// [just prints the major and minor versions]: https://github.com/rust-lang/rust/blob/b5c9e2448c9ace53ad5c11585803894651b18b0a/compiler/rustc_codegen_llvm/src/llvm_util.rs#L173-L178
-#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
-pub struct LlvmVersion {
-    // fields must be ordered major, minor for comparison to be correct
-    /// Major version
-    pub major: u64,
-    /// Minor version
-    pub minor: u64,
-    // TODO: expose micro version here
-}
-
-impl fmt::Display for LlvmVersion {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}.{}", self.major, self.minor)
-    }
-}
-
-impl FromStr for LlvmVersion {
-    type Err = LlvmVersionParseError;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let mut parts = s
-            .split('.')
-            .map(|part| -> Result<u64, LlvmVersionParseError> {
-                if part == "0" {
-                    Ok(0)
-                } else if part.starts_with('0') {
-                    Err(LlvmVersionParseError::ComponentMustNotHaveLeadingZeros)
-                } else if part.starts_with('-') || part.starts_with('+') {
-                    Err(LlvmVersionParseError::ComponentMustNotHaveSign)
-                } else {
-                    Ok(part.parse()?)
-                }
-            });
-
-        let major = parts.next().unwrap()?;
-        let mut minor = 0;
-
-        if let Some(part) = parts.next() {
-            minor = part?;
-        } else if major < 4 {
-            // LLVM versions earlier than 4.0 have significant minor versions, so require the minor version in this case.
-            return Err(LlvmVersionParseError::MinorVersionRequiredBefore4);
-        }
-
-        if let Some(Err(e)) = parts.next() {
-            return Err(e);
-        }
-
-        if parts.next().is_some() {
-            return Err(LlvmVersionParseError::TooManyComponents);
-        }
-
-        Ok(Self { major, minor })
-    }
-}
-
-/// Rustc version plus metadata like git short hash and build date.
-#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
-pub struct VersionMeta {
-    /// Version of the compiler
-    pub semver: Version,
-
-    /// Git short hash of the build of the compiler
-    pub commit_hash: Option<String>,
-
-    /// Commit date of the compiler
-    pub commit_date: Option<String>,
-
-    /// Build date of the compiler; this was removed between Rust 1.0.0 and 1.1.0.
-    pub build_date: Option<String>,
-
-    /// Release channel of the compiler
-    pub channel: Channel,
-
-    /// Host target triple of the compiler
-    pub host: String,
-
-    /// Short version string of the compiler
-    pub short_version_string: String,
-
-    /// Version of LLVM used by the compiler
-    pub llvm_version: Option<LlvmVersion>,
-}
-
-impl VersionMeta {
-    /// Returns the version metadata for `cmd`, which should be a `rustc` command.
-    pub fn for_command(mut cmd: Command) -> Result<VersionMeta> {
-        let out = cmd
-            .arg("-vV")
-            .output()
-            .map_err(Error::CouldNotExecuteCommand)?;
-
-        if !out.status.success() {
-            return Err(Error::CommandError {
-                stdout: String::from_utf8_lossy(&out.stdout).into(),
-                stderr: String::from_utf8_lossy(&out.stderr).into(),
-            });
-        }
-
-        version_meta_for(str::from_utf8(&out.stdout)?)
-    }
-}
-
-/// Returns the `rustc` SemVer version.
-pub fn version() -> Result<Version> {
-    Ok(version_meta()?.semver)
-}
-
-/// Returns the `rustc` SemVer version and additional metadata
-/// like the git short hash and build date.
-pub fn version_meta() -> Result<VersionMeta> {
-    let rustc = env::var_os("RUSTC").unwrap_or_else(|| OsString::from("rustc"));
-    let cmd = if let Some(wrapper) = env::var_os("RUSTC_WRAPPER").filter(|w| !w.is_empty()) {
-        let mut cmd = Command::new(wrapper);
-        cmd.arg(rustc);
-        cmd
-    } else {
-        Command::new(rustc)
-    };
-
-    VersionMeta::for_command(cmd)
-}
-
-/// Parses a "rustc -vV" output string and returns
-/// the SemVer version and additional metadata
-/// like the git short hash and build date.
-pub fn version_meta_for(verbose_version_string: &str) -> Result<VersionMeta> {
-    let mut map = HashMap::new();
-    for (i, line) in verbose_version_string.lines().enumerate() {
-        if i == 0 {
-            map.insert("short", line);
-            continue;
-        }
-
-        let mut parts = line.splitn(2, ": ");
-        let key = match parts.next() {
-            Some(key) => key,
-            None => continue,
-        };
-
-        if let Some(value) = parts.next() {
-            map.insert(key, value);
-        }
-    }
-
-    let short_version_string = expect_key("short", &map)?;
-    let host = expect_key("host", &map)?;
-    let release = expect_key("release", &map)?;
-    let semver: Version = release.parse()?;
-
-    let channel = match semver.pre.split('.').next().unwrap() {
-        "" => Channel::Stable,
-        "dev" => Channel::Dev,
-        "beta" => Channel::Beta,
-        "nightly" => Channel::Nightly,
-        x => return Err(Error::UnknownPreReleaseTag(x.to_owned())),
-    };
-
-    let commit_hash = expect_key_or_unknown("commit-hash", &map)?;
-    let commit_date = expect_key_or_unknown("commit-date", &map)?;
-    let build_date = map
-        .get("build-date")
-        .filter(|&v| *v != "unknown")
-        .map(|&v| String::from(v));
-    let llvm_version = match map.get("LLVM version") {
-        Some(&v) => Some(v.parse()?),
-        None => None,
-    };
-
-    Ok(VersionMeta {
-        semver,
-        commit_hash,
-        commit_date,
-        build_date,
-        channel,
-        host,
-        short_version_string,
-        llvm_version,
-    })
-}
-
-fn expect_key_or_unknown(key: &str, map: &HashMap<&str, &str>) -> Result<Option<String>, Error> {
-    match map.get(key) {
-        Some(&"unknown") => Ok(None),
-        Some(&v) => Ok(Some(String::from(v))),
-        None => Err(Error::UnexpectedVersionFormat),
-    }
-}
-
-fn expect_key(key: &str, map: &HashMap<&str, &str>) -> Result<String, Error> {
-    map.get(key)
-        .map(|&v| String::from(v))
-        .ok_or(Error::UnexpectedVersionFormat)
-}
-
-/// LLVM Version Parse Error
-#[derive(Debug)]
-pub enum LlvmVersionParseError {
-    /// An error occurred in parsing a version component as an integer
-    ParseIntError(num::ParseIntError),
-    /// A version component must not have leading zeros
-    ComponentMustNotHaveLeadingZeros,
-    /// A version component has a sign
-    ComponentMustNotHaveSign,
-    /// Minor version component must be zero on LLVM versions later than 4.0
-    MinorVersionMustBeZeroAfter4,
-    /// Minor version component is required on LLVM versions earlier than 4.0
-    MinorVersionRequiredBefore4,
-    /// Too many components
-    TooManyComponents,
-}
-
-impl From<num::ParseIntError> for LlvmVersionParseError {
-    fn from(e: num::ParseIntError) -> Self {
-        LlvmVersionParseError::ParseIntError(e)
-    }
-}
-
-impl fmt::Display for LlvmVersionParseError {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            LlvmVersionParseError::ParseIntError(e) => {
-                write!(f, "error parsing LLVM version component: {}", e)
-            }
-            LlvmVersionParseError::ComponentMustNotHaveLeadingZeros => {
-                write!(f, "a version component must not have leading zeros")
-            }
-            LlvmVersionParseError::ComponentMustNotHaveSign => {
-                write!(f, "a version component must not have a sign")
-            }
-            LlvmVersionParseError::MinorVersionMustBeZeroAfter4 => write!(
-                f,
-                "LLVM's minor version component must be 0 for versions greater than 4.0"
-            ),
-            LlvmVersionParseError::MinorVersionRequiredBefore4 => write!(
-                f,
-                "LLVM's minor version component is required for versions less than 4.0"
-            ),
-            LlvmVersionParseError::TooManyComponents => write!(f, "too many version components"),
-        }
-    }
-}
-
-impl error::Error for LlvmVersionParseError {
-    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
-        match self {
-            LlvmVersionParseError::ParseIntError(e) => Some(e),
-            LlvmVersionParseError::ComponentMustNotHaveLeadingZeros
-            | LlvmVersionParseError::ComponentMustNotHaveSign
-            | LlvmVersionParseError::MinorVersionMustBeZeroAfter4
-            | LlvmVersionParseError::MinorVersionRequiredBefore4
-            | LlvmVersionParseError::TooManyComponents => None,
-        }
-    }
-}
-
-/// The error type for this crate.
-#[derive(Debug)]
-pub enum Error {
-    /// An error occurred while trying to find the `rustc` to run.
-    CouldNotExecuteCommand(io::Error),
-    /// Error output from the command that was run.
-    CommandError {
-        /// stdout output from the command
-        stdout: String,
-        /// stderr output from the command
-        stderr: String,
-    },
-    /// The output of `rustc -vV` was not valid utf-8.
-    Utf8Error(str::Utf8Error),
-    /// The output of `rustc -vV` was not in the expected format.
-    UnexpectedVersionFormat,
-    /// An error occurred in parsing the semver.
-    SemVerError(semver::Error),
-    /// The pre-release tag is unknown.
-    UnknownPreReleaseTag(String),
-    /// An error occurred in parsing a `LlvmVersion`.
-    LlvmVersionError(LlvmVersionParseError),
-}
-
-impl fmt::Display for Error {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match *self {
-            CouldNotExecuteCommand(ref e) => write!(f, "could not execute command: {}", e),
-            CommandError {
-                ref stdout,
-                ref stderr,
-            } => write!(
-                f,
-                "error from command -- stderr:\n\n{}\n\nstderr:\n\n{}",
-                stderr, stdout,
-            ),
-            Utf8Error(_) => write!(f, "invalid UTF-8 output from `rustc -vV`"),
-            UnexpectedVersionFormat => write!(f, "unexpected `rustc -vV` format"),
-            SemVerError(ref e) => write!(f, "error parsing version: {}", e),
-            UnknownPreReleaseTag(ref i) => write!(f, "unknown pre-release tag: {}", i),
-            LlvmVersionError(ref e) => write!(f, "error parsing LLVM's version: {}", e),
-        }
-    }
-}
-
-impl error::Error for Error {
-    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
-        match *self {
-            CouldNotExecuteCommand(ref e) => Some(e),
-            CommandError { .. } => None,
-            Utf8Error(ref e) => Some(e),
-            UnexpectedVersionFormat => None,
-            SemVerError(ref e) => Some(e),
-            UnknownPreReleaseTag(_) => None,
-            LlvmVersionError(ref e) => Some(e),
-        }
-    }
-}
-
-macro_rules! impl_from {
-    ($($err_ty:ty => $variant:ident),* $(,)*) => {
-        $(
-            impl From<$err_ty> for Error {
-                fn from(e: $err_ty) -> Error {
-                    Error::$variant(e)
-                }
-            }
-        )*
-    }
-}
-
-impl_from! {
-    str::Utf8Error => Utf8Error,
-    semver::Error => SemVerError,
-    LlvmVersionParseError => LlvmVersionError,
-}
-
-/// The result type for this crate.
-pub type Result<T, E = Error> = std::result::Result<T, E>;
diff --git a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/tests/all.rs b/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/tests/all.rs
deleted file mode 100644
index c3cff70..0000000
--- a/third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/tests/all.rs
+++ /dev/null
@@ -1,456 +0,0 @@
-#![allow(clippy::match_like_matches_macro)]
-
-use std::process::Command;
-
-use rustc_version::{
-    version, version_meta, version_meta_for, Channel, Error, LlvmVersion, LlvmVersionParseError,
-    Version, VersionMeta,
-};
-
-#[test]
-fn rustc_error() {
-    let mut cmd = Command::new("rustc");
-    cmd.arg("--FOO");
-    let stderr = match VersionMeta::for_command(cmd) {
-        Err(Error::CommandError { stdout: _, stderr }) => stderr,
-        _ => panic!("command error expected"),
-    };
-    assert_eq!(stderr, "error: Unrecognized option: \'FOO\'\n\n");
-}
-
-#[test]
-fn smoketest() {
-    let v = version().unwrap();
-    assert!(v.major >= 1);
-
-    let v = version_meta().unwrap();
-    assert!(v.semver.major >= 1);
-
-    assert!(version().unwrap() >= Version::parse("1.0.0").unwrap());
-}
-
-#[test]
-fn parse_1_0_0() {
-    let version = version_meta_for(
-        "rustc 1.0.0 (a59de37e9 2015-05-13) (built 2015-05-14)
-binary: rustc
-commit-hash: a59de37e99060162a2674e3ff45409ac73595c0e
-commit-date: 2015-05-13
-build-date: 2015-05-14
-host: x86_64-unknown-linux-gnu
-release: 1.0.0",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.0.0").unwrap());
-    assert_eq!(
-        version.commit_hash,
-        Some("a59de37e99060162a2674e3ff45409ac73595c0e".into())
-    );
-    assert_eq!(version.commit_date, Some("2015-05-13".into()));
-    assert_eq!(version.build_date, Some("2015-05-14".into()));
-    assert_eq!(version.channel, Channel::Stable);
-    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
-    assert_eq!(
-        version.short_version_string,
-        "rustc 1.0.0 (a59de37e9 2015-05-13) (built 2015-05-14)"
-    );
-    assert_eq!(version.llvm_version, None);
-}
-
-#[test]
-fn parse_unknown() {
-    let version = version_meta_for(
-        "rustc 1.3.0
-binary: rustc
-commit-hash: unknown
-commit-date: unknown
-host: x86_64-unknown-linux-gnu
-release: 1.3.0",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.3.0").unwrap());
-    assert_eq!(version.commit_hash, None);
-    assert_eq!(version.commit_date, None);
-    assert_eq!(version.channel, Channel::Stable);
-    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
-    assert_eq!(version.short_version_string, "rustc 1.3.0");
-    assert_eq!(version.llvm_version, None);
-}
-
-#[test]
-fn parse_nightly() {
-    let version = version_meta_for(
-        "rustc 1.5.0-nightly (65d5c0833 2015-09-29)
-binary: rustc
-commit-hash: 65d5c083377645a115c4ac23a620d3581b9562b6
-commit-date: 2015-09-29
-host: x86_64-unknown-linux-gnu
-release: 1.5.0-nightly",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.5.0-nightly").unwrap());
-    assert_eq!(
-        version.commit_hash,
-        Some("65d5c083377645a115c4ac23a620d3581b9562b6".into())
-    );
-    assert_eq!(version.commit_date, Some("2015-09-29".into()));
-    assert_eq!(version.channel, Channel::Nightly);
-    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
-    assert_eq!(
-        version.short_version_string,
-        "rustc 1.5.0-nightly (65d5c0833 2015-09-29)"
-    );
-    assert_eq!(version.llvm_version, None);
-}
-
-#[test]
-fn parse_stable() {
-    let version = version_meta_for(
-        "rustc 1.3.0 (9a92aaf19 2015-09-15)
-binary: rustc
-commit-hash: 9a92aaf19a64603b02b4130fe52958cc12488900
-commit-date: 2015-09-15
-host: x86_64-unknown-linux-gnu
-release: 1.3.0",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.3.0").unwrap());
-    assert_eq!(
-        version.commit_hash,
-        Some("9a92aaf19a64603b02b4130fe52958cc12488900".into())
-    );
-    assert_eq!(version.commit_date, Some("2015-09-15".into()));
-    assert_eq!(version.channel, Channel::Stable);
-    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
-    assert_eq!(
-        version.short_version_string,
-        "rustc 1.3.0 (9a92aaf19 2015-09-15)"
-    );
-    assert_eq!(version.llvm_version, None);
-}
-
-#[test]
-fn parse_1_16_0_nightly() {
-    let version = version_meta_for(
-        "rustc 1.16.0-nightly (5d994d8b7 2017-01-05)
-binary: rustc
-commit-hash: 5d994d8b7e482e87467d4a521911477bd8284ce3
-commit-date: 2017-01-05
-host: x86_64-unknown-linux-gnu
-release: 1.16.0-nightly
-LLVM version: 3.9",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.16.0-nightly").unwrap());
-    assert_eq!(
-        version.commit_hash,
-        Some("5d994d8b7e482e87467d4a521911477bd8284ce3".into())
-    );
-    assert_eq!(version.commit_date, Some("2017-01-05".into()));
-    assert_eq!(version.channel, Channel::Nightly);
-    assert_eq!(version.host, "x86_64-unknown-linux-gnu");
-    assert_eq!(
-        version.short_version_string,
-        "rustc 1.16.0-nightly (5d994d8b7 2017-01-05)"
-    );
-    assert_eq!(
-        version.llvm_version,
-        Some(LlvmVersion { major: 3, minor: 9 })
-    );
-}
-
-#[test]
-fn parse_1_47_0_stable() {
-    let version = version_meta_for(
-        "rustc 1.47.0 (18bf6b4f0 2020-10-07)
-binary: rustc
-commit-hash: 18bf6b4f01a6feaf7259ba7cdae58031af1b7b39
-commit-date: 2020-10-07
-host: powerpc64le-unknown-linux-gnu
-release: 1.47.0
-LLVM version: 11.0",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.47.0").unwrap());
-    assert_eq!(
-        version.commit_hash,
-        Some("18bf6b4f01a6feaf7259ba7cdae58031af1b7b39".into())
-    );
-    assert_eq!(version.commit_date, Some("2020-10-07".into()));
-    assert_eq!(version.channel, Channel::Stable);
-    assert_eq!(version.host, "powerpc64le-unknown-linux-gnu");
-    assert_eq!(
-        version.short_version_string,
-        "rustc 1.47.0 (18bf6b4f0 2020-10-07)"
-    );
-    assert_eq!(
-        version.llvm_version,
-        Some(LlvmVersion {
-            major: 11,
-            minor: 0,
-        })
-    );
-}
-
-#[test]
-fn parse_llvm_micro() {
-    let version = version_meta_for(
-        "rustc 1.51.0-nightly (4253153db 2021-01-17)
-binary: rustc
-commit-hash: 4253153db205251f72ea4493687a31e04a2a8ca0
-commit-date: 2021-01-17
-host: x86_64-pc-windows-msvc
-release: 1.51.0-nightly
-LLVM version: 11.0.1",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.51.0-nightly").unwrap());
-    assert_eq!(
-        version.commit_hash.unwrap(),
-        "4253153db205251f72ea4493687a31e04a2a8ca0"
-    );
-    assert_eq!(version.commit_date.unwrap(), "2021-01-17");
-    assert_eq!(version.host, "x86_64-pc-windows-msvc");
-    assert_eq!(
-        version.short_version_string,
-        "rustc 1.51.0-nightly (4253153db 2021-01-17)"
-    );
-    assert_eq!(
-        version.llvm_version,
-        Some(LlvmVersion {
-            major: 11,
-            minor: 0
-        })
-    );
-}
-
-#[test]
-fn parse_debian_buster() {
-    let version = version_meta_for(
-        "rustc 1.41.1
-binary: rustc
-commit-hash: unknown
-commit-date: unknown
-host: powerpc64le-unknown-linux-gnu
-release: 1.41.1
-LLVM version: 7.0",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.41.1").unwrap());
-    assert_eq!(version.commit_hash, None);
-    assert_eq!(version.commit_date, None);
-    assert_eq!(version.channel, Channel::Stable);
-    assert_eq!(version.host, "powerpc64le-unknown-linux-gnu");
-    assert_eq!(version.short_version_string, "rustc 1.41.1");
-    assert_eq!(
-        version.llvm_version,
-        Some(LlvmVersion { major: 7, minor: 0 })
-    );
-}
-
-#[test]
-fn parse_termux() {
-    let version = version_meta_for(
-        "rustc 1.46.0
-binary: rustc
-commit-hash: unknown
-commit-date: unknown
-host: aarch64-linux-android
-release: 1.46.0
-LLVM version: 10.0",
-    )
-    .unwrap();
-
-    assert_eq!(version.semver, Version::parse("1.46.0").unwrap());
-    assert_eq!(version.commit_hash, None);
-    assert_eq!(version.commit_date, None);
-    assert_eq!(version.channel, Channel::Stable);
-    assert_eq!(version.host, "aarch64-linux-android");
-    assert_eq!(version.short_version_string, "rustc 1.46.0");
-    assert_eq!(
-        version.llvm_version,
-        Some(LlvmVersion {
-            major: 10,
-            minor: 0,
-        })
-    );
-}
-
-#[test]
-fn parse_llvm_version_empty() {
-    let res: Result<LlvmVersion, _> = "".parse();
-    assert!(match res {
-        Err(LlvmVersionParseError::ParseIntError(_)) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_invalid_char() {
-    let res: Result<LlvmVersion, _> = "A".parse();
-    assert!(match res {
-        Err(LlvmVersionParseError::ParseIntError(_)) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_overflow() {
-    let res: Result<LlvmVersion, _> = "9999999999999999999999999999999".parse();
-    assert!(match res {
-        Err(LlvmVersionParseError::ParseIntError(_)) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_leading_zero_on_zero() {
-    let res: Result<LlvmVersion, _> = "00".parse();
-    assert!(match res {
-        Err(LlvmVersionParseError::ComponentMustNotHaveLeadingZeros) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_leading_zero_on_nonzero() {
-    let res: Result<LlvmVersion, _> = "01".parse();
-    assert!(match res {
-        Err(LlvmVersionParseError::ComponentMustNotHaveLeadingZeros) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_4_components() {
-    let res: Result<LlvmVersion, _> = "4.0.0.0".parse();
-
-    assert!(match res {
-        Err(LlvmVersionParseError::TooManyComponents) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_component_sign_plus() {
-    let res: Result<LlvmVersion, _> = "1.+3".parse();
-
-    assert!(match res {
-        Err(LlvmVersionParseError::ComponentMustNotHaveSign) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_component_sign_minus() {
-    let res: Result<LlvmVersion, _> = "1.-3".parse();
-
-    assert!(match res {
-        Err(LlvmVersionParseError::ComponentMustNotHaveSign) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_3() {
-    let res: Result<LlvmVersion, _> = "3".parse();
-
-    assert!(match res {
-        Err(LlvmVersionParseError::MinorVersionRequiredBefore4) => true,
-        _ => false,
-    });
-}
-
-#[test]
-fn parse_llvm_version_5() {
-    let v: LlvmVersion = "5".parse().unwrap();
-    assert_eq!(v, LlvmVersion { major: 5, minor: 0 });
-}
-
-#[test]
-fn parse_llvm_version_5_0() {
-    let v: LlvmVersion = "5.0".parse().unwrap();
-    assert_eq!(v, LlvmVersion { major: 5, minor: 0 });
-}
-
-#[test]
-fn parse_llvm_version_4_0() {
-    let v: LlvmVersion = "4.0".parse().unwrap();
-    assert_eq!(v, LlvmVersion { major: 4, minor: 0 });
-}
-
-#[test]
-fn parse_llvm_version_3_0() {
-    let v: LlvmVersion = "3.0".parse().unwrap();
-    assert_eq!(v, LlvmVersion { major: 3, minor: 0 });
-}
-
-#[test]
-fn parse_llvm_version_3_9() {
-    let v: LlvmVersion = "3.9".parse().unwrap();
-    assert_eq!(v, LlvmVersion { major: 3, minor: 9 });
-}
-
-#[test]
-fn parse_llvm_version_11_0() {
-    let v: LlvmVersion = "11.0".parse().unwrap();
-    assert_eq!(
-        v,
-        LlvmVersion {
-            major: 11,
-            minor: 0
-        }
-    );
-}
-
-#[test]
-fn parse_llvm_version_11() {
-    let v: LlvmVersion = "11".parse().unwrap();
-    assert_eq!(
-        v,
-        LlvmVersion {
-            major: 11,
-            minor: 0
-        }
-    );
-}
-
-#[test]
-fn test_llvm_version_comparison() {
-    // check that field order is correct
-    assert!(LlvmVersion { major: 3, minor: 9 } < LlvmVersion { major: 4, minor: 0 });
-}
-
-/*
-#[test]
-fn version_matches_replacement() {
-    let f = |s1: &str, s2: &str| {
-        let a = Version::parse(s1).unwrap();
-        let b = Version::parse(s2).unwrap();
-        println!("{} <= {} : {}", s1, s2, a <= b);
-    };
-
-    println!();
-
-    f("1.5.0",         "1.5.0");
-    f("1.5.0-nightly", "1.5.0");
-    f("1.5.0",         "1.5.0-nightly");
-    f("1.5.0-nightly", "1.5.0-nightly");
-
-    f("1.5.0",         "1.6.0");
-    f("1.5.0-nightly", "1.6.0");
-    f("1.5.0",         "1.6.0-nightly");
-    f("1.5.0-nightly", "1.6.0-nightly");
-
-    panic!();
-
-}
-*/
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.cargo_vcs_info.json
deleted file mode 100644
index 4f34e3c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "3e64fdbfce78bfbd2eb97bdbdc50ce4d62c9831b"
-  },
-  "path_in_vcs": ""
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.github/FUNDING.yml b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.github/FUNDING.yml
deleted file mode 100644
index 7507077..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.github/FUNDING.yml
+++ /dev/null
@@ -1 +0,0 @@
-github: dtolnay
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.github/workflows/ci.yml
deleted file mode 100644
index 2808a6f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.github/workflows/ci.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-name: CI
-
-on:
-  push:
-  pull_request:
-  workflow_dispatch:
-  schedule: [cron: "40 1 * * *"]
-
-permissions:
-  contents: read
-
-env:
-  RUSTFLAGS: -Dwarnings
-
-jobs:
-  pre_ci:
-    uses: dtolnay/.github/.github/workflows/pre_ci.yml@master
-
-  test:
-    name: Rust ${{matrix.rust}}
-    needs: pre_ci
-    if: needs.pre_ci.outputs.continue
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        rust: [nightly, beta, stable, 1.52.0, 1.46.0, 1.40.0, 1.39.0, 1.36.0, 1.33.0, 1.32.0, 1.31.0]
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: ${{matrix.rust}}
-      - name: Enable type layout randomization
-        run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout >> $GITHUB_ENV
-        if: matrix.rust == 'nightly'
-      - run: cargo test
-      - run: cargo check --no-default-features
-      - run: cargo check --features serde
-      - run: cargo check --no-default-features --features serde
-      - uses: actions/upload-artifact@v4
-        if: matrix.rust == 'nightly' && always()
-        with:
-          name: Cargo.lock
-          path: Cargo.lock
-        continue-on-error: true
-
-  node:
-    name: Node
-    needs: pre_ci
-    if: needs.pre_ci.outputs.continue
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@stable
-      - run: npm install semver
-      - run: cargo test
-        env:
-          RUSTFLAGS: --cfg test_node_semver ${{env.RUSTFLAGS}}
-
-  minimal:
-    name: Minimal versions
-    needs: pre_ci
-    if: needs.pre_ci.outputs.continue
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@nightly
-      - run: cargo generate-lockfile -Z minimal-versions
-      - run: cargo check --locked --features serde
-
-  doc:
-    name: Documentation
-    needs: pre_ci
-    if: needs.pre_ci.outputs.continue
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    env:
-      RUSTDOCFLAGS: -Dwarnings
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@nightly
-      - uses: dtolnay/install@cargo-docs-rs
-      - run: cargo docs-rs
-
-  clippy:
-    name: Clippy
-    runs-on: ubuntu-latest
-    if: github.event_name != 'pull_request'
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@clippy
-      - run: cargo clippy --tests --benches -- -Dclippy::all -Dclippy::pedantic
-
-  miri:
-    name: Miri
-    needs: pre_ci
-    if: needs.pre_ci.outputs.continue
-    runs-on: ubuntu-latest
-    env:
-      MIRIFLAGS: -Zmiri-strict-provenance
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@miri
-      - name: Run cargo miri test (64-bit little endian)
-        run: cargo miri test --target x86_64-unknown-linux-gnu
-      - name: Run cargo miri test (64-bit big endian)
-        run: cargo miri test --target powerpc64-unknown-linux-gnu
-      - name: Run cargo miri test (32-bit little endian)
-        run: cargo miri test --target i686-unknown-linux-gnu
-      - name: Run cargo miri test (32-bit big endian)
-        run: cargo miri test --target mips-unknown-linux-gnu
-
-  fuzz:
-    name: Fuzz
-    needs: pre_ci
-    if: needs.pre_ci.outputs.continue
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@nightly
-      - uses: dtolnay/install@cargo-fuzz
-      - run: cargo fuzz check
-
-  outdated:
-    name: Outdated
-    runs-on: ubuntu-latest
-    if: github.event_name != 'pull_request'
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@stable
-      - uses: dtolnay/install@cargo-outdated
-      - run: cargo outdated --workspace --exit-code 1
-      - run: cargo outdated --manifest-path fuzz/Cargo.toml --exit-code 1
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.gitignore b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.gitignore
deleted file mode 100644
index 1fcb0fb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-/node_modules/
-/package-lock.json
-/package.json
-/target/
-/Cargo.lock
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.lock b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.lock
deleted file mode 100644
index 3cc4c29..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.lock
+++ /dev/null
@@ -1,65 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.39"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "semver"
-version = "1.0.26"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "serde"
-version = "1.0.218"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.218"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "syn"
-version = "2.0.99"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe"
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.toml
deleted file mode 100644
index 4b33e17..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.toml
+++ /dev/null
@@ -1,78 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2018"
-rust-version = "1.31"
-name = "semver"
-version = "1.0.26"
-authors = ["David Tolnay <dtolnay@gmail.com>"]
-build = "build.rs"
-autolib = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = "Parser and evaluator for Cargo's flavor of Semantic Versioning"
-documentation = "https://docs.rs/semver"
-readme = "README.md"
-keywords = ["cargo"]
-categories = [
-    "data-structures",
-    "no-std",
-]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/dtolnay/semver"
-
-[package.metadata.docs.rs]
-rustdoc-args = [
-    "--generate-link-to-definition",
-    "--extern-html-root-url=core=https://doc.rust-lang.org",
-    "--extern-html-root-url=alloc=https://doc.rust-lang.org",
-    "--extern-html-root-url=std=https://doc.rust-lang.org",
-]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[package.metadata.playground]
-features = ["serde"]
-
-[features]
-default = ["std"]
-std = []
-
-[lib]
-name = "semver"
-path = "src/lib.rs"
-
-[[test]]
-name = "test_autotrait"
-path = "tests/test_autotrait.rs"
-
-[[test]]
-name = "test_identifier"
-path = "tests/test_identifier.rs"
-
-[[test]]
-name = "test_version"
-path = "tests/test_version.rs"
-
-[[test]]
-name = "test_version_req"
-path = "tests/test_version_req.rs"
-
-[[bench]]
-name = "parse"
-path = "benches/parse.rs"
-
-[dependencies.serde]
-version = "1.0.194"
-optional = true
-default-features = false
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.toml.orig
deleted file mode 100644
index ff91eb8..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/Cargo.toml.orig
+++ /dev/null
@@ -1,31 +0,0 @@
-[package]
-name = "semver"
-version = "1.0.26"
-authors = ["David Tolnay <dtolnay@gmail.com>"]
-categories = ["data-structures", "no-std"]
-description = "Parser and evaluator for Cargo's flavor of Semantic Versioning"
-documentation = "https://docs.rs/semver"
-edition = "2018"
-keywords = ["cargo"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/dtolnay/semver"
-rust-version = "1.31"
-
-[features]
-default = ["std"]
-std = []
-
-[dependencies]
-serde = { version = "1.0.194", optional = true, default-features = false }
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
-rustdoc-args = [
-    "--generate-link-to-definition",
-    "--extern-html-root-url=core=https://doc.rust-lang.org",
-    "--extern-html-root-url=alloc=https://doc.rust-lang.org",
-    "--extern-html-root-url=std=https://doc.rust-lang.org",
-]
-
-[package.metadata.playground]
-features = ["serde"]
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/LICENSE-APACHE
deleted file mode 100644
index 1b5ec8b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/LICENSE-APACHE
+++ /dev/null
@@ -1,176 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/LICENSE-MIT
deleted file mode 100644
index 31aa7938..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/LICENSE-MIT
+++ /dev/null
@@ -1,23 +0,0 @@
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/README.md b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/README.md
deleted file mode 100644
index a9a1cb8..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-semver
-======
-
-[<img alt="github" src="https://img.shields.io/badge/github-dtolnay/semver-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/dtolnay/semver)
-[<img alt="crates.io" src="https://img.shields.io/crates/v/semver.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/semver)
-[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-semver-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/semver)
-[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/dtolnay/semver/ci.yml?branch=master&style=for-the-badge" height="20">](https://github.com/dtolnay/semver/actions?query=branch%3Amaster)
-
-A parser and evaluator for Cargo's flavor of Semantic Versioning.
-
-Semantic Versioning (see <https://semver.org>) is a guideline for how version
-numbers are assigned and incremented. It is widely followed within the
-Cargo/crates.io ecosystem for Rust.
-
-```toml
-[dependencies]
-semver = "1.0"
-```
-
-*Compiler support: requires rustc 1.31+*
-
-<br>
-
-## Example
-
-```rust
-use semver::{BuildMetadata, Prerelease, Version, VersionReq};
-
-fn main() {
-    let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap();
-
-    // Check whether this requirement matches version 1.2.3-alpha.1 (no)
-    let version = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: Prerelease::new("alpha.1").unwrap(),
-        build: BuildMetadata::EMPTY,
-    };
-    assert!(!req.matches(&version));
-
-    // Check whether it matches 1.3.0 (yes it does)
-    let version = Version::parse("1.3.0").unwrap();
-    assert!(req.matches(&version));
-}
-```
-
-<br>
-
-## Scope of this crate
-
-Besides Cargo, several other package ecosystems and package managers for other
-languages also use SemVer:&ensp;RubyGems/Bundler for Ruby, npm for JavaScript,
-Composer for PHP, CocoaPods for Objective-C...
-
-The `semver` crate is specifically intended to implement Cargo's interpretation
-of Semantic Versioning.
-
-Where the various tools differ in their interpretation or implementation of the
-spec, this crate follows the implementation choices made by Cargo. If you are
-operating on version numbers from some other package ecosystem, you will want to
-use a different semver library which is appropriate to that ecosystem.
-
-The extent of Cargo's SemVer support is documented in the *[Specifying
-Dependencies]* chapter of the Cargo reference.
-
-[Specifying Dependencies]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
-
-<br>
-
-#### License
-
-<sup>
-Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
-2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
-</sup>
-
-<br>
-
-<sub>
-Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
-be dual licensed as above, without any additional terms or conditions.
-</sub>
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/benches/parse.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/benches/parse.rs
deleted file mode 100644
index d6aded7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/benches/parse.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-#![feature(test)]
-
-extern crate test;
-
-use semver::{Prerelease, Version, VersionReq};
-use test::{black_box, Bencher};
-
-#[bench]
-fn parse_prerelease(b: &mut Bencher) {
-    let text = "x.7.z.92";
-    b.iter(|| black_box(text).parse::<Prerelease>().unwrap());
-}
-
-#[bench]
-fn parse_version(b: &mut Bencher) {
-    let text = "1.0.2021-beta+exp.sha.5114f85";
-    b.iter(|| black_box(text).parse::<Version>().unwrap());
-}
-
-#[bench]
-fn parse_version_req(b: &mut Bencher) {
-    let text = ">=1.2.3, <2.0.0";
-    b.iter(|| black_box(text).parse::<VersionReq>().unwrap());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/build.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/build.rs
deleted file mode 100644
index ae0aae9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/build.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-use std::env;
-use std::process::Command;
-use std::str;
-
-fn main() {
-    println!("cargo:rerun-if-changed=build.rs");
-
-    let compiler = match rustc_minor_version() {
-        Some(compiler) => compiler,
-        None => return,
-    };
-
-    if compiler >= 80 {
-        println!("cargo:rustc-check-cfg=cfg(no_alloc_crate)");
-        println!("cargo:rustc-check-cfg=cfg(no_const_vec_new)");
-        println!("cargo:rustc-check-cfg=cfg(no_exhaustive_int_match)");
-        println!("cargo:rustc-check-cfg=cfg(no_non_exhaustive)");
-        println!("cargo:rustc-check-cfg=cfg(no_nonzero_bitscan)");
-        println!("cargo:rustc-check-cfg=cfg(no_str_strip_prefix)");
-        println!("cargo:rustc-check-cfg=cfg(no_track_caller)");
-        println!("cargo:rustc-check-cfg=cfg(no_unsafe_op_in_unsafe_fn_lint)");
-        println!("cargo:rustc-check-cfg=cfg(test_node_semver)");
-    }
-
-    if compiler < 33 {
-        // Exhaustive integer patterns. On older compilers, a final `_` arm is
-        // required even if every possible integer value is otherwise covered.
-        // https://github.com/rust-lang/rust/issues/50907
-        println!("cargo:rustc-cfg=no_exhaustive_int_match");
-    }
-
-    if compiler < 36 {
-        // extern crate alloc.
-        // https://blog.rust-lang.org/2019/07/04/Rust-1.36.0.html#the-alloc-crate-is-stable
-        println!("cargo:rustc-cfg=no_alloc_crate");
-    }
-
-    if compiler < 39 {
-        // const Vec::new.
-        // https://doc.rust-lang.org/std/vec/struct.Vec.html#method.new
-        println!("cargo:rustc-cfg=no_const_vec_new");
-    }
-
-    if compiler < 40 {
-        // #[non_exhaustive].
-        // https://blog.rust-lang.org/2019/12/19/Rust-1.40.0.html#non_exhaustive-structs-enums-and-variants
-        println!("cargo:rustc-cfg=no_non_exhaustive");
-    }
-
-    if compiler < 45 {
-        // String::strip_prefix.
-        // https://doc.rust-lang.org/std/primitive.str.html#method.strip_prefix
-        println!("cargo:rustc-cfg=no_str_strip_prefix");
-    }
-
-    if compiler < 46 {
-        // #[track_caller].
-        // https://blog.rust-lang.org/2020/08/27/Rust-1.46.0.html#track_caller
-        println!("cargo:rustc-cfg=no_track_caller");
-    }
-
-    if compiler < 52 {
-        // #![deny(unsafe_op_in_unsafe_fn)].
-        // https://github.com/rust-lang/rust/issues/71668
-        println!("cargo:rustc-cfg=no_unsafe_op_in_unsafe_fn_lint");
-    }
-
-    if compiler < 53 {
-        // Efficient intrinsics for count-leading-zeros and count-trailing-zeros
-        // on NonZero integers stabilized in 1.53.0. On many architectures these
-        // are more efficient than counting zeros on ordinary zeroable integers.
-        // https://doc.rust-lang.org/std/num/struct.NonZeroU64.html#method.leading_zeros
-        // https://doc.rust-lang.org/std/num/struct.NonZeroU64.html#method.trailing_zeros
-        println!("cargo:rustc-cfg=no_nonzero_bitscan");
-    }
-}
-
-fn rustc_minor_version() -> Option<u32> {
-    let rustc = env::var_os("RUSTC")?;
-    let output = Command::new(rustc).arg("--version").output().ok()?;
-    let version = str::from_utf8(&output.stdout).ok()?;
-    let mut pieces = version.split('.');
-    if pieces.next() != Some("rustc 1") {
-        return None;
-    }
-    pieces.next()?.parse().ok()
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/backport.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/backport.rs
deleted file mode 100644
index b5e1d02..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/backport.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-#[cfg(no_str_strip_prefix)] // rustc <1.45
-pub(crate) trait StripPrefixExt {
-    fn strip_prefix(&self, ch: char) -> Option<&str>;
-}
-
-#[cfg(no_str_strip_prefix)]
-impl StripPrefixExt for str {
-    fn strip_prefix(&self, ch: char) -> Option<&str> {
-        if self.starts_with(ch) {
-            Some(&self[ch.len_utf8()..])
-        } else {
-            None
-        }
-    }
-}
-
-pub(crate) use crate::alloc::vec::Vec;
-
-#[cfg(no_alloc_crate)] // rustc <1.36
-pub(crate) mod alloc {
-    pub use std::alloc;
-    pub use std::vec;
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/display.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/display.rs
deleted file mode 100644
index 3c2871bb..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/display.rs
+++ /dev/null
@@ -1,165 +0,0 @@
-use crate::{BuildMetadata, Comparator, Op, Prerelease, Version, VersionReq};
-use core::fmt::{self, Alignment, Debug, Display, Write};
-
-impl Display for Version {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        let do_display = |formatter: &mut fmt::Formatter| -> fmt::Result {
-            write!(formatter, "{}.{}.{}", self.major, self.minor, self.patch)?;
-            if !self.pre.is_empty() {
-                write!(formatter, "-{}", self.pre)?;
-            }
-            if !self.build.is_empty() {
-                write!(formatter, "+{}", self.build)?;
-            }
-            Ok(())
-        };
-
-        let do_len = || -> usize {
-            digits(self.major)
-                + 1
-                + digits(self.minor)
-                + 1
-                + digits(self.patch)
-                + !self.pre.is_empty() as usize
-                + self.pre.len()
-                + !self.build.is_empty() as usize
-                + self.build.len()
-        };
-
-        pad(formatter, do_display, do_len)
-    }
-}
-
-impl Display for VersionReq {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        if self.comparators.is_empty() {
-            return formatter.write_str("*");
-        }
-        for (i, comparator) in self.comparators.iter().enumerate() {
-            if i > 0 {
-                formatter.write_str(", ")?;
-            }
-            write!(formatter, "{}", comparator)?;
-        }
-        Ok(())
-    }
-}
-
-impl Display for Comparator {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        let op = match self.op {
-            Op::Exact => "=",
-            Op::Greater => ">",
-            Op::GreaterEq => ">=",
-            Op::Less => "<",
-            Op::LessEq => "<=",
-            Op::Tilde => "~",
-            Op::Caret => "^",
-            Op::Wildcard => "",
-            #[cfg(no_non_exhaustive)]
-            Op::__NonExhaustive => unreachable!(),
-        };
-        formatter.write_str(op)?;
-        write!(formatter, "{}", self.major)?;
-        if let Some(minor) = &self.minor {
-            write!(formatter, ".{}", minor)?;
-            if let Some(patch) = &self.patch {
-                write!(formatter, ".{}", patch)?;
-                if !self.pre.is_empty() {
-                    write!(formatter, "-{}", self.pre)?;
-                }
-            } else if self.op == Op::Wildcard {
-                formatter.write_str(".*")?;
-            }
-        } else if self.op == Op::Wildcard {
-            formatter.write_str(".*")?;
-        }
-        Ok(())
-    }
-}
-
-impl Display for Prerelease {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str(self.as_str())
-    }
-}
-
-impl Display for BuildMetadata {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str(self.as_str())
-    }
-}
-
-impl Debug for Version {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        let mut debug = formatter.debug_struct("Version");
-        debug
-            .field("major", &self.major)
-            .field("minor", &self.minor)
-            .field("patch", &self.patch);
-        if !self.pre.is_empty() {
-            debug.field("pre", &self.pre);
-        }
-        if !self.build.is_empty() {
-            debug.field("build", &self.build);
-        }
-        debug.finish()
-    }
-}
-
-impl Debug for Prerelease {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        write!(formatter, "Prerelease(\"{}\")", self)
-    }
-}
-
-impl Debug for BuildMetadata {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        write!(formatter, "BuildMetadata(\"{}\")", self)
-    }
-}
-
-fn pad(
-    formatter: &mut fmt::Formatter,
-    do_display: impl FnOnce(&mut fmt::Formatter) -> fmt::Result,
-    do_len: impl FnOnce() -> usize,
-) -> fmt::Result {
-    let min_width = match formatter.width() {
-        Some(min_width) => min_width,
-        None => return do_display(formatter),
-    };
-
-    let len = do_len();
-    if len >= min_width {
-        return do_display(formatter);
-    }
-
-    let default_align = Alignment::Left;
-    let align = formatter.align().unwrap_or(default_align);
-    let padding = min_width - len;
-    let (pre_pad, post_pad) = match align {
-        Alignment::Left => (0, padding),
-        Alignment::Right => (padding, 0),
-        Alignment::Center => (padding / 2, (padding + 1) / 2),
-    };
-
-    let fill = formatter.fill();
-    for _ in 0..pre_pad {
-        formatter.write_char(fill)?;
-    }
-
-    do_display(formatter)?;
-
-    for _ in 0..post_pad {
-        formatter.write_char(fill)?;
-    }
-    Ok(())
-}
-
-fn digits(val: u64) -> usize {
-    if val < 10 {
-        1
-    } else {
-        1 + digits(val / 10)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/error.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/error.rs
deleted file mode 100644
index 44c3b587..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/error.rs
+++ /dev/null
@@ -1,126 +0,0 @@
-use crate::parse::Error;
-use core::fmt::{self, Debug, Display};
-
-pub(crate) enum ErrorKind {
-    Empty,
-    UnexpectedEnd(Position),
-    UnexpectedChar(Position, char),
-    UnexpectedCharAfter(Position, char),
-    ExpectedCommaFound(Position, char),
-    LeadingZero(Position),
-    Overflow(Position),
-    EmptySegment(Position),
-    IllegalCharacter(Position),
-    WildcardNotTheOnlyComparator(char),
-    UnexpectedAfterWildcard,
-    ExcessiveComparators,
-}
-
-#[derive(Copy, Clone, Eq, PartialEq)]
-pub(crate) enum Position {
-    Major,
-    Minor,
-    Patch,
-    Pre,
-    Build,
-}
-
-#[cfg(feature = "std")]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-impl std::error::Error for Error {}
-
-impl Display for Error {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        match &self.kind {
-            ErrorKind::Empty => formatter.write_str("empty string, expected a semver version"),
-            ErrorKind::UnexpectedEnd(pos) => {
-                write!(formatter, "unexpected end of input while parsing {}", pos)
-            }
-            ErrorKind::UnexpectedChar(pos, ch) => {
-                write!(
-                    formatter,
-                    "unexpected character {} while parsing {}",
-                    QuotedChar(*ch),
-                    pos,
-                )
-            }
-            ErrorKind::UnexpectedCharAfter(pos, ch) => {
-                write!(
-                    formatter,
-                    "unexpected character {} after {}",
-                    QuotedChar(*ch),
-                    pos,
-                )
-            }
-            ErrorKind::ExpectedCommaFound(pos, ch) => {
-                write!(
-                    formatter,
-                    "expected comma after {}, found {}",
-                    pos,
-                    QuotedChar(*ch),
-                )
-            }
-            ErrorKind::LeadingZero(pos) => {
-                write!(formatter, "invalid leading zero in {}", pos)
-            }
-            ErrorKind::Overflow(pos) => {
-                write!(formatter, "value of {} exceeds u64::MAX", pos)
-            }
-            ErrorKind::EmptySegment(pos) => {
-                write!(formatter, "empty identifier segment in {}", pos)
-            }
-            ErrorKind::IllegalCharacter(pos) => {
-                write!(formatter, "unexpected character in {}", pos)
-            }
-            ErrorKind::WildcardNotTheOnlyComparator(ch) => {
-                write!(
-                    formatter,
-                    "wildcard req ({}) must be the only comparator in the version req",
-                    ch,
-                )
-            }
-            ErrorKind::UnexpectedAfterWildcard => {
-                formatter.write_str("unexpected character after wildcard in version req")
-            }
-            ErrorKind::ExcessiveComparators => {
-                formatter.write_str("excessive number of version comparators")
-            }
-        }
-    }
-}
-
-impl Display for Position {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str(match self {
-            Position::Major => "major version number",
-            Position::Minor => "minor version number",
-            Position::Patch => "patch version number",
-            Position::Pre => "pre-release identifier",
-            Position::Build => "build metadata",
-        })
-    }
-}
-
-impl Debug for Error {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str("Error(\"")?;
-        Display::fmt(self, formatter)?;
-        formatter.write_str("\")")?;
-        Ok(())
-    }
-}
-
-struct QuotedChar(char);
-
-impl Display for QuotedChar {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        // Standard library versions prior to https://github.com/rust-lang/rust/pull/95345
-        // print character 0 as '\u{0}'. We prefer '\0' to keep error messages
-        // the same across all supported Rust versions.
-        if self.0 == '\0' {
-            formatter.write_str("'\\0'")
-        } else {
-            write!(formatter, "{:?}", self.0)
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/eval.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/eval.rs
deleted file mode 100644
index e6e38949..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/eval.rs
+++ /dev/null
@@ -1,181 +0,0 @@
-use crate::{Comparator, Op, Version, VersionReq};
-
-pub(crate) fn matches_req(req: &VersionReq, ver: &Version) -> bool {
-    for cmp in &req.comparators {
-        if !matches_impl(cmp, ver) {
-            return false;
-        }
-    }
-
-    if ver.pre.is_empty() {
-        return true;
-    }
-
-    // If a version has a prerelease tag (for example, 1.2.3-alpha.3) then it
-    // will only be allowed to satisfy req if at least one comparator with the
-    // same major.minor.patch also has a prerelease tag.
-    for cmp in &req.comparators {
-        if pre_is_compatible(cmp, ver) {
-            return true;
-        }
-    }
-
-    false
-}
-
-pub(crate) fn matches_comparator(cmp: &Comparator, ver: &Version) -> bool {
-    matches_impl(cmp, ver) && (ver.pre.is_empty() || pre_is_compatible(cmp, ver))
-}
-
-fn matches_impl(cmp: &Comparator, ver: &Version) -> bool {
-    match cmp.op {
-        Op::Exact | Op::Wildcard => matches_exact(cmp, ver),
-        Op::Greater => matches_greater(cmp, ver),
-        Op::GreaterEq => matches_exact(cmp, ver) || matches_greater(cmp, ver),
-        Op::Less => matches_less(cmp, ver),
-        Op::LessEq => matches_exact(cmp, ver) || matches_less(cmp, ver),
-        Op::Tilde => matches_tilde(cmp, ver),
-        Op::Caret => matches_caret(cmp, ver),
-        #[cfg(no_non_exhaustive)]
-        Op::__NonExhaustive => unreachable!(),
-    }
-}
-
-fn matches_exact(cmp: &Comparator, ver: &Version) -> bool {
-    if ver.major != cmp.major {
-        return false;
-    }
-
-    if let Some(minor) = cmp.minor {
-        if ver.minor != minor {
-            return false;
-        }
-    }
-
-    if let Some(patch) = cmp.patch {
-        if ver.patch != patch {
-            return false;
-        }
-    }
-
-    ver.pre == cmp.pre
-}
-
-fn matches_greater(cmp: &Comparator, ver: &Version) -> bool {
-    if ver.major != cmp.major {
-        return ver.major > cmp.major;
-    }
-
-    match cmp.minor {
-        None => return false,
-        Some(minor) => {
-            if ver.minor != minor {
-                return ver.minor > minor;
-            }
-        }
-    }
-
-    match cmp.patch {
-        None => return false,
-        Some(patch) => {
-            if ver.patch != patch {
-                return ver.patch > patch;
-            }
-        }
-    }
-
-    ver.pre > cmp.pre
-}
-
-fn matches_less(cmp: &Comparator, ver: &Version) -> bool {
-    if ver.major != cmp.major {
-        return ver.major < cmp.major;
-    }
-
-    match cmp.minor {
-        None => return false,
-        Some(minor) => {
-            if ver.minor != minor {
-                return ver.minor < minor;
-            }
-        }
-    }
-
-    match cmp.patch {
-        None => return false,
-        Some(patch) => {
-            if ver.patch != patch {
-                return ver.patch < patch;
-            }
-        }
-    }
-
-    ver.pre < cmp.pre
-}
-
-fn matches_tilde(cmp: &Comparator, ver: &Version) -> bool {
-    if ver.major != cmp.major {
-        return false;
-    }
-
-    if let Some(minor) = cmp.minor {
-        if ver.minor != minor {
-            return false;
-        }
-    }
-
-    if let Some(patch) = cmp.patch {
-        if ver.patch != patch {
-            return ver.patch > patch;
-        }
-    }
-
-    ver.pre >= cmp.pre
-}
-
-fn matches_caret(cmp: &Comparator, ver: &Version) -> bool {
-    if ver.major != cmp.major {
-        return false;
-    }
-
-    let minor = match cmp.minor {
-        None => return true,
-        Some(minor) => minor,
-    };
-
-    let patch = match cmp.patch {
-        None => {
-            if cmp.major > 0 {
-                return ver.minor >= minor;
-            } else {
-                return ver.minor == minor;
-            }
-        }
-        Some(patch) => patch,
-    };
-
-    if cmp.major > 0 {
-        if ver.minor != minor {
-            return ver.minor > minor;
-        } else if ver.patch != patch {
-            return ver.patch > patch;
-        }
-    } else if minor > 0 {
-        if ver.minor != minor {
-            return false;
-        } else if ver.patch != patch {
-            return ver.patch > patch;
-        }
-    } else if ver.minor != minor || ver.patch != patch {
-        return false;
-    }
-
-    ver.pre >= cmp.pre
-}
-
-fn pre_is_compatible(cmp: &Comparator, ver: &Version) -> bool {
-    cmp.major == ver.major
-        && cmp.minor == Some(ver.minor)
-        && cmp.patch == Some(ver.patch)
-        && !cmp.pre.is_empty()
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/identifier.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/identifier.rs
deleted file mode 100644
index 4cc387b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/identifier.rs
+++ /dev/null
@@ -1,426 +0,0 @@
-// This module implements Identifier, a short-optimized string allowed to
-// contain only the ASCII characters hyphen, dot, 0-9, A-Z, a-z.
-//
-// As of mid-2021, the distribution of pre-release lengths on crates.io is:
-//
-//     length  count         length  count         length  count
-//        0  355929            11      81            24       2
-//        1     208            12      48            25       6
-//        2     236            13      55            26      10
-//        3    1909            14      25            27       4
-//        4    1284            15      15            28       1
-//        5    1742            16      35            30       1
-//        6    3440            17       9            31       5
-//        7    5624            18       6            32       1
-//        8    1321            19      12            36       2
-//        9     179            20       2            37     379
-//       10      65            23      11
-//
-// and the distribution of build metadata lengths is:
-//
-//     length  count         length  count         length  count
-//        0  364445             8    7725            18       1
-//        1      72             9      16            19       1
-//        2       7            10      85            20       1
-//        3      28            11      17            22       4
-//        4       9            12      10            26       1
-//        5      68            13       9            27       1
-//        6      73            14      10            40       5
-//        7      53            15       6
-//
-// Therefore it really behooves us to be able to use the entire 8 bytes of a
-// pointer for inline storage. For both pre-release and build metadata there are
-// vastly more strings with length exactly 8 bytes than the sum over all lengths
-// longer than 8 bytes.
-//
-// To differentiate the inline representation from the heap allocated long
-// representation, we'll allocate heap pointers with 2-byte alignment so that
-// they are guaranteed to have an unset least significant bit. Then in the repr
-// we store for pointers, we rotate a 1 into the most significant bit of the
-// most significant byte, which is never set for an ASCII byte.
-//
-// Inline repr:
-//
-//     0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx
-//
-// Heap allocated repr:
-//
-//     1ppppppp pppppppp pppppppp pppppppp pppppppp pppppppp pppppppp pppppppp 0
-//     ^ most significant bit   least significant bit of orig ptr, rotated out ^
-//
-// Since the most significant bit doubles as a sign bit for the similarly sized
-// signed integer type, the CPU has an efficient instruction for inspecting it,
-// meaning we can differentiate between an inline repr and a heap allocated repr
-// in one instruction. Effectively an inline repr always looks like a positive
-// i64 while a heap allocated repr always looks like a negative i64.
-//
-// For the inline repr, we store \0 padding on the end of the stored characters,
-// and thus the string length is readily determined efficiently by a cttz (count
-// trailing zeros) or bsf (bit scan forward) instruction.
-//
-// For the heap allocated repr, the length is encoded as a base-128 varint at
-// the head of the allocation.
-//
-// Empty strings are stored as an all-1 bit pattern, corresponding to -1i64.
-// Consequently the all-0 bit pattern is never a legal representation in any
-// repr, leaving it available as a niche for downstream code. For example this
-// allows size_of::<Version>() == size_of::<Option<Version>>().
-
-use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout};
-use core::isize;
-use core::mem;
-use core::num::{NonZeroU64, NonZeroUsize};
-use core::ptr::{self, NonNull};
-use core::slice;
-use core::str;
-use core::usize;
-
-const PTR_BYTES: usize = mem::size_of::<NonNull<u8>>();
-
-// If pointers are already 8 bytes or bigger, then 0. If pointers are smaller
-// than 8 bytes, then Identifier will contain a byte array to raise its size up
-// to 8 bytes total.
-const TAIL_BYTES: usize = 8 * (PTR_BYTES < 8) as usize - PTR_BYTES * (PTR_BYTES < 8) as usize;
-
-#[repr(C, align(8))]
-pub(crate) struct Identifier {
-    head: NonNull<u8>,
-    tail: [u8; TAIL_BYTES],
-}
-
-impl Identifier {
-    pub(crate) const fn empty() -> Self {
-        // This is a separate constant because unsafe function calls are not
-        // allowed in a const fn body, only in a const, until later rustc than
-        // what we support.
-        const HEAD: NonNull<u8> = unsafe { NonNull::new_unchecked(!0 as *mut u8) };
-
-        // `mov rax, -1`
-        Identifier {
-            head: HEAD,
-            tail: [!0; TAIL_BYTES],
-        }
-    }
-
-    // SAFETY: string must be ASCII and not contain \0 bytes.
-    pub(crate) unsafe fn new_unchecked(string: &str) -> Self {
-        let len = string.len();
-        debug_assert!(len <= isize::MAX as usize);
-        match len as u64 {
-            0 => Self::empty(),
-            1..=8 => {
-                let mut bytes = [0u8; mem::size_of::<Identifier>()];
-                // SAFETY: string is big enough to read len bytes, bytes is big
-                // enough to write len bytes, and they do not overlap.
-                unsafe { ptr::copy_nonoverlapping(string.as_ptr(), bytes.as_mut_ptr(), len) };
-                // SAFETY: the head field is nonzero because the input string
-                // was at least 1 byte of ASCII and did not contain \0.
-                unsafe { mem::transmute::<[u8; mem::size_of::<Identifier>()], Identifier>(bytes) }
-            }
-            9..=0xff_ffff_ffff_ffff => {
-                // SAFETY: len is in a range that does not contain 0.
-                let size = bytes_for_varint(unsafe { NonZeroUsize::new_unchecked(len) }) + len;
-                let align = 2;
-                // On 32-bit and 16-bit architecture, check for size overflowing
-                // isize::MAX. Making an allocation request bigger than this to
-                // the allocator is considered UB. All allocations (including
-                // static ones) are limited to isize::MAX so we're guaranteed
-                // len <= isize::MAX, and we know bytes_for_varint(len) <= 5
-                // because 128**5 > isize::MAX, which means the only problem
-                // that can arise is when isize::MAX - 5 <= len <= isize::MAX.
-                // This is pretty much guaranteed to be malicious input so we
-                // don't need to care about returning a good error message.
-                if mem::size_of::<usize>() < 8 {
-                    let max_alloc = usize::MAX / 2 - align;
-                    assert!(size <= max_alloc);
-                }
-                // SAFETY: align is not zero, align is a power of two, and
-                // rounding size up to align does not overflow isize::MAX.
-                let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
-                // SAFETY: layout's size is nonzero.
-                let ptr = unsafe { alloc(layout) };
-                if ptr.is_null() {
-                    handle_alloc_error(layout);
-                }
-                let mut write = ptr;
-                let mut varint_remaining = len;
-                while varint_remaining > 0 {
-                    // SAFETY: size is bytes_for_varint(len) bytes + len bytes.
-                    // This is writing the first bytes_for_varint(len) bytes.
-                    unsafe { ptr::write(write, varint_remaining as u8 | 0x80) };
-                    varint_remaining >>= 7;
-                    // SAFETY: still in bounds of the same allocation.
-                    write = unsafe { write.add(1) };
-                }
-                // SAFETY: size is bytes_for_varint(len) bytes + len bytes. This
-                // is writing to the last len bytes.
-                unsafe { ptr::copy_nonoverlapping(string.as_ptr(), write, len) };
-                Identifier {
-                    head: ptr_to_repr(ptr),
-                    tail: [0; TAIL_BYTES],
-                }
-            }
-            0x100_0000_0000_0000..=0xffff_ffff_ffff_ffff => {
-                unreachable!("please refrain from storing >64 petabytes of text in semver version");
-            }
-            #[cfg(no_exhaustive_int_match)] // rustc <1.33
-            _ => unreachable!(),
-        }
-    }
-
-    pub(crate) fn is_empty(&self) -> bool {
-        // `cmp rdi, -1` -- basically: `repr as i64 == -1`
-        let empty = Self::empty();
-        let is_empty = self.head == empty.head && self.tail == empty.tail;
-        // The empty representation does nothing on Drop. We can't let this one
-        // drop normally because `impl Drop for Identifier` calls is_empty; that
-        // would be an infinite recursion.
-        mem::forget(empty);
-        is_empty
-    }
-
-    fn is_inline(&self) -> bool {
-        // `test rdi, rdi` -- basically: `repr as i64 >= 0`
-        self.head.as_ptr() as usize >> (PTR_BYTES * 8 - 1) == 0
-    }
-
-    fn is_empty_or_inline(&self) -> bool {
-        // `cmp rdi, -2` -- basically: `repr as i64 > -2`
-        self.is_empty() || self.is_inline()
-    }
-
-    pub(crate) fn as_str(&self) -> &str {
-        if self.is_empty() {
-            ""
-        } else if self.is_inline() {
-            // SAFETY: repr is in the inline representation.
-            unsafe { inline_as_str(self) }
-        } else {
-            // SAFETY: repr is in the heap allocated representation.
-            unsafe { ptr_as_str(&self.head) }
-        }
-    }
-
-    pub(crate) fn ptr_eq(&self, rhs: &Self) -> bool {
-        self.head == rhs.head && self.tail == rhs.tail
-    }
-}
-
-impl Clone for Identifier {
-    fn clone(&self) -> Self {
-        if self.is_empty_or_inline() {
-            Identifier {
-                head: self.head,
-                tail: self.tail,
-            }
-        } else {
-            let ptr = repr_to_ptr(self.head);
-            // SAFETY: ptr is one of our own heap allocations.
-            let len = unsafe { decode_len(ptr) };
-            let size = bytes_for_varint(len) + len.get();
-            let align = 2;
-            // SAFETY: align is not zero, align is a power of two, and rounding
-            // size up to align does not overflow isize::MAX. This is just
-            // duplicating a previous allocation where all of these guarantees
-            // were already made.
-            let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
-            // SAFETY: layout's size is nonzero.
-            let clone = unsafe { alloc(layout) };
-            if clone.is_null() {
-                handle_alloc_error(layout);
-            }
-            // SAFETY: new allocation cannot overlap the previous one (this was
-            // not a realloc). The argument ptrs are readable/writeable
-            // respectively for size bytes.
-            unsafe { ptr::copy_nonoverlapping(ptr, clone, size) }
-            Identifier {
-                head: ptr_to_repr(clone),
-                tail: [0; TAIL_BYTES],
-            }
-        }
-    }
-}
-
-impl Drop for Identifier {
-    fn drop(&mut self) {
-        if self.is_empty_or_inline() {
-            return;
-        }
-        let ptr = repr_to_ptr_mut(self.head);
-        // SAFETY: ptr is one of our own heap allocations.
-        let len = unsafe { decode_len(ptr) };
-        let size = bytes_for_varint(len) + len.get();
-        let align = 2;
-        // SAFETY: align is not zero, align is a power of two, and rounding
-        // size up to align does not overflow isize::MAX. These guarantees were
-        // made when originally allocating this memory.
-        let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
-        // SAFETY: ptr was previously allocated by the same allocator with the
-        // same layout.
-        unsafe { dealloc(ptr, layout) }
-    }
-}
-
-impl PartialEq for Identifier {
-    fn eq(&self, rhs: &Self) -> bool {
-        if self.ptr_eq(rhs) {
-            // Fast path (most common)
-            true
-        } else if self.is_empty_or_inline() || rhs.is_empty_or_inline() {
-            false
-        } else {
-            // SAFETY: both reprs are in the heap allocated representation.
-            unsafe { ptr_as_str(&self.head) == ptr_as_str(&rhs.head) }
-        }
-    }
-}
-
-unsafe impl Send for Identifier {}
-unsafe impl Sync for Identifier {}
-
-// We use heap pointers that are 2-byte aligned, meaning they have an
-// insignificant 0 in the least significant bit. We take advantage of that
-// unneeded bit to rotate a 1 into the most significant bit to make the repr
-// distinguishable from ASCII bytes.
-fn ptr_to_repr(original: *mut u8) -> NonNull<u8> {
-    // `mov eax, 1`
-    // `shld rax, rdi, 63`
-    let modified = (original as usize | 1).rotate_right(1);
-
-    // `original + (modified - original)`, but being mindful of provenance.
-    let diff = modified.wrapping_sub(original as usize);
-    let modified = original.wrapping_add(diff);
-
-    // SAFETY: the most significant bit of repr is known to be set, so the value
-    // is not zero.
-    unsafe { NonNull::new_unchecked(modified) }
-}
-
-// Shift out the 1 previously placed into the most significant bit of the least
-// significant byte. Shift in a low 0 bit to reconstruct the original 2-byte
-// aligned pointer.
-fn repr_to_ptr(modified: NonNull<u8>) -> *const u8 {
-    // `lea rax, [rdi + rdi]`
-    let modified = modified.as_ptr();
-    let original = (modified as usize) << 1;
-
-    // `modified + (original - modified)`, but being mindful of provenance.
-    let diff = original.wrapping_sub(modified as usize);
-    modified.wrapping_add(diff)
-}
-
-fn repr_to_ptr_mut(repr: NonNull<u8>) -> *mut u8 {
-    repr_to_ptr(repr) as *mut u8
-}
-
-// Compute the length of the inline string, assuming the argument is in short
-// string representation. Short strings are stored as 1 to 8 nonzero ASCII
-// bytes, followed by \0 padding for the remaining bytes.
-//
-// SAFETY: the identifier must indeed be in the inline representation.
-unsafe fn inline_len(repr: &Identifier) -> NonZeroUsize {
-    // SAFETY: Identifier's layout is align(8) and at least size 8. We're doing
-    // an aligned read of the first 8 bytes from it. The bytes are not all zero
-    // because inline strings are at least 1 byte long and cannot contain \0.
-    let repr = unsafe { ptr::read(repr as *const Identifier as *const NonZeroU64) };
-
-    // Rustc >=1.53 has intrinsics for counting zeros on a non-zeroable integer.
-    // On many architectures these are more efficient than counting on ordinary
-    // zeroable integers (bsf vs cttz). On rustc <1.53 without those intrinsics,
-    // we count zeros in the u64 rather than the NonZeroU64.
-    #[cfg(no_nonzero_bitscan)]
-    let repr = repr.get();
-
-    #[cfg(target_endian = "little")]
-    let zero_bits_on_string_end = repr.leading_zeros();
-    #[cfg(target_endian = "big")]
-    let zero_bits_on_string_end = repr.trailing_zeros();
-
-    let nonzero_bytes = 8 - zero_bits_on_string_end as usize / 8;
-
-    // SAFETY: repr is nonzero, so it has at most 63 zero bits on either end,
-    // thus at least one nonzero byte.
-    unsafe { NonZeroUsize::new_unchecked(nonzero_bytes) }
-}
-
-// SAFETY: repr must be in the inline representation, i.e. at least 1 and at
-// most 8 nonzero ASCII bytes padded on the end with \0 bytes.
-unsafe fn inline_as_str(repr: &Identifier) -> &str {
-    let ptr = repr as *const Identifier as *const u8;
-    let len = unsafe { inline_len(repr) }.get();
-    // SAFETY: we are viewing the nonzero ASCII prefix of the inline repr's
-    // contents as a slice of bytes. Input/output lifetimes are correctly
-    // associated.
-    let slice = unsafe { slice::from_raw_parts(ptr, len) };
-    // SAFETY: the string contents are known to be only ASCII bytes, which are
-    // always valid UTF-8.
-    unsafe { str::from_utf8_unchecked(slice) }
-}
-
-// Decode varint. Varints consist of between one and eight base-128 digits, each
-// of which is stored in a byte with most significant bit set. Adjacent to the
-// varint in memory there is guaranteed to be at least 9 ASCII bytes, each of
-// which has an unset most significant bit.
-//
-// SAFETY: ptr must be one of our own heap allocations, with the varint header
-// already written.
-unsafe fn decode_len(ptr: *const u8) -> NonZeroUsize {
-    // SAFETY: There is at least one byte of varint followed by at least 9 bytes
-    // of string content, which is at least 10 bytes total for the allocation,
-    // so reading the first two is no problem.
-    let [first, second] = unsafe { ptr::read(ptr as *const [u8; 2]) };
-    if second < 0x80 {
-        // SAFETY: the length of this heap allocated string has been encoded as
-        // one base-128 digit, so the length is at least 9 and at most 127. It
-        // cannot be zero.
-        unsafe { NonZeroUsize::new_unchecked((first & 0x7f) as usize) }
-    } else {
-        return unsafe { decode_len_cold(ptr) };
-
-        // Identifiers 128 bytes or longer. This is not exercised by any crate
-        // version currently published to crates.io.
-        #[cold]
-        #[inline(never)]
-        unsafe fn decode_len_cold(mut ptr: *const u8) -> NonZeroUsize {
-            let mut len = 0;
-            let mut shift = 0;
-            loop {
-                // SAFETY: varint continues while there are bytes having the
-                // most significant bit set, i.e. until we start hitting the
-                // ASCII string content with msb unset.
-                let byte = unsafe { *ptr };
-                if byte < 0x80 {
-                    // SAFETY: the string length is known to be 128 bytes or
-                    // longer.
-                    return unsafe { NonZeroUsize::new_unchecked(len) };
-                }
-                // SAFETY: still in bounds of the same allocation.
-                ptr = unsafe { ptr.add(1) };
-                len += ((byte & 0x7f) as usize) << shift;
-                shift += 7;
-            }
-        }
-    }
-}
-
-// SAFETY: repr must be in the heap allocated representation, with varint header
-// and string contents already written.
-unsafe fn ptr_as_str(repr: &NonNull<u8>) -> &str {
-    let ptr = repr_to_ptr(*repr);
-    let len = unsafe { decode_len(ptr) };
-    let header = bytes_for_varint(len);
-    let slice = unsafe { slice::from_raw_parts(ptr.add(header), len.get()) };
-    // SAFETY: all identifier contents are ASCII bytes, which are always valid
-    // UTF-8.
-    unsafe { str::from_utf8_unchecked(slice) }
-}
-
-// Number of base-128 digits required for the varint representation of a length.
-fn bytes_for_varint(len: NonZeroUsize) -> usize {
-    #[cfg(no_nonzero_bitscan)] // rustc <1.53
-    let len = len.get();
-
-    let usize_bits = mem::size_of::<usize>() * 8;
-    let len_bits = usize_bits - len.leading_zeros() as usize;
-    (len_bits + 6) / 7
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/impls.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/impls.rs
deleted file mode 100644
index 280c802..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/impls.rs
+++ /dev/null
@@ -1,163 +0,0 @@
-use crate::backport::*;
-use crate::identifier::Identifier;
-use crate::{BuildMetadata, Comparator, Prerelease, VersionReq};
-use core::cmp::Ordering;
-use core::hash::{Hash, Hasher};
-use core::iter::FromIterator;
-use core::ops::Deref;
-
-impl Default for Identifier {
-    fn default() -> Self {
-        Identifier::empty()
-    }
-}
-
-impl Eq for Identifier {}
-
-impl Hash for Identifier {
-    fn hash<H: Hasher>(&self, hasher: &mut H) {
-        self.as_str().hash(hasher);
-    }
-}
-
-impl Deref for Prerelease {
-    type Target = str;
-
-    fn deref(&self) -> &Self::Target {
-        self.identifier.as_str()
-    }
-}
-
-impl Deref for BuildMetadata {
-    type Target = str;
-
-    fn deref(&self) -> &Self::Target {
-        self.identifier.as_str()
-    }
-}
-
-impl PartialOrd for Prerelease {
-    fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
-        Some(self.cmp(rhs))
-    }
-}
-
-impl PartialOrd for BuildMetadata {
-    fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
-        Some(self.cmp(rhs))
-    }
-}
-
-impl Ord for Prerelease {
-    fn cmp(&self, rhs: &Self) -> Ordering {
-        if self.identifier.ptr_eq(&rhs.identifier) {
-            return Ordering::Equal;
-        }
-
-        match self.is_empty() {
-            // A real release compares greater than prerelease.
-            true => return Ordering::Greater,
-            // Prerelease compares less than the real release.
-            false if rhs.is_empty() => return Ordering::Less,
-            false => {}
-        }
-
-        let lhs = self.as_str().split('.');
-        let mut rhs = rhs.as_str().split('.');
-
-        for lhs in lhs {
-            let rhs = match rhs.next() {
-                // Spec: "A larger set of pre-release fields has a higher
-                // precedence than a smaller set, if all of the preceding
-                // identifiers are equal."
-                None => return Ordering::Greater,
-                Some(rhs) => rhs,
-            };
-
-            let string_cmp = || Ord::cmp(lhs, rhs);
-            let is_ascii_digit = |b: u8| b.is_ascii_digit();
-            let ordering = match (
-                lhs.bytes().all(is_ascii_digit),
-                rhs.bytes().all(is_ascii_digit),
-            ) {
-                // Respect numeric ordering, for example 99 < 100. Spec says:
-                // "Identifiers consisting of only digits are compared
-                // numerically."
-                (true, true) => Ord::cmp(&lhs.len(), &rhs.len()).then_with(string_cmp),
-                // Spec: "Numeric identifiers always have lower precedence than
-                // non-numeric identifiers."
-                (true, false) => return Ordering::Less,
-                (false, true) => return Ordering::Greater,
-                // Spec: "Identifiers with letters or hyphens are compared
-                // lexically in ASCII sort order."
-                (false, false) => string_cmp(),
-            };
-
-            if ordering != Ordering::Equal {
-                return ordering;
-            }
-        }
-
-        if rhs.next().is_none() {
-            Ordering::Equal
-        } else {
-            Ordering::Less
-        }
-    }
-}
-
-impl Ord for BuildMetadata {
-    fn cmp(&self, rhs: &Self) -> Ordering {
-        if self.identifier.ptr_eq(&rhs.identifier) {
-            return Ordering::Equal;
-        }
-
-        let lhs = self.as_str().split('.');
-        let mut rhs = rhs.as_str().split('.');
-
-        for lhs in lhs {
-            let rhs = match rhs.next() {
-                None => return Ordering::Greater,
-                Some(rhs) => rhs,
-            };
-
-            let is_ascii_digit = |b: u8| b.is_ascii_digit();
-            let ordering = match (
-                lhs.bytes().all(is_ascii_digit),
-                rhs.bytes().all(is_ascii_digit),
-            ) {
-                (true, true) => {
-                    // 0 < 00 < 1 < 01 < 001 < 2 < 02 < 002 < 10
-                    let lhval = lhs.trim_start_matches('0');
-                    let rhval = rhs.trim_start_matches('0');
-                    Ord::cmp(&lhval.len(), &rhval.len())
-                        .then_with(|| Ord::cmp(lhval, rhval))
-                        .then_with(|| Ord::cmp(&lhs.len(), &rhs.len()))
-                }
-                (true, false) => return Ordering::Less,
-                (false, true) => return Ordering::Greater,
-                (false, false) => Ord::cmp(lhs, rhs),
-            };
-
-            if ordering != Ordering::Equal {
-                return ordering;
-            }
-        }
-
-        if rhs.next().is_none() {
-            Ordering::Equal
-        } else {
-            Ordering::Less
-        }
-    }
-}
-
-impl FromIterator<Comparator> for VersionReq {
-    fn from_iter<I>(iter: I) -> Self
-    where
-        I: IntoIterator<Item = Comparator>,
-    {
-        let comparators = Vec::from_iter(iter);
-        VersionReq { comparators }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/lib.rs
deleted file mode 100644
index f96adb5e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/lib.rs
+++ /dev/null
@@ -1,580 +0,0 @@
-//! [![github]](https://github.com/dtolnay/semver)&ensp;[![crates-io]](https://crates.io/crates/semver)&ensp;[![docs-rs]](https://docs.rs/semver)
-//!
-//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
-//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
-//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
-//!
-//! <br>
-//!
-//! A parser and evaluator for Cargo's flavor of Semantic Versioning.
-//!
-//! Semantic Versioning (see <https://semver.org>) is a guideline for how
-//! version numbers are assigned and incremented. It is widely followed within
-//! the Cargo/crates.io ecosystem for Rust.
-//!
-//! <br>
-//!
-//! # Example
-//!
-//! ```
-//! use semver::{BuildMetadata, Prerelease, Version, VersionReq};
-//!
-//! fn main() {
-//!     let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap();
-//!
-//!     // Check whether this requirement matches version 1.2.3-alpha.1 (no)
-//!     let version = Version {
-//!         major: 1,
-//!         minor: 2,
-//!         patch: 3,
-//!         pre: Prerelease::new("alpha.1").unwrap(),
-//!         build: BuildMetadata::EMPTY,
-//!     };
-//!     assert!(!req.matches(&version));
-//!
-//!     // Check whether it matches 1.3.0 (yes it does)
-//!     let version = Version::parse("1.3.0").unwrap();
-//!     assert!(req.matches(&version));
-//! }
-//! ```
-//!
-//! <br><br>
-//!
-//! # Scope of this crate
-//!
-//! Besides Cargo, several other package ecosystems and package managers for
-//! other languages also use SemVer:&ensp;RubyGems/Bundler for Ruby, npm for
-//! JavaScript, Composer for PHP, CocoaPods for Objective-C...
-//!
-//! The `semver` crate is specifically intended to implement Cargo's
-//! interpretation of Semantic Versioning.
-//!
-//! Where the various tools differ in their interpretation or implementation of
-//! the spec, this crate follows the implementation choices made by Cargo. If
-//! you are operating on version numbers from some other package ecosystem, you
-//! will want to use a different semver library which is appropriate to that
-//! ecosystem.
-//!
-//! The extent of Cargo's SemVer support is documented in the *[Specifying
-//! Dependencies]* chapter of the Cargo reference.
-//!
-//! [Specifying Dependencies]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
-
-#![doc(html_root_url = "https://docs.rs/semver/1.0.26")]
-#![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(all(not(feature = "std"), not(no_alloc_crate)), no_std)]
-#![cfg_attr(not(no_unsafe_op_in_unsafe_fn_lint), deny(unsafe_op_in_unsafe_fn))]
-#![cfg_attr(no_unsafe_op_in_unsafe_fn_lint, allow(unused_unsafe))]
-#![cfg_attr(no_str_strip_prefix, allow(unstable_name_collisions))]
-#![allow(
-    clippy::cast_lossless,
-    clippy::cast_possible_truncation,
-    clippy::doc_markdown,
-    clippy::incompatible_msrv,
-    clippy::items_after_statements,
-    clippy::manual_map,
-    clippy::match_bool,
-    clippy::missing_errors_doc,
-    clippy::must_use_candidate,
-    clippy::needless_doctest_main,
-    clippy::ptr_as_ptr,
-    clippy::redundant_else,
-    clippy::semicolon_if_nothing_returned, // https://github.com/rust-lang/rust-clippy/issues/7324
-    clippy::similar_names,
-    clippy::unnested_or_patterns,
-    clippy::unseparated_literal_suffix,
-    clippy::wildcard_imports
-)]
-
-#[cfg(not(no_alloc_crate))]
-extern crate alloc;
-
-mod backport;
-mod display;
-mod error;
-mod eval;
-mod identifier;
-mod impls;
-mod parse;
-
-#[cfg(feature = "serde")]
-mod serde;
-
-use crate::identifier::Identifier;
-use core::cmp::Ordering;
-use core::str::FromStr;
-
-#[allow(unused_imports)]
-use crate::backport::*;
-
-pub use crate::parse::Error;
-
-/// **SemVer version** as defined by <https://semver.org>.
-///
-/// # Syntax
-///
-/// - The major, minor, and patch numbers may be any integer 0 through u64::MAX.
-///   When representing a SemVer version as a string, each number is written as
-///   a base 10 integer. For example, `1.0.119`.
-///
-/// - Leading zeros are forbidden in those positions. For example `1.01.00` is
-///   invalid as a SemVer version.
-///
-/// - The pre-release identifier, if present, must conform to the syntax
-///   documented for [`Prerelease`].
-///
-/// - The build metadata, if present, must conform to the syntax documented for
-///   [`BuildMetadata`].
-///
-/// - Whitespace is not allowed anywhere in the version.
-///
-/// # Total ordering
-///
-/// Given any two SemVer versions, one is less than, greater than, or equal to
-/// the other. Versions may be compared against one another using Rust's usual
-/// comparison operators.
-///
-/// - The major, minor, and patch number are compared numerically from left to
-///   right, lexicographically ordered as a 3-tuple of integers. So for example
-///   version `1.5.0` is less than version `1.19.0`, despite the fact that
-///   "1.19.0" &lt; "1.5.0" as ASCIIbetically compared strings and 1.19 &lt; 1.5
-///   as real numbers.
-///
-/// - When major, minor, and patch are equal, a pre-release version is
-///   considered less than the ordinary release:&ensp;version `1.0.0-alpha.1` is
-///   less than version `1.0.0`.
-///
-/// - Two pre-releases of the same major, minor, patch are compared by
-///   lexicographic ordering of dot-separated components of the pre-release
-///   string.
-///
-///   - Identifiers consisting of only digits are compared
-///     numerically:&ensp;`1.0.0-pre.8` is less than `1.0.0-pre.12`.
-///
-///   - Identifiers that contain a letter or hyphen are compared in ASCII sort
-///     order:&ensp;`1.0.0-pre12` is less than `1.0.0-pre8`.
-///
-///   - Any numeric identifier is always less than any non-numeric
-///     identifier:&ensp;`1.0.0-pre.1` is less than `1.0.0-pre.x`.
-///
-/// Example:&ensp;`1.0.0-alpha`&ensp;&lt;&ensp;`1.0.0-alpha.1`&ensp;&lt;&ensp;`1.0.0-alpha.beta`&ensp;&lt;&ensp;`1.0.0-beta`&ensp;&lt;&ensp;`1.0.0-beta.2`&ensp;&lt;&ensp;`1.0.0-beta.11`&ensp;&lt;&ensp;`1.0.0-rc.1`&ensp;&lt;&ensp;`1.0.0`
-#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
-pub struct Version {
-    pub major: u64,
-    pub minor: u64,
-    pub patch: u64,
-    pub pre: Prerelease,
-    pub build: BuildMetadata,
-}
-
-/// **SemVer version requirement** describing the intersection of some version
-/// comparators, such as `>=1.2.3, <1.8`.
-///
-/// # Syntax
-///
-/// - Either `*` (meaning "any"), or one or more comma-separated comparators.
-///
-/// - A [`Comparator`] is an operator ([`Op`]) and a partial version, separated
-///   by optional whitespace. For example `>=1.0.0` or `>=1.0`.
-///
-/// - Build metadata is syntactically permitted on the partial versions, but is
-///   completely ignored, as it's never relevant to whether any comparator
-///   matches a particular version.
-///
-/// - Whitespace is permitted around commas and around operators. Whitespace is
-///   not permitted within a partial version, i.e. anywhere between the major
-///   version number and its minor, patch, pre-release, or build metadata.
-#[derive(Clone, Eq, PartialEq, Hash, Debug)]
-#[cfg_attr(no_const_vec_new, derive(Default))]
-pub struct VersionReq {
-    pub comparators: Vec<Comparator>,
-}
-
-/// A pair of comparison operator and partial version, such as `>=1.2`. Forms
-/// one piece of a VersionReq.
-#[derive(Clone, Eq, PartialEq, Hash, Debug)]
-pub struct Comparator {
-    pub op: Op,
-    pub major: u64,
-    pub minor: Option<u64>,
-    /// Patch is only allowed if minor is Some.
-    pub patch: Option<u64>,
-    /// Non-empty pre-release is only allowed if patch is Some.
-    pub pre: Prerelease,
-}
-
-/// SemVer comparison operator: `=`, `>`, `>=`, `<`, `<=`, `~`, `^`, `*`.
-///
-/// # Op::Exact
-/// - &ensp;**`=I.J.K`**&emsp;&mdash;&emsp;exactly the version I.J.K
-/// - &ensp;**`=I.J`**&emsp;&mdash;&emsp;equivalent to `>=I.J.0, <I.(J+1).0`
-/// - &ensp;**`=I`**&emsp;&mdash;&emsp;equivalent to `>=I.0.0, <(I+1).0.0`
-///
-/// # Op::Greater
-/// - &ensp;**`>I.J.K`**
-/// - &ensp;**`>I.J`**&emsp;&mdash;&emsp;equivalent to `>=I.(J+1).0`
-/// - &ensp;**`>I`**&emsp;&mdash;&emsp;equivalent to `>=(I+1).0.0`
-///
-/// # Op::GreaterEq
-/// - &ensp;**`>=I.J.K`**
-/// - &ensp;**`>=I.J`**&emsp;&mdash;&emsp;equivalent to `>=I.J.0`
-/// - &ensp;**`>=I`**&emsp;&mdash;&emsp;equivalent to `>=I.0.0`
-///
-/// # Op::Less
-/// - &ensp;**`<I.J.K`**
-/// - &ensp;**`<I.J`**&emsp;&mdash;&emsp;equivalent to `<I.J.0`
-/// - &ensp;**`<I`**&emsp;&mdash;&emsp;equivalent to `<I.0.0`
-///
-/// # Op::LessEq
-/// - &ensp;**`<=I.J.K`**
-/// - &ensp;**`<=I.J`**&emsp;&mdash;&emsp;equivalent to `<I.(J+1).0`
-/// - &ensp;**`<=I`**&emsp;&mdash;&emsp;equivalent to `<(I+1).0.0`
-///
-/// # Op::Tilde&emsp;("patch" updates)
-/// *Tilde requirements allow the **patch** part of the semver version (the third number) to increase.*
-/// - &ensp;**`~I.J.K`**&emsp;&mdash;&emsp;equivalent to `>=I.J.K, <I.(J+1).0`
-/// - &ensp;**`~I.J`**&emsp;&mdash;&emsp;equivalent to `=I.J`
-/// - &ensp;**`~I`**&emsp;&mdash;&emsp;equivalent to `=I`
-///
-/// # Op::Caret&emsp;("compatible" updates)
-/// *Caret requirements allow parts that are **right of the first nonzero** part of the semver version to increase.*
-/// - &ensp;**`^I.J.K`**&ensp;(for I\>0)&emsp;&mdash;&emsp;equivalent to `>=I.J.K, <(I+1).0.0`
-/// - &ensp;**`^0.J.K`**&ensp;(for J\>0)&emsp;&mdash;&emsp;equivalent to `>=0.J.K, <0.(J+1).0`
-/// - &ensp;**`^0.0.K`**&emsp;&mdash;&emsp;equivalent to `=0.0.K`
-/// - &ensp;**`^I.J`**&ensp;(for I\>0 or J\>0)&emsp;&mdash;&emsp;equivalent to `^I.J.0`
-/// - &ensp;**`^0.0`**&emsp;&mdash;&emsp;equivalent to `=0.0`
-/// - &ensp;**`^I`**&emsp;&mdash;&emsp;equivalent to `=I`
-///
-/// # Op::Wildcard
-/// - &ensp;**`I.J.*`**&emsp;&mdash;&emsp;equivalent to `=I.J`
-/// - &ensp;**`I.*`**&ensp;or&ensp;**`I.*.*`**&emsp;&mdash;&emsp;equivalent to `=I`
-#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
-#[cfg_attr(not(no_non_exhaustive), non_exhaustive)]
-pub enum Op {
-    Exact,
-    Greater,
-    GreaterEq,
-    Less,
-    LessEq,
-    Tilde,
-    Caret,
-    Wildcard,
-
-    #[cfg(no_non_exhaustive)] // rustc <1.40
-    #[doc(hidden)]
-    __NonExhaustive,
-}
-
-/// Optional pre-release identifier on a version string. This comes after `-` in
-/// a SemVer version, like `1.0.0-alpha.1`
-///
-/// # Examples
-///
-/// Some real world pre-release idioms drawn from crates.io:
-///
-/// - **[mio]** <code>0.7.0-<b>alpha.1</b></code> &mdash; the most common style
-///   for numbering pre-releases.
-///
-/// - **[pest]** <code>1.0.0-<b>beta.8</b></code>,&ensp;<code>1.0.0-<b>rc.0</b></code>
-///   &mdash; this crate makes a distinction between betas and release
-///   candidates.
-///
-/// - **[sassers]** <code>0.11.0-<b>shitshow</b></code> &mdash; ???.
-///
-/// - **[atomic-utils]** <code>0.0.0-<b>reserved</b></code> &mdash; a squatted
-///   crate name.
-///
-/// [mio]: https://crates.io/crates/mio
-/// [pest]: https://crates.io/crates/pest
-/// [atomic-utils]: https://crates.io/crates/atomic-utils
-/// [sassers]: https://crates.io/crates/sassers
-///
-/// *Tip:* Be aware that if you are planning to number your own pre-releases,
-/// you should prefer to separate the numeric part from any non-numeric
-/// identifiers by using a dot in between. That is, prefer pre-releases
-/// `alpha.1`, `alpha.2`, etc rather than `alpha1`, `alpha2` etc. The SemVer
-/// spec's rule for pre-release precedence has special treatment of numeric
-/// components in the pre-release string, but only if there are no non-digit
-/// characters in the same dot-separated component. So you'd have `alpha.2` &lt;
-/// `alpha.11` as intended, but `alpha11` &lt; `alpha2`.
-///
-/// # Syntax
-///
-/// Pre-release strings are a series of dot separated identifiers immediately
-/// following the patch version. Identifiers must comprise only ASCII
-/// alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must not be
-/// empty. Numeric identifiers must not include leading zeros.
-///
-/// # Total ordering
-///
-/// Pre-releases have a total order defined by the SemVer spec. It uses
-/// lexicographic ordering of dot-separated components. Identifiers consisting
-/// of only digits are compared numerically. Otherwise, identifiers are compared
-/// in ASCII sort order. Any numeric identifier is always less than any
-/// non-numeric identifier.
-///
-/// Example:&ensp;`alpha`&ensp;&lt;&ensp;`alpha.85`&ensp;&lt;&ensp;`alpha.90`&ensp;&lt;&ensp;`alpha.200`&ensp;&lt;&ensp;`alpha.0a`&ensp;&lt;&ensp;`alpha.1a0`&ensp;&lt;&ensp;`alpha.a`&ensp;&lt;&ensp;`beta`
-#[derive(Default, Clone, Eq, PartialEq, Hash)]
-pub struct Prerelease {
-    identifier: Identifier,
-}
-
-/// Optional build metadata identifier. This comes after `+` in a SemVer
-/// version, as in `0.8.1+zstd.1.5.0`.
-///
-/// # Examples
-///
-/// Some real world build metadata idioms drawn from crates.io:
-///
-/// - **[libgit2-sys]** <code>0.12.20+<b>1.1.0</b></code> &mdash; for this
-///   crate, the build metadata indicates the version of the C libgit2 library
-///   that the Rust crate is built against.
-///
-/// - **[mashup]** <code>0.1.13+<b>deprecated</b></code> &mdash; just the word
-///   "deprecated" for a crate that has been superseded by another. Eventually
-///   people will take notice of this in Cargo's build output where it lists the
-///   crates being compiled.
-///
-/// - **[google-bigquery2]** <code>2.0.4+<b>20210327</b></code> &mdash; this
-///   library is automatically generated from an official API schema, and the
-///   build metadata indicates the date on which that schema was last captured.
-///
-/// - **[fbthrift-git]** <code>0.0.6+<b>c7fcc0e</b></code> &mdash; this crate is
-///   published from snapshots of a big company monorepo. In monorepo
-///   development, there is no concept of versions, and all downstream code is
-///   just updated atomically in the same commit that breaking changes to a
-///   library are landed. Therefore for crates.io purposes, every published
-///   version must be assumed to be incompatible with the previous. The build
-///   metadata provides the source control hash of the snapshotted code.
-///
-/// [libgit2-sys]: https://crates.io/crates/libgit2-sys
-/// [mashup]: https://crates.io/crates/mashup
-/// [google-bigquery2]: https://crates.io/crates/google-bigquery2
-/// [fbthrift-git]: https://crates.io/crates/fbthrift-git
-///
-/// # Syntax
-///
-/// Build metadata is a series of dot separated identifiers immediately
-/// following the patch or pre-release version. Identifiers must comprise only
-/// ASCII alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must
-/// not be empty. Leading zeros *are* allowed, unlike any other place in the
-/// SemVer grammar.
-///
-/// # Total ordering
-///
-/// Build metadata is ignored in evaluating `VersionReq`; it plays no role in
-/// whether a `Version` matches any one of the comparison operators.
-///
-/// However for comparing build metadatas among one another, they do have a
-/// total order which is determined by lexicographic ordering of dot-separated
-/// components. Identifiers consisting of only digits are compared numerically.
-/// Otherwise, identifiers are compared in ASCII sort order. Any numeric
-/// identifier is always less than any non-numeric identifier.
-///
-/// Example:&ensp;`demo`&ensp;&lt;&ensp;`demo.85`&ensp;&lt;&ensp;`demo.90`&ensp;&lt;&ensp;`demo.090`&ensp;&lt;&ensp;`demo.200`&ensp;&lt;&ensp;`demo.1a0`&ensp;&lt;&ensp;`demo.a`&ensp;&lt;&ensp;`memo`
-#[derive(Default, Clone, Eq, PartialEq, Hash)]
-pub struct BuildMetadata {
-    identifier: Identifier,
-}
-
-impl Version {
-    /// Create `Version` with an empty pre-release and build metadata.
-    ///
-    /// Equivalent to:
-    ///
-    /// ```
-    /// # use semver::{BuildMetadata, Prerelease, Version};
-    /// #
-    /// # const fn new(major: u64, minor: u64, patch: u64) -> Version {
-    /// Version {
-    ///     major,
-    ///     minor,
-    ///     patch,
-    ///     pre: Prerelease::EMPTY,
-    ///     build: BuildMetadata::EMPTY,
-    /// }
-    /// # }
-    /// ```
-    pub const fn new(major: u64, minor: u64, patch: u64) -> Self {
-        Version {
-            major,
-            minor,
-            patch,
-            pre: Prerelease::EMPTY,
-            build: BuildMetadata::EMPTY,
-        }
-    }
-
-    /// Create `Version` by parsing from string representation.
-    ///
-    /// # Errors
-    ///
-    /// Possible reasons for the parse to fail include:
-    ///
-    /// - `1.0` &mdash; too few numeric components. A SemVer version must have
-    ///   exactly three. If you are looking at something that has fewer than
-    ///   three numbers in it, it's possible it is a `VersionReq` instead (with
-    ///   an implicit default `^` comparison operator).
-    ///
-    /// - `1.0.01` &mdash; a numeric component has a leading zero.
-    ///
-    /// - `1.0.unknown` &mdash; unexpected character in one of the components.
-    ///
-    /// - `1.0.0-` or `1.0.0+` &mdash; the pre-release or build metadata are
-    ///   indicated present but empty.
-    ///
-    /// - `1.0.0-alpha_123` &mdash; pre-release or build metadata have something
-    ///   outside the allowed characters, which are `0-9`, `A-Z`, `a-z`, `-`,
-    ///   and `.` (dot).
-    ///
-    /// - `23456789999999999999.0.0` &mdash; overflow of a u64.
-    pub fn parse(text: &str) -> Result<Self, Error> {
-        Version::from_str(text)
-    }
-
-    /// Compare the major, minor, patch, and pre-release value of two versions,
-    /// disregarding build metadata. Versions that differ only in build metadata
-    /// are considered equal. This comparison is what the SemVer spec refers to
-    /// as "precedence".
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use semver::Version;
-    ///
-    /// let mut versions = [
-    ///     "1.20.0+c144a98".parse::<Version>().unwrap(),
-    ///     "1.20.0".parse().unwrap(),
-    ///     "1.0.0".parse().unwrap(),
-    ///     "1.0.0-alpha".parse().unwrap(),
-    ///     "1.20.0+bc17664".parse().unwrap(),
-    /// ];
-    ///
-    /// // This is a stable sort, so it preserves the relative order of equal
-    /// // elements. The three 1.20.0 versions differ only in build metadata so
-    /// // they are not reordered relative to one another.
-    /// versions.sort_by(Version::cmp_precedence);
-    /// assert_eq!(versions, [
-    ///     "1.0.0-alpha".parse().unwrap(),
-    ///     "1.0.0".parse().unwrap(),
-    ///     "1.20.0+c144a98".parse().unwrap(),
-    ///     "1.20.0".parse().unwrap(),
-    ///     "1.20.0+bc17664".parse().unwrap(),
-    /// ]);
-    ///
-    /// // Totally order the versions, including comparing the build metadata.
-    /// versions.sort();
-    /// assert_eq!(versions, [
-    ///     "1.0.0-alpha".parse().unwrap(),
-    ///     "1.0.0".parse().unwrap(),
-    ///     "1.20.0".parse().unwrap(),
-    ///     "1.20.0+bc17664".parse().unwrap(),
-    ///     "1.20.0+c144a98".parse().unwrap(),
-    /// ]);
-    /// ```
-    pub fn cmp_precedence(&self, other: &Self) -> Ordering {
-        Ord::cmp(
-            &(self.major, self.minor, self.patch, &self.pre),
-            &(other.major, other.minor, other.patch, &other.pre),
-        )
-    }
-}
-
-impl VersionReq {
-    /// A `VersionReq` with no constraint on the version numbers it matches.
-    /// Equivalent to `VersionReq::parse("*").unwrap()`.
-    ///
-    /// In terms of comparators this is equivalent to `>=0.0.0`.
-    ///
-    /// Counterintuitively a `*` VersionReq does not match every possible
-    /// version number. In particular, in order for *any* `VersionReq` to match
-    /// a pre-release version, the `VersionReq` must contain at least one
-    /// `Comparator` that has an explicit major, minor, and patch version
-    /// identical to the pre-release being matched, and that has a nonempty
-    /// pre-release component. Since `*` is not written with an explicit major,
-    /// minor, and patch version, and does not contain a nonempty pre-release
-    /// component, it does not match any pre-release versions.
-    #[cfg(not(no_const_vec_new))] // rustc <1.39
-    pub const STAR: Self = VersionReq {
-        comparators: Vec::new(),
-    };
-
-    /// Create `VersionReq` by parsing from string representation.
-    ///
-    /// # Errors
-    ///
-    /// Possible reasons for the parse to fail include:
-    ///
-    /// - `>a.b` &mdash; unexpected characters in the partial version.
-    ///
-    /// - `@1.0.0` &mdash; unrecognized comparison operator.
-    ///
-    /// - `^1.0.0, ` &mdash; unexpected end of input.
-    ///
-    /// - `>=1.0 <2.0` &mdash; missing comma between comparators.
-    ///
-    /// - `*.*` &mdash; unsupported wildcard syntax.
-    pub fn parse(text: &str) -> Result<Self, Error> {
-        VersionReq::from_str(text)
-    }
-
-    /// Evaluate whether the given `Version` satisfies the version requirement
-    /// described by `self`.
-    pub fn matches(&self, version: &Version) -> bool {
-        eval::matches_req(self, version)
-    }
-}
-
-/// The default VersionReq is the same as [`VersionReq::STAR`].
-#[cfg(not(no_const_vec_new))]
-impl Default for VersionReq {
-    fn default() -> Self {
-        VersionReq::STAR
-    }
-}
-
-impl Comparator {
-    pub fn parse(text: &str) -> Result<Self, Error> {
-        Comparator::from_str(text)
-    }
-
-    pub fn matches(&self, version: &Version) -> bool {
-        eval::matches_comparator(self, version)
-    }
-}
-
-impl Prerelease {
-    pub const EMPTY: Self = Prerelease {
-        identifier: Identifier::empty(),
-    };
-
-    pub fn new(text: &str) -> Result<Self, Error> {
-        Prerelease::from_str(text)
-    }
-
-    pub fn as_str(&self) -> &str {
-        self.identifier.as_str()
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.identifier.is_empty()
-    }
-}
-
-impl BuildMetadata {
-    pub const EMPTY: Self = BuildMetadata {
-        identifier: Identifier::empty(),
-    };
-
-    pub fn new(text: &str) -> Result<Self, Error> {
-        BuildMetadata::from_str(text)
-    }
-
-    pub fn as_str(&self) -> &str {
-        self.identifier.as_str()
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.identifier.is_empty()
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/parse.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/parse.rs
deleted file mode 100644
index e92d87ab..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/parse.rs
+++ /dev/null
@@ -1,409 +0,0 @@
-use crate::backport::*;
-use crate::error::{ErrorKind, Position};
-use crate::identifier::Identifier;
-use crate::{BuildMetadata, Comparator, Op, Prerelease, Version, VersionReq};
-use core::str::FromStr;
-
-/// Error parsing a SemVer version or version requirement.
-///
-/// # Example
-///
-/// ```
-/// use semver::Version;
-///
-/// fn main() {
-///     let err = Version::parse("1.q.r").unwrap_err();
-///
-///     // "unexpected character 'q' while parsing minor version number"
-///     eprintln!("{}", err);
-/// }
-/// ```
-pub struct Error {
-    pub(crate) kind: ErrorKind,
-}
-
-impl FromStr for Version {
-    type Err = Error;
-
-    fn from_str(text: &str) -> Result<Self, Self::Err> {
-        if text.is_empty() {
-            return Err(Error::new(ErrorKind::Empty));
-        }
-
-        let mut pos = Position::Major;
-        let (major, text) = numeric_identifier(text, pos)?;
-        let text = dot(text, pos)?;
-
-        pos = Position::Minor;
-        let (minor, text) = numeric_identifier(text, pos)?;
-        let text = dot(text, pos)?;
-
-        pos = Position::Patch;
-        let (patch, text) = numeric_identifier(text, pos)?;
-
-        if text.is_empty() {
-            return Ok(Version::new(major, minor, patch));
-        }
-
-        let (pre, text) = if let Some(text) = text.strip_prefix('-') {
-            pos = Position::Pre;
-            let (pre, text) = prerelease_identifier(text)?;
-            if pre.is_empty() {
-                return Err(Error::new(ErrorKind::EmptySegment(pos)));
-            }
-            (pre, text)
-        } else {
-            (Prerelease::EMPTY, text)
-        };
-
-        let (build, text) = if let Some(text) = text.strip_prefix('+') {
-            pos = Position::Build;
-            let (build, text) = build_identifier(text)?;
-            if build.is_empty() {
-                return Err(Error::new(ErrorKind::EmptySegment(pos)));
-            }
-            (build, text)
-        } else {
-            (BuildMetadata::EMPTY, text)
-        };
-
-        if let Some(unexpected) = text.chars().next() {
-            return Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected)));
-        }
-
-        Ok(Version {
-            major,
-            minor,
-            patch,
-            pre,
-            build,
-        })
-    }
-}
-
-impl FromStr for VersionReq {
-    type Err = Error;
-
-    fn from_str(text: &str) -> Result<Self, Self::Err> {
-        let text = text.trim_start_matches(' ');
-        if let Some((ch, text)) = wildcard(text) {
-            let rest = text.trim_start_matches(' ');
-            if rest.is_empty() {
-                #[cfg(not(no_const_vec_new))]
-                return Ok(VersionReq::STAR);
-                #[cfg(no_const_vec_new)] // rustc <1.39
-                return Ok(VersionReq {
-                    comparators: Vec::new(),
-                });
-            } else if rest.starts_with(',') {
-                return Err(Error::new(ErrorKind::WildcardNotTheOnlyComparator(ch)));
-            } else {
-                return Err(Error::new(ErrorKind::UnexpectedAfterWildcard));
-            }
-        }
-
-        let depth = 0;
-        let mut comparators = Vec::new();
-        let len = version_req(text, &mut comparators, depth)?;
-        unsafe { comparators.set_len(len) }
-        Ok(VersionReq { comparators })
-    }
-}
-
-impl FromStr for Comparator {
-    type Err = Error;
-
-    fn from_str(text: &str) -> Result<Self, Self::Err> {
-        let text = text.trim_start_matches(' ');
-        let (comparator, pos, rest) = comparator(text)?;
-        if !rest.is_empty() {
-            let unexpected = rest.chars().next().unwrap();
-            return Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected)));
-        }
-        Ok(comparator)
-    }
-}
-
-impl FromStr for Prerelease {
-    type Err = Error;
-
-    fn from_str(text: &str) -> Result<Self, Self::Err> {
-        let (pre, rest) = prerelease_identifier(text)?;
-        if !rest.is_empty() {
-            return Err(Error::new(ErrorKind::IllegalCharacter(Position::Pre)));
-        }
-        Ok(pre)
-    }
-}
-
-impl FromStr for BuildMetadata {
-    type Err = Error;
-
-    fn from_str(text: &str) -> Result<Self, Self::Err> {
-        let (build, rest) = build_identifier(text)?;
-        if !rest.is_empty() {
-            return Err(Error::new(ErrorKind::IllegalCharacter(Position::Build)));
-        }
-        Ok(build)
-    }
-}
-
-impl Error {
-    fn new(kind: ErrorKind) -> Self {
-        Error { kind }
-    }
-}
-
-impl Op {
-    const DEFAULT: Self = Op::Caret;
-}
-
-fn numeric_identifier(input: &str, pos: Position) -> Result<(u64, &str), Error> {
-    let mut len = 0;
-    let mut value = 0u64;
-
-    while let Some(&digit) = input.as_bytes().get(len) {
-        if digit < b'0' || digit > b'9' {
-            break;
-        }
-        if value == 0 && len > 0 {
-            return Err(Error::new(ErrorKind::LeadingZero(pos)));
-        }
-        match value
-            .checked_mul(10)
-            .and_then(|value| value.checked_add((digit - b'0') as u64))
-        {
-            Some(sum) => value = sum,
-            None => return Err(Error::new(ErrorKind::Overflow(pos))),
-        }
-        len += 1;
-    }
-
-    if len > 0 {
-        Ok((value, &input[len..]))
-    } else if let Some(unexpected) = input[len..].chars().next() {
-        Err(Error::new(ErrorKind::UnexpectedChar(pos, unexpected)))
-    } else {
-        Err(Error::new(ErrorKind::UnexpectedEnd(pos)))
-    }
-}
-
-fn wildcard(input: &str) -> Option<(char, &str)> {
-    if let Some(rest) = input.strip_prefix('*') {
-        Some(('*', rest))
-    } else if let Some(rest) = input.strip_prefix('x') {
-        Some(('x', rest))
-    } else if let Some(rest) = input.strip_prefix('X') {
-        Some(('X', rest))
-    } else {
-        None
-    }
-}
-
-fn dot(input: &str, pos: Position) -> Result<&str, Error> {
-    if let Some(rest) = input.strip_prefix('.') {
-        Ok(rest)
-    } else if let Some(unexpected) = input.chars().next() {
-        Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected)))
-    } else {
-        Err(Error::new(ErrorKind::UnexpectedEnd(pos)))
-    }
-}
-
-fn prerelease_identifier(input: &str) -> Result<(Prerelease, &str), Error> {
-    let (string, rest) = identifier(input, Position::Pre)?;
-    let identifier = unsafe { Identifier::new_unchecked(string) };
-    Ok((Prerelease { identifier }, rest))
-}
-
-fn build_identifier(input: &str) -> Result<(BuildMetadata, &str), Error> {
-    let (string, rest) = identifier(input, Position::Build)?;
-    let identifier = unsafe { Identifier::new_unchecked(string) };
-    Ok((BuildMetadata { identifier }, rest))
-}
-
-fn identifier(input: &str, pos: Position) -> Result<(&str, &str), Error> {
-    let mut accumulated_len = 0;
-    let mut segment_len = 0;
-    let mut segment_has_nondigit = false;
-
-    loop {
-        match input.as_bytes().get(accumulated_len + segment_len) {
-            Some(b'A'..=b'Z') | Some(b'a'..=b'z') | Some(b'-') => {
-                segment_len += 1;
-                segment_has_nondigit = true;
-            }
-            Some(b'0'..=b'9') => {
-                segment_len += 1;
-            }
-            boundary => {
-                if segment_len == 0 {
-                    if accumulated_len == 0 && boundary != Some(&b'.') {
-                        return Ok(("", input));
-                    } else {
-                        return Err(Error::new(ErrorKind::EmptySegment(pos)));
-                    }
-                }
-                if pos == Position::Pre
-                    && segment_len > 1
-                    && !segment_has_nondigit
-                    && input[accumulated_len..].starts_with('0')
-                {
-                    return Err(Error::new(ErrorKind::LeadingZero(pos)));
-                }
-                accumulated_len += segment_len;
-                if boundary == Some(&b'.') {
-                    accumulated_len += 1;
-                    segment_len = 0;
-                    segment_has_nondigit = false;
-                } else {
-                    return Ok(input.split_at(accumulated_len));
-                }
-            }
-        }
-    }
-}
-
-fn op(input: &str) -> (Op, &str) {
-    let bytes = input.as_bytes();
-    if bytes.first() == Some(&b'=') {
-        (Op::Exact, &input[1..])
-    } else if bytes.first() == Some(&b'>') {
-        if bytes.get(1) == Some(&b'=') {
-            (Op::GreaterEq, &input[2..])
-        } else {
-            (Op::Greater, &input[1..])
-        }
-    } else if bytes.first() == Some(&b'<') {
-        if bytes.get(1) == Some(&b'=') {
-            (Op::LessEq, &input[2..])
-        } else {
-            (Op::Less, &input[1..])
-        }
-    } else if bytes.first() == Some(&b'~') {
-        (Op::Tilde, &input[1..])
-    } else if bytes.first() == Some(&b'^') {
-        (Op::Caret, &input[1..])
-    } else {
-        (Op::DEFAULT, input)
-    }
-}
-
-fn comparator(input: &str) -> Result<(Comparator, Position, &str), Error> {
-    let (mut op, text) = op(input);
-    let default_op = input.len() == text.len();
-    let text = text.trim_start_matches(' ');
-
-    let mut pos = Position::Major;
-    let (major, text) = numeric_identifier(text, pos)?;
-    let mut has_wildcard = false;
-
-    let (minor, text) = if let Some(text) = text.strip_prefix('.') {
-        pos = Position::Minor;
-        if let Some((_, text)) = wildcard(text) {
-            has_wildcard = true;
-            if default_op {
-                op = Op::Wildcard;
-            }
-            (None, text)
-        } else {
-            let (minor, text) = numeric_identifier(text, pos)?;
-            (Some(minor), text)
-        }
-    } else {
-        (None, text)
-    };
-
-    let (patch, text) = if let Some(text) = text.strip_prefix('.') {
-        pos = Position::Patch;
-        if let Some((_, text)) = wildcard(text) {
-            if default_op {
-                op = Op::Wildcard;
-            }
-            (None, text)
-        } else if has_wildcard {
-            return Err(Error::new(ErrorKind::UnexpectedAfterWildcard));
-        } else {
-            let (patch, text) = numeric_identifier(text, pos)?;
-            (Some(patch), text)
-        }
-    } else {
-        (None, text)
-    };
-
-    let (pre, text) = if patch.is_some() && text.starts_with('-') {
-        pos = Position::Pre;
-        let text = &text[1..];
-        let (pre, text) = prerelease_identifier(text)?;
-        if pre.is_empty() {
-            return Err(Error::new(ErrorKind::EmptySegment(pos)));
-        }
-        (pre, text)
-    } else {
-        (Prerelease::EMPTY, text)
-    };
-
-    let text = if patch.is_some() && text.starts_with('+') {
-        pos = Position::Build;
-        let text = &text[1..];
-        let (build, text) = build_identifier(text)?;
-        if build.is_empty() {
-            return Err(Error::new(ErrorKind::EmptySegment(pos)));
-        }
-        text
-    } else {
-        text
-    };
-
-    let text = text.trim_start_matches(' ');
-
-    let comparator = Comparator {
-        op,
-        major,
-        minor,
-        patch,
-        pre,
-    };
-
-    Ok((comparator, pos, text))
-}
-
-fn version_req(input: &str, out: &mut Vec<Comparator>, depth: usize) -> Result<usize, Error> {
-    let (comparator, pos, text) = match comparator(input) {
-        Ok(success) => success,
-        Err(mut error) => {
-            if let Some((ch, mut rest)) = wildcard(input) {
-                rest = rest.trim_start_matches(' ');
-                if rest.is_empty() || rest.starts_with(',') {
-                    error.kind = ErrorKind::WildcardNotTheOnlyComparator(ch);
-                }
-            }
-            return Err(error);
-        }
-    };
-
-    if text.is_empty() {
-        out.reserve_exact(depth + 1);
-        unsafe { out.as_mut_ptr().add(depth).write(comparator) }
-        return Ok(depth + 1);
-    }
-
-    let text = if let Some(text) = text.strip_prefix(',') {
-        text.trim_start_matches(' ')
-    } else {
-        let unexpected = text.chars().next().unwrap();
-        return Err(Error::new(ErrorKind::ExpectedCommaFound(pos, unexpected)));
-    };
-
-    const MAX_COMPARATORS: usize = 32;
-    if depth + 1 == MAX_COMPARATORS {
-        return Err(Error::new(ErrorKind::ExcessiveComparators));
-    }
-
-    // Recurse to collect parsed Comparator objects on the stack. We perform a
-    // single allocation to allocate exactly the right sized Vec only once the
-    // total number of comparators is known.
-    let len = version_req(text, out, depth + 1)?;
-    unsafe { out.as_mut_ptr().add(depth).write(comparator) }
-    Ok(len)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/serde.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/serde.rs
deleted file mode 100644
index 1fcc7d87..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/serde.rs
+++ /dev/null
@@ -1,109 +0,0 @@
-use crate::{Comparator, Version, VersionReq};
-use core::fmt;
-use serde::de::{Deserialize, Deserializer, Error, Visitor};
-use serde::ser::{Serialize, Serializer};
-
-impl Serialize for Version {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        serializer.collect_str(self)
-    }
-}
-
-impl Serialize for VersionReq {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        serializer.collect_str(self)
-    }
-}
-
-impl Serialize for Comparator {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        serializer.collect_str(self)
-    }
-}
-
-impl<'de> Deserialize<'de> for Version {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct VersionVisitor;
-
-        impl<'de> Visitor<'de> for VersionVisitor {
-            type Value = Version;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("semver version")
-            }
-
-            fn visit_str<E>(self, string: &str) -> Result<Self::Value, E>
-            where
-                E: Error,
-            {
-                string.parse().map_err(Error::custom)
-            }
-        }
-
-        deserializer.deserialize_str(VersionVisitor)
-    }
-}
-
-impl<'de> Deserialize<'de> for VersionReq {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct VersionReqVisitor;
-
-        impl<'de> Visitor<'de> for VersionReqVisitor {
-            type Value = VersionReq;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("semver version")
-            }
-
-            fn visit_str<E>(self, string: &str) -> Result<Self::Value, E>
-            where
-                E: Error,
-            {
-                string.parse().map_err(Error::custom)
-            }
-        }
-
-        deserializer.deserialize_str(VersionReqVisitor)
-    }
-}
-
-impl<'de> Deserialize<'de> for Comparator {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct ComparatorVisitor;
-
-        impl<'de> Visitor<'de> for ComparatorVisitor {
-            type Value = Comparator;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("semver comparator")
-            }
-
-            fn visit_str<E>(self, string: &str) -> Result<Self::Value, E>
-            where
-                E: Error,
-            {
-                string.parse().map_err(Error::custom)
-            }
-        }
-
-        deserializer.deserialize_str(ComparatorVisitor)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/node/mod.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/node/mod.rs
deleted file mode 100644
index eb50673..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/node/mod.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-#![cfg(test_node_semver)]
-
-use semver::Version;
-use std::fmt::{self, Display};
-use std::process::Command;
-
-#[derive(Default, Eq, PartialEq, Hash, Debug)]
-pub(super) struct VersionReq(semver::VersionReq);
-
-impl VersionReq {
-    pub(super) const STAR: Self = VersionReq(semver::VersionReq::STAR);
-
-    pub(super) fn matches(&self, version: &Version) -> bool {
-        let out = Command::new("node")
-            .arg("-e")
-            .arg(format!(
-                "console.log(require('semver').satisfies('{}', '{}'))",
-                version,
-                self.to_string().replace(',', ""),
-            ))
-            .output()
-            .unwrap();
-        if out.stdout == b"true\n" {
-            true
-        } else if out.stdout == b"false\n" {
-            false
-        } else {
-            let s = String::from_utf8_lossy(&out.stdout) + String::from_utf8_lossy(&out.stderr);
-            panic!("unexpected output: {}", s);
-        }
-    }
-}
-
-impl Display for VersionReq {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        Display::fmt(&self.0, formatter)
-    }
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn req(text: &str) -> VersionReq {
-    VersionReq(crate::util::req(text))
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_autotrait.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_autotrait.rs
deleted file mode 100644
index 5d16689..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_autotrait.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-#![allow(clippy::extra_unused_type_parameters)]
-
-fn assert_send_sync<T: Send + Sync>() {}
-
-#[test]
-fn test() {
-    assert_send_sync::<semver::BuildMetadata>();
-    assert_send_sync::<semver::Comparator>();
-    assert_send_sync::<semver::Error>();
-    assert_send_sync::<semver::Prerelease>();
-    assert_send_sync::<semver::Version>();
-    assert_send_sync::<semver::VersionReq>();
-    assert_send_sync::<semver::Op>();
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_identifier.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_identifier.rs
deleted file mode 100644
index 40d8596..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_identifier.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-#![allow(
-    clippy::eq_op,
-    clippy::needless_pass_by_value,
-    clippy::toplevel_ref_arg,
-    clippy::wildcard_imports
-)]
-
-mod util;
-
-use crate::util::*;
-use semver::Prerelease;
-
-#[test]
-fn test_new() {
-    fn test(identifier: Prerelease, expected: &str) {
-        assert_eq!(identifier.is_empty(), expected.is_empty());
-        assert_eq!(identifier.len(), expected.len());
-        assert_eq!(identifier.as_str(), expected);
-        assert_eq!(identifier, identifier);
-        assert_eq!(identifier, identifier.clone());
-    }
-
-    let ref mut string = String::new();
-    let limit = if cfg!(miri) { 40 } else { 280 }; // miri is slow
-    for _ in 0..limit {
-        test(prerelease(string), string);
-        string.push('1');
-    }
-
-    if !cfg!(miri) {
-        let ref string = string.repeat(20000);
-        test(prerelease(string), string);
-    }
-}
-
-#[test]
-fn test_eq() {
-    assert_eq!(prerelease("-"), prerelease("-"));
-    assert_ne!(prerelease("a"), prerelease("aa"));
-    assert_ne!(prerelease("aa"), prerelease("a"));
-    assert_ne!(prerelease("aaaaaaaaa"), prerelease("a"));
-    assert_ne!(prerelease("a"), prerelease("aaaaaaaaa"));
-    assert_ne!(prerelease("aaaaaaaaa"), prerelease("bbbbbbbbb"));
-    assert_ne!(build_metadata("1"), build_metadata("001"));
-}
-
-#[test]
-fn test_prerelease() {
-    let err = prerelease_err("1.b\0");
-    assert_to_string(err, "unexpected character in pre-release identifier");
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_version.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_version.rs
deleted file mode 100644
index 991087f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_version.rs
+++ /dev/null
@@ -1,250 +0,0 @@
-#![allow(
-    clippy::nonminimal_bool,
-    clippy::too_many_lines,
-    clippy::wildcard_imports
-)]
-
-mod util;
-
-use crate::util::*;
-use semver::{BuildMetadata, Prerelease, Version};
-
-#[test]
-fn test_parse() {
-    let err = version_err("");
-    assert_to_string(err, "empty string, expected a semver version");
-
-    let err = version_err("  ");
-    assert_to_string(
-        err,
-        "unexpected character ' ' while parsing major version number",
-    );
-
-    let err = version_err("1");
-    assert_to_string(
-        err,
-        "unexpected end of input while parsing major version number",
-    );
-
-    let err = version_err("1.2");
-    assert_to_string(
-        err,
-        "unexpected end of input while parsing minor version number",
-    );
-
-    let err = version_err("1.2.3-");
-    assert_to_string(err, "empty identifier segment in pre-release identifier");
-
-    let err = version_err("a.b.c");
-    assert_to_string(
-        err,
-        "unexpected character 'a' while parsing major version number",
-    );
-
-    let err = version_err("1.2.3 abc");
-    assert_to_string(err, "unexpected character ' ' after patch version number");
-
-    let err = version_err("1.2.3-01");
-    assert_to_string(err, "invalid leading zero in pre-release identifier");
-
-    let err = version_err("1.2.3++");
-    assert_to_string(err, "empty identifier segment in build metadata");
-
-    let err = version_err("07");
-    assert_to_string(err, "invalid leading zero in major version number");
-
-    let err = version_err("111111111111111111111.0.0");
-    assert_to_string(err, "value of major version number exceeds u64::MAX");
-
-    let err = version_err("8\0");
-    assert_to_string(err, "unexpected character '\\0' after major version number");
-
-    let parsed = version("1.2.3");
-    let expected = Version::new(1, 2, 3);
-    assert_eq!(parsed, expected);
-    let expected = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: Prerelease::EMPTY,
-        build: BuildMetadata::EMPTY,
-    };
-    assert_eq!(parsed, expected);
-
-    let parsed = version("1.2.3-alpha1");
-    let expected = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: prerelease("alpha1"),
-        build: BuildMetadata::EMPTY,
-    };
-    assert_eq!(parsed, expected);
-
-    let parsed = version("1.2.3+build5");
-    let expected = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: Prerelease::EMPTY,
-        build: build_metadata("build5"),
-    };
-    assert_eq!(parsed, expected);
-
-    let parsed = version("1.2.3+5build");
-    let expected = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: Prerelease::EMPTY,
-        build: build_metadata("5build"),
-    };
-    assert_eq!(parsed, expected);
-
-    let parsed = version("1.2.3-alpha1+build5");
-    let expected = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: prerelease("alpha1"),
-        build: build_metadata("build5"),
-    };
-    assert_eq!(parsed, expected);
-
-    let parsed = version("1.2.3-1.alpha1.9+build5.7.3aedf");
-    let expected = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: prerelease("1.alpha1.9"),
-        build: build_metadata("build5.7.3aedf"),
-    };
-    assert_eq!(parsed, expected);
-
-    let parsed = version("1.2.3-0a.alpha1.9+05build.7.3aedf");
-    let expected = Version {
-        major: 1,
-        minor: 2,
-        patch: 3,
-        pre: prerelease("0a.alpha1.9"),
-        build: build_metadata("05build.7.3aedf"),
-    };
-    assert_eq!(parsed, expected);
-
-    let parsed = version("0.4.0-beta.1+0851523");
-    let expected = Version {
-        major: 0,
-        minor: 4,
-        patch: 0,
-        pre: prerelease("beta.1"),
-        build: build_metadata("0851523"),
-    };
-    assert_eq!(parsed, expected);
-
-    // for https://nodejs.org/dist/index.json, where some older npm versions are "1.1.0-beta-10"
-    let parsed = version("1.1.0-beta-10");
-    let expected = Version {
-        major: 1,
-        minor: 1,
-        patch: 0,
-        pre: prerelease("beta-10"),
-        build: BuildMetadata::EMPTY,
-    };
-    assert_eq!(parsed, expected);
-}
-
-#[test]
-fn test_eq() {
-    assert_eq!(version("1.2.3"), version("1.2.3"));
-    assert_eq!(version("1.2.3-alpha1"), version("1.2.3-alpha1"));
-    assert_eq!(version("1.2.3+build.42"), version("1.2.3+build.42"));
-    assert_eq!(version("1.2.3-alpha1+42"), version("1.2.3-alpha1+42"));
-}
-
-#[test]
-fn test_ne() {
-    assert_ne!(version("0.0.0"), version("0.0.1"));
-    assert_ne!(version("0.0.0"), version("0.1.0"));
-    assert_ne!(version("0.0.0"), version("1.0.0"));
-    assert_ne!(version("1.2.3-alpha"), version("1.2.3-beta"));
-    assert_ne!(version("1.2.3+23"), version("1.2.3+42"));
-}
-
-#[test]
-fn test_display() {
-    assert_to_string(version("1.2.3"), "1.2.3");
-    assert_to_string(version("1.2.3-alpha1"), "1.2.3-alpha1");
-    assert_to_string(version("1.2.3+build.42"), "1.2.3+build.42");
-    assert_to_string(version("1.2.3-alpha1+42"), "1.2.3-alpha1+42");
-}
-
-#[test]
-fn test_lt() {
-    assert!(version("0.0.0") < version("1.2.3-alpha2"));
-    assert!(version("1.0.0") < version("1.2.3-alpha2"));
-    assert!(version("1.2.0") < version("1.2.3-alpha2"));
-    assert!(version("1.2.3-alpha1") < version("1.2.3"));
-    assert!(version("1.2.3-alpha1") < version("1.2.3-alpha2"));
-    assert!(!(version("1.2.3-alpha2") < version("1.2.3-alpha2")));
-    assert!(version("1.2.3+23") < version("1.2.3+42"));
-}
-
-#[test]
-fn test_le() {
-    assert!(version("0.0.0") <= version("1.2.3-alpha2"));
-    assert!(version("1.0.0") <= version("1.2.3-alpha2"));
-    assert!(version("1.2.0") <= version("1.2.3-alpha2"));
-    assert!(version("1.2.3-alpha1") <= version("1.2.3-alpha2"));
-    assert!(version("1.2.3-alpha2") <= version("1.2.3-alpha2"));
-    assert!(version("1.2.3+23") <= version("1.2.3+42"));
-}
-
-#[test]
-fn test_gt() {
-    assert!(version("1.2.3-alpha2") > version("0.0.0"));
-    assert!(version("1.2.3-alpha2") > version("1.0.0"));
-    assert!(version("1.2.3-alpha2") > version("1.2.0"));
-    assert!(version("1.2.3-alpha2") > version("1.2.3-alpha1"));
-    assert!(version("1.2.3") > version("1.2.3-alpha2"));
-    assert!(!(version("1.2.3-alpha2") > version("1.2.3-alpha2")));
-    assert!(!(version("1.2.3+23") > version("1.2.3+42")));
-}
-
-#[test]
-fn test_ge() {
-    assert!(version("1.2.3-alpha2") >= version("0.0.0"));
-    assert!(version("1.2.3-alpha2") >= version("1.0.0"));
-    assert!(version("1.2.3-alpha2") >= version("1.2.0"));
-    assert!(version("1.2.3-alpha2") >= version("1.2.3-alpha1"));
-    assert!(version("1.2.3-alpha2") >= version("1.2.3-alpha2"));
-    assert!(!(version("1.2.3+23") >= version("1.2.3+42")));
-}
-
-#[test]
-fn test_spec_order() {
-    let vs = [
-        "1.0.0-alpha",
-        "1.0.0-alpha.1",
-        "1.0.0-alpha.beta",
-        "1.0.0-beta",
-        "1.0.0-beta.2",
-        "1.0.0-beta.11",
-        "1.0.0-rc.1",
-        "1.0.0",
-    ];
-    let mut i = 1;
-    while i < vs.len() {
-        let a = version(vs[i - 1]);
-        let b = version(vs[i]);
-        assert!(a < b, "nope {:?} < {:?}", a, b);
-        i += 1;
-    }
-}
-
-#[test]
-fn test_align() {
-    let version = version("1.2.3-rc1");
-    assert_eq!("1.2.3-rc1           ", format!("{:20}", version));
-    assert_eq!("*****1.2.3-rc1******", format!("{:*^20}", version));
-    assert_eq!("           1.2.3-rc1", format!("{:>20}", version));
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_version_req.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_version_req.rs
deleted file mode 100644
index 1ed2358..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/test_version_req.rs
+++ /dev/null
@@ -1,485 +0,0 @@
-#![allow(
-    clippy::missing_panics_doc,
-    clippy::shadow_unrelated,
-    clippy::toplevel_ref_arg,
-    clippy::wildcard_imports
-)]
-
-mod node;
-mod util;
-
-use crate::util::*;
-use std::collections::hash_map::DefaultHasher;
-use std::hash::{Hash, Hasher};
-
-#[cfg(test_node_semver)]
-use node::{req, VersionReq};
-#[cfg(not(test_node_semver))]
-use semver::VersionReq;
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-fn assert_match_all(req: &VersionReq, versions: &[&str]) {
-    for string in versions {
-        let parsed = version(string);
-        assert!(req.matches(&parsed), "did not match {}", string);
-    }
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-fn assert_match_none(req: &VersionReq, versions: &[&str]) {
-    for string in versions {
-        let parsed = version(string);
-        assert!(!req.matches(&parsed), "matched {}", string);
-    }
-}
-
-#[test]
-fn test_basic() {
-    let ref r = req("1.0.0");
-    assert_to_string(r, "^1.0.0");
-    assert_match_all(r, &["1.0.0", "1.1.0", "1.0.1"]);
-    assert_match_none(r, &["0.9.9", "0.10.0", "0.1.0", "1.0.0-pre", "1.0.1-pre"]);
-}
-
-#[test]
-#[cfg(not(no_const_vec_new))]
-fn test_default() {
-    let ref r = VersionReq::default();
-    assert_eq!(r, &VersionReq::STAR);
-}
-
-#[test]
-fn test_exact() {
-    let ref r = req("=1.0.0");
-    assert_to_string(r, "=1.0.0");
-    assert_match_all(r, &["1.0.0"]);
-    assert_match_none(r, &["1.0.1", "0.9.9", "0.10.0", "0.1.0", "1.0.0-pre"]);
-
-    let ref r = req("=0.9.0");
-    assert_to_string(r, "=0.9.0");
-    assert_match_all(r, &["0.9.0"]);
-    assert_match_none(r, &["0.9.1", "1.9.0", "0.0.9", "0.9.0-pre"]);
-
-    let ref r = req("=0.0.2");
-    assert_to_string(r, "=0.0.2");
-    assert_match_all(r, &["0.0.2"]);
-    assert_match_none(r, &["0.0.1", "0.0.3", "0.0.2-pre"]);
-
-    let ref r = req("=0.1.0-beta2.a");
-    assert_to_string(r, "=0.1.0-beta2.a");
-    assert_match_all(r, &["0.1.0-beta2.a"]);
-    assert_match_none(r, &["0.9.1", "0.1.0", "0.1.1-beta2.a", "0.1.0-beta2"]);
-
-    let ref r = req("=0.1.0+meta");
-    assert_to_string(r, "=0.1.0");
-    assert_match_all(r, &["0.1.0", "0.1.0+meta", "0.1.0+any"]);
-}
-
-#[test]
-pub fn test_greater_than() {
-    let ref r = req(">= 1.0.0");
-    assert_to_string(r, ">=1.0.0");
-    assert_match_all(r, &["1.0.0", "2.0.0"]);
-    assert_match_none(r, &["0.1.0", "0.0.1", "1.0.0-pre", "2.0.0-pre"]);
-
-    let ref r = req(">= 2.1.0-alpha2");
-    assert_to_string(r, ">=2.1.0-alpha2");
-    assert_match_all(r, &["2.1.0-alpha2", "2.1.0-alpha3", "2.1.0", "3.0.0"]);
-    assert_match_none(
-        r,
-        &["2.0.0", "2.1.0-alpha1", "2.0.0-alpha2", "3.0.0-alpha2"],
-    );
-}
-
-#[test]
-pub fn test_less_than() {
-    let ref r = req("< 1.0.0");
-    assert_to_string(r, "<1.0.0");
-    assert_match_all(r, &["0.1.0", "0.0.1"]);
-    assert_match_none(r, &["1.0.0", "1.0.0-beta", "1.0.1", "0.9.9-alpha"]);
-
-    let ref r = req("<= 2.1.0-alpha2");
-    assert_match_all(r, &["2.1.0-alpha2", "2.1.0-alpha1", "2.0.0", "1.0.0"]);
-    assert_match_none(
-        r,
-        &["2.1.0", "2.2.0-alpha1", "2.0.0-alpha2", "1.0.0-alpha2"],
-    );
-
-    let ref r = req(">1.0.0-alpha, <1.0.0");
-    assert_match_all(r, &["1.0.0-beta"]);
-
-    let ref r = req(">1.0.0-alpha, <1.0");
-    assert_match_none(r, &["1.0.0-beta"]);
-
-    let ref r = req(">1.0.0-alpha, <1");
-    assert_match_none(r, &["1.0.0-beta"]);
-}
-
-#[test]
-pub fn test_multiple() {
-    let ref r = req("> 0.0.9, <= 2.5.3");
-    assert_to_string(r, ">0.0.9, <=2.5.3");
-    assert_match_all(r, &["0.0.10", "1.0.0", "2.5.3"]);
-    assert_match_none(r, &["0.0.8", "2.5.4"]);
-
-    let ref r = req("0.3.0, 0.4.0");
-    assert_to_string(r, "^0.3.0, ^0.4.0");
-    assert_match_none(r, &["0.0.8", "0.3.0", "0.4.0"]);
-
-    let ref r = req("<= 0.2.0, >= 0.5.0");
-    assert_to_string(r, "<=0.2.0, >=0.5.0");
-    assert_match_none(r, &["0.0.8", "0.3.0", "0.5.1"]);
-
-    let ref r = req("0.1.0, 0.1.4, 0.1.6");
-    assert_to_string(r, "^0.1.0, ^0.1.4, ^0.1.6");
-    assert_match_all(r, &["0.1.6", "0.1.9"]);
-    assert_match_none(r, &["0.1.0", "0.1.4", "0.2.0"]);
-
-    let err = req_err("> 0.1.0,");
-    assert_to_string(
-        err,
-        "unexpected end of input while parsing major version number",
-    );
-
-    let err = req_err("> 0.3.0, ,");
-    assert_to_string(
-        err,
-        "unexpected character ',' while parsing major version number",
-    );
-
-    let ref r = req(">=0.5.1-alpha3, <0.6");
-    assert_to_string(r, ">=0.5.1-alpha3, <0.6");
-    assert_match_all(
-        r,
-        &[
-            "0.5.1-alpha3",
-            "0.5.1-alpha4",
-            "0.5.1-beta",
-            "0.5.1",
-            "0.5.5",
-        ],
-    );
-    assert_match_none(
-        r,
-        &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre"],
-    );
-    assert_match_none(r, &["0.6.0", "0.6.0-pre"]);
-
-    // https://github.com/steveklabnik/semver/issues/56
-    let err = req_err("1.2.3 - 2.3.4");
-    assert_to_string(err, "expected comma after patch version number, found '-'");
-
-    let err = req_err(">1, >2, >3, >4, >5, >6, >7, >8, >9, >10, >11, >12, >13, >14, >15, >16, >17, >18, >19, >20, >21, >22, >23, >24, >25, >26, >27, >28, >29, >30, >31, >32, >33");
-    assert_to_string(err, "excessive number of version comparators");
-}
-
-#[test]
-pub fn test_whitespace_delimited_comparator_sets() {
-    // https://github.com/steveklabnik/semver/issues/55
-    let err = req_err("> 0.0.9 <= 2.5.3");
-    assert_to_string(err, "expected comma after patch version number, found '<'");
-}
-
-#[test]
-pub fn test_tilde() {
-    let ref r = req("~1");
-    assert_match_all(r, &["1.0.0", "1.0.1", "1.1.1"]);
-    assert_match_none(r, &["0.9.1", "2.9.0", "0.0.9"]);
-
-    let ref r = req("~1.2");
-    assert_match_all(r, &["1.2.0", "1.2.1"]);
-    assert_match_none(r, &["1.1.1", "1.3.0", "0.0.9"]);
-
-    let ref r = req("~1.2.2");
-    assert_match_all(r, &["1.2.2", "1.2.4"]);
-    assert_match_none(r, &["1.2.1", "1.9.0", "1.0.9", "2.0.1", "0.1.3"]);
-
-    let ref r = req("~1.2.3-beta.2");
-    assert_match_all(r, &["1.2.3", "1.2.4", "1.2.3-beta.2", "1.2.3-beta.4"]);
-    assert_match_none(r, &["1.3.3", "1.1.4", "1.2.3-beta.1", "1.2.4-beta.2"]);
-}
-
-#[test]
-pub fn test_caret() {
-    let ref r = req("^1");
-    assert_match_all(r, &["1.1.2", "1.1.0", "1.2.1", "1.0.1"]);
-    assert_match_none(r, &["0.9.1", "2.9.0", "0.1.4"]);
-    assert_match_none(r, &["1.0.0-beta1", "0.1.0-alpha", "1.0.1-pre"]);
-
-    let ref r = req("^1.1");
-    assert_match_all(r, &["1.1.2", "1.1.0", "1.2.1"]);
-    assert_match_none(r, &["0.9.1", "2.9.0", "1.0.1", "0.1.4"]);
-
-    let ref r = req("^1.1.2");
-    assert_match_all(r, &["1.1.2", "1.1.4", "1.2.1"]);
-    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]);
-    assert_match_none(r, &["1.1.2-alpha1", "1.1.3-alpha1", "2.9.0-alpha1"]);
-
-    let ref r = req("^0.1.2");
-    assert_match_all(r, &["0.1.2", "0.1.4"]);
-    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]);
-    assert_match_none(r, &["0.1.2-beta", "0.1.3-alpha", "0.2.0-pre"]);
-
-    let ref r = req("^0.5.1-alpha3");
-    assert_match_all(
-        r,
-        &[
-            "0.5.1-alpha3",
-            "0.5.1-alpha4",
-            "0.5.1-beta",
-            "0.5.1",
-            "0.5.5",
-        ],
-    );
-    assert_match_none(
-        r,
-        &[
-            "0.5.1-alpha1",
-            "0.5.2-alpha3",
-            "0.5.5-pre",
-            "0.5.0-pre",
-            "0.6.0",
-        ],
-    );
-
-    let ref r = req("^0.0.2");
-    assert_match_all(r, &["0.0.2"]);
-    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1", "0.1.4"]);
-
-    let ref r = req("^0.0");
-    assert_match_all(r, &["0.0.2", "0.0.0"]);
-    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.1.4"]);
-
-    let ref r = req("^0");
-    assert_match_all(r, &["0.9.1", "0.0.2", "0.0.0"]);
-    assert_match_none(r, &["2.9.0", "1.1.1"]);
-
-    let ref r = req("^1.4.2-beta.5");
-    assert_match_all(
-        r,
-        &["1.4.2", "1.4.3", "1.4.2-beta.5", "1.4.2-beta.6", "1.4.2-c"],
-    );
-    assert_match_none(
-        r,
-        &[
-            "0.9.9",
-            "2.0.0",
-            "1.4.2-alpha",
-            "1.4.2-beta.4",
-            "1.4.3-beta.5",
-        ],
-    );
-}
-
-#[test]
-pub fn test_wildcard() {
-    let err = req_err("");
-    assert_to_string(
-        err,
-        "unexpected end of input while parsing major version number",
-    );
-
-    let ref r = req("*");
-    assert_match_all(r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]);
-    assert_match_none(r, &["1.0.0-pre"]);
-
-    for s in &["x", "X"] {
-        assert_eq!(*r, req(s));
-    }
-
-    let ref r = req("1.*");
-    assert_match_all(r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]);
-    assert_match_none(r, &["0.0.9", "1.2.0-pre"]);
-
-    for s in &["1.x", "1.X", "1.*.*"] {
-        assert_eq!(*r, req(s));
-    }
-
-    let ref r = req("1.2.*");
-    assert_match_all(r, &["1.2.0", "1.2.2", "1.2.4"]);
-    assert_match_none(r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3", "1.2.2-pre"]);
-
-    for s in &["1.2.x", "1.2.X"] {
-        assert_eq!(*r, req(s));
-    }
-}
-
-#[test]
-pub fn test_logical_or() {
-    // https://github.com/steveklabnik/semver/issues/57
-    let err = req_err("=1.2.3 || =2.3.4");
-    assert_to_string(err, "expected comma after patch version number, found '|'");
-
-    let err = req_err("1.1 || =1.2.3");
-    assert_to_string(err, "expected comma after minor version number, found '|'");
-
-    let err = req_err("6.* || 8.* || >= 10.*");
-    assert_to_string(err, "expected comma after minor version number, found '|'");
-}
-
-#[test]
-pub fn test_any() {
-    #[cfg(not(no_const_vec_new))]
-    let ref r = VersionReq::STAR;
-    #[cfg(no_const_vec_new)]
-    let ref r = VersionReq {
-        comparators: Vec::new(),
-    };
-    assert_match_all(r, &["0.0.1", "0.1.0", "1.0.0"]);
-}
-
-#[test]
-pub fn test_pre() {
-    let ref r = req("=2.1.1-really.0");
-    assert_match_all(r, &["2.1.1-really.0"]);
-}
-
-#[test]
-pub fn test_parse() {
-    let err = req_err("\0");
-    assert_to_string(
-        err,
-        "unexpected character '\\0' while parsing major version number",
-    );
-
-    let err = req_err(">= >= 0.0.2");
-    assert_to_string(
-        err,
-        "unexpected character '>' while parsing major version number",
-    );
-
-    let err = req_err(">== 0.0.2");
-    assert_to_string(
-        err,
-        "unexpected character '=' while parsing major version number",
-    );
-
-    let err = req_err("a.0.0");
-    assert_to_string(
-        err,
-        "unexpected character 'a' while parsing major version number",
-    );
-
-    let err = req_err("1.0.0-");
-    assert_to_string(err, "empty identifier segment in pre-release identifier");
-
-    let err = req_err(">=");
-    assert_to_string(
-        err,
-        "unexpected end of input while parsing major version number",
-    );
-}
-
-#[test]
-fn test_comparator_parse() {
-    let parsed = comparator("1.2.3-alpha");
-    assert_to_string(parsed, "^1.2.3-alpha");
-
-    let parsed = comparator("2.X");
-    assert_to_string(parsed, "2.*");
-
-    let parsed = comparator("2");
-    assert_to_string(parsed, "^2");
-
-    let parsed = comparator("2.x.x");
-    assert_to_string(parsed, "2.*");
-
-    let err = comparator_err("1.2.3-01");
-    assert_to_string(err, "invalid leading zero in pre-release identifier");
-
-    let err = comparator_err("1.2.3+4.");
-    assert_to_string(err, "empty identifier segment in build metadata");
-
-    let err = comparator_err(">");
-    assert_to_string(
-        err,
-        "unexpected end of input while parsing major version number",
-    );
-
-    let err = comparator_err("1.");
-    assert_to_string(
-        err,
-        "unexpected end of input while parsing minor version number",
-    );
-
-    let err = comparator_err("1.*.");
-    assert_to_string(err, "unexpected character after wildcard in version req");
-
-    let err = comparator_err("1.2.3+4ÿ");
-    assert_to_string(err, "unexpected character 'ÿ' after build metadata");
-}
-
-#[test]
-fn test_cargo3202() {
-    let ref r = req("0.*.*");
-    assert_to_string(r, "0.*");
-    assert_match_all(r, &["0.5.0"]);
-
-    let ref r = req("0.0.*");
-    assert_to_string(r, "0.0.*");
-}
-
-#[test]
-fn test_digit_after_wildcard() {
-    let err = req_err("*.1");
-    assert_to_string(err, "unexpected character after wildcard in version req");
-
-    let err = req_err("1.*.1");
-    assert_to_string(err, "unexpected character after wildcard in version req");
-
-    let err = req_err(">=1.*.1");
-    assert_to_string(err, "unexpected character after wildcard in version req");
-}
-
-#[test]
-fn test_eq_hash() {
-    fn calculate_hash(value: impl Hash) -> u64 {
-        let mut hasher = DefaultHasher::new();
-        value.hash(&mut hasher);
-        hasher.finish()
-    }
-
-    assert!(req("^1") == req("^1"));
-    assert!(calculate_hash(req("^1")) == calculate_hash(req("^1")));
-    assert!(req("^1") != req("^2"));
-}
-
-#[test]
-fn test_leading_digit_in_pre_and_build() {
-    for op in &["=", ">", ">=", "<", "<=", "~", "^"] {
-        // digit then alpha
-        req(&format!("{} 1.2.3-1a", op));
-        req(&format!("{} 1.2.3+1a", op));
-
-        // digit then alpha (leading zero)
-        req(&format!("{} 1.2.3-01a", op));
-        req(&format!("{} 1.2.3+01", op));
-
-        // multiple
-        req(&format!("{} 1.2.3-1+1", op));
-        req(&format!("{} 1.2.3-1-1+1-1-1", op));
-        req(&format!("{} 1.2.3-1a+1a", op));
-        req(&format!("{} 1.2.3-1a-1a+1a-1a-1a", op));
-    }
-}
-
-#[test]
-fn test_wildcard_and_another() {
-    let err = req_err("*, 0.20.0-any");
-    assert_to_string(
-        err,
-        "wildcard req (*) must be the only comparator in the version req",
-    );
-
-    let err = req_err("0.20.0-any, *");
-    assert_to_string(
-        err,
-        "wildcard req (*) must be the only comparator in the version req",
-    );
-
-    let err = req_err("0.20.0-any, *, 1.0");
-    assert_to_string(
-        err,
-        "wildcard req (*) must be the only comparator in the version req",
-    );
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/util/mod.rs b/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/util/mod.rs
deleted file mode 100644
index 07d691f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/semver-1.0.26/tests/util/mod.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-#![allow(dead_code)]
-
-use semver::{BuildMetadata, Comparator, Error, Prerelease, Version, VersionReq};
-use std::fmt::Display;
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn version(text: &str) -> Version {
-    Version::parse(text).unwrap()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn version_err(text: &str) -> Error {
-    Version::parse(text).unwrap_err()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn req(text: &str) -> VersionReq {
-    VersionReq::parse(text).unwrap()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn req_err(text: &str) -> Error {
-    VersionReq::parse(text).unwrap_err()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn comparator(text: &str) -> Comparator {
-    Comparator::parse(text).unwrap()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn comparator_err(text: &str) -> Error {
-    Comparator::parse(text).unwrap_err()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn prerelease(text: &str) -> Prerelease {
-    Prerelease::new(text).unwrap()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn prerelease_err(text: &str) -> Error {
-    Prerelease::new(text).unwrap_err()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn build_metadata(text: &str) -> BuildMetadata {
-    BuildMetadata::new(text).unwrap()
-}
-
-#[cfg_attr(not(no_track_caller), track_caller)]
-pub(super) fn assert_to_string(value: impl Display, expected: &str) {
-    assert_eq!(value.to_string(), expected);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.cargo_vcs_info.json
deleted file mode 100644
index 75e01f9d2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.cargo_vcs_info.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "git": {
-    "sha1": "762783414e6c4f8d670c9d87eb04913efb80d3be"
-  },
-  "path_in_vcs": ""
-}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.github/workflows/ci.yml
deleted file mode 100644
index 77d611a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.github/workflows/ci.yml
+++ /dev/null
@@ -1,156 +0,0 @@
-name: CI
-
-on:
-  push:
-  pull_request:
-  workflow_dispatch:
-  schedule: [cron: "40 1 * * *"]
-
-permissions:
-  contents: read
-
-env:
-  RUSTFLAGS: -Dwarnings
-
-jobs:
-  test:
-    name: Rust nightly ${{matrix.os == 'windows' && '(windows)' || ''}}
-    runs-on: ${{matrix.os}}-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ubuntu, windows]
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@nightly
-      - run: cargo test
-      - run: cargo test --features preserve_order --tests -- --skip ui --exact
-      - run: cargo test --features float_roundtrip --tests -- --skip ui --exact
-      - run: cargo test --features arbitrary_precision --tests -- --skip ui --exact
-      - run: cargo test --features float_roundtrip,arbitrary_precision --tests -- --skip ui --exact
-      - run: cargo test --features raw_value --tests -- --skip ui --exact
-      - run: cargo test --features unbounded_depth --tests -- --skip ui --exact
-      - uses: actions/upload-artifact@v4
-        if: matrix.os == 'ubuntu' && always()
-        with:
-          name: Cargo.lock
-          path: Cargo.lock
-        continue-on-error: true
-
-  build:
-    name: Rust ${{matrix.rust}} ${{matrix.os == 'windows' && '(windows)' || ''}}
-    runs-on: ${{matrix.os}}-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        rust: [beta, 1.65.0, 1.56.1]
-        os: [ubuntu]
-        include:
-          - rust: stable
-            os: ubuntu
-            target: aarch64-unknown-none
-          - rust: stable
-            os: windows
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: ${{matrix.rust}}
-          targets: ${{matrix.target}}
-      - run: cargo check --manifest-path tests/crate/Cargo.toml
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --features float_roundtrip
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --features arbitrary_precision
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --features raw_value
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --features unbounded_depth
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --no-default-features --features alloc
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --no-default-features --features alloc,arbitrary_precision
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --no-default-features --features alloc,raw_value
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --features serde_json/preserve_order
-        if: matrix.rust != '1.56.1'
-      - run: cargo check --manifest-path tests/crate/Cargo.toml --no-default-features --features alloc,serde_json/preserve_order
-        if: matrix.rust != '1.56.1'
-      - name: Build without std
-        run: cargo check --manifest-path tests/crate/Cargo.toml --target ${{matrix.target}} --no-default-features --features alloc
-        if: matrix.target
-
-  minimal:
-    name: Minimal versions
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@nightly
-      - run: cargo generate-lockfile -Z minimal-versions
-      - run: cargo check --locked
-
-  miri:
-    name: Miri (${{matrix.name}})
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - name: 64-bit little endian
-            target: x86_64-unknown-linux-gnu
-          - name: 64-bit big endian
-            target: powerpc64-unknown-linux-gnu
-          - name: 32-bit little endian
-            target: i686-unknown-linux-gnu
-          - name: 32-bit big endian
-            target: mips-unknown-linux-gnu
-    env:
-      MIRIFLAGS: -Zmiri-strict-provenance
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@miri
-      - run: cargo miri setup
-      - run: cargo miri test --target ${{matrix.target}}
-      - run: cargo miri test --target ${{matrix.target}} --features preserve_order,float_roundtrip,arbitrary_precision,raw_value
-
-  clippy:
-    name: Clippy
-    runs-on: ubuntu-latest
-    if: github.event_name != 'pull_request'
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@clippy
-      - run: cargo clippy --tests -- -Dclippy::all -Dclippy::pedantic
-      - run: cargo clippy --all-features --tests -- -Dclippy::all -Dclippy::pedantic
-
-  doc:
-    name: Documentation
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    env:
-      RUSTDOCFLAGS: -Dwarnings
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@nightly
-      - uses: dtolnay/install@cargo-docs-rs
-      - run: cargo docs-rs
-
-  fuzz:
-    name: Fuzz
-    runs-on: ubuntu-latest
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@nightly
-      - uses: dtolnay/install@cargo-fuzz
-      - run: cargo fuzz check
-
-  outdated:
-    name: Outdated
-    runs-on: ubuntu-latest
-    if: github.event_name != 'pull_request'
-    timeout-minutes: 45
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@stable
-      - uses: dtolnay/install@cargo-outdated
-      - run: cargo outdated --exit-code 1
-      - run: cargo outdated --manifest-path fuzz/Cargo.toml --exit-code 1
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.gitignore b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.gitignore
deleted file mode 100644
index e9e21997..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/target/
-/Cargo.lock
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/CONTRIBUTING.md b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/CONTRIBUTING.md
deleted file mode 100644
index 26e1578..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/CONTRIBUTING.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributing to Serde
-
-Serde welcomes contribution from everyone in the form of suggestions, bug
-reports, pull requests, and feedback. This document gives some guidance if you
-are thinking of helping us.
-
-## Submitting bug reports and feature requests
-
-Serde development is spread across lots of repositories. In general, prefer to
-open issues against the main [serde-rs/serde] repository unless the topic is
-clearly specific to JSON.
-
-[serde-rs/serde]: https://github.com/serde-rs/serde
-
-When reporting a bug or asking for help, please include enough details so that
-the people helping you can reproduce the behavior you are seeing. For some tips
-on how to approach this, read about how to produce a [Minimal, Complete, and
-Verifiable example].
-
-[Minimal, Complete, and Verifiable example]: https://stackoverflow.com/help/mcve
-
-When making a feature request, please make it clear what problem you intend to
-solve with the feature, any ideas for how Serde could support solving that
-problem, any possible alternatives, and any disadvantages.
-
-## Running the test suite
-
-We encourage you to check that the test suite passes locally before submitting a
-pull request with your changes. If anything does not pass, typically it will be
-easier to iterate and fix it locally than waiting for the CI servers to run
-tests for you.
-
-The test suite requires a nightly compiler.
-
-```sh
-# Run the full test suite, including doc test and compile-tests
-cargo test
-```
-
-## Conduct
-
-In all Serde-related forums, we follow the [Rust Code of Conduct]. For
-escalation or moderation issues please contact Erick (erick.tryzelaar@gmail.com)
-instead of the Rust moderation team.
-
-[Rust Code of Conduct]: https://www.rust-lang.org/policies/code-of-conduct
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.lock b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.lock
deleted file mode 100644
index 4f20a2e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.lock
+++ /dev/null
@@ -1,417 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "automod"
-version = "1.0.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edf3ee19dbc0a46d740f6f0926bde8c50f02bdbc7b536842da28f6ac56513a8b"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "cc"
-version = "1.2.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c"
-dependencies = [
- "shlex",
-]
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "dissimilar"
-version = "1.0.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59f8e79d1fbf76bdfbde321e902714bf6c49df88a7dda6fc682fc2979226962d"
-
-[[package]]
-name = "equivalent"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
-
-[[package]]
-name = "glob"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
-
-[[package]]
-name = "hashbrown"
-version = "0.15.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
-
-[[package]]
-name = "indexmap"
-version = "2.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652"
-dependencies = [
- "equivalent",
- "hashbrown",
-]
-
-[[package]]
-name = "indoc"
-version = "2.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5"
-
-[[package]]
-name = "itoa"
-version = "1.0.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
-
-[[package]]
-name = "libc"
-version = "0.2.170"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828"
-
-[[package]]
-name = "memchr"
-version = "2.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "psm"
-version = "0.1.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f58e5423e24c18cc840e1c98370b3993c6649cd1678b4d24318bcf0a083cbe88"
-dependencies = [
- "cc",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.39"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "ref-cast"
-version = "1.0.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf"
-dependencies = [
- "ref-cast-impl",
-]
-
-[[package]]
-name = "ref-cast-impl"
-version = "1.0.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "rustversion"
-version = "1.0.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4"
-
-[[package]]
-name = "ryu"
-version = "1.0.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd"
-
-[[package]]
-name = "serde"
-version = "1.0.218"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_bytes"
-version = "0.11.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "364fec0df39c49a083c9a8a18a23a6bcfd9af130fe9fe321d18520a0d113e09e"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.218"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.139"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6"
-dependencies = [
- "itoa",
- "memchr",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.140"
-dependencies = [
- "automod",
- "indexmap",
- "indoc",
- "itoa",
- "memchr",
- "ref-cast",
- "rustversion",
- "ryu",
- "serde",
- "serde_bytes",
- "serde_derive",
- "serde_stacker",
- "trybuild",
-]
-
-[[package]]
-name = "serde_spanned"
-version = "0.6.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "serde_stacker"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69c8defe6c780725cce4ec6ad3bd91e321baf6fa4e255df1f31e345d507ef01a"
-dependencies = [
- "serde",
- "stacker",
-]
-
-[[package]]
-name = "shlex"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
-
-[[package]]
-name = "stacker"
-version = "0.1.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9156ebd5870ef293bfb43f91c7a74528d363ec0d424afe24160ed5a4343d08a"
-dependencies = [
- "cc",
- "cfg-if",
- "libc",
- "psm",
- "windows-sys",
-]
-
-[[package]]
-name = "syn"
-version = "2.0.99"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "target-triple"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790"
-
-[[package]]
-name = "termcolor"
-version = "1.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
-dependencies = [
- "winapi-util",
-]
-
-[[package]]
-name = "toml"
-version = "0.8.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148"
-dependencies = [
- "serde",
- "serde_spanned",
- "toml_datetime",
- "toml_edit",
-]
-
-[[package]]
-name = "toml_datetime"
-version = "0.6.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "toml_edit"
-version = "0.22.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474"
-dependencies = [
- "indexmap",
- "serde",
- "serde_spanned",
- "toml_datetime",
- "winnow",
-]
-
-[[package]]
-name = "trybuild"
-version = "1.0.103"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b812699e0c4f813b872b373a4471717d9eb550da14b311058a4d9cf4173cbca6"
-dependencies = [
- "dissimilar",
- "glob",
- "serde",
- "serde_derive",
- "serde_json 1.0.139",
- "target-triple",
- "termcolor",
- "toml",
-]
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe"
-
-[[package]]
-name = "winapi-util"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
-dependencies = [
- "windows-sys",
-]
-
-[[package]]
-name = "windows-sys"
-version = "0.59.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
-dependencies = [
- "windows-targets",
-]
-
-[[package]]
-name = "windows-targets"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
-dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_gnullvm",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
-]
-
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
-
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
-
-[[package]]
-name = "windows_i686_gnu"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
-
-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
-
-[[package]]
-name = "windows_i686_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
-
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
-
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
-
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
-
-[[package]]
-name = "winnow"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1"
-dependencies = [
- "memchr",
-]
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.toml
deleted file mode 100644
index 2fa24f5e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.toml
+++ /dev/null
@@ -1,157 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.56"
-name = "serde_json"
-version = "1.0.140"
-authors = [
-    "Erick Tryzelaar <erick.tryzelaar@gmail.com>",
-    "David Tolnay <dtolnay@gmail.com>",
-]
-build = "build.rs"
-autolib = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = "A JSON serialization file format"
-documentation = "https://docs.rs/serde_json"
-readme = "README.md"
-keywords = [
-    "json",
-    "serde",
-    "serialization",
-]
-categories = [
-    "encoding",
-    "parser-implementations",
-    "no-std",
-]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/serde-rs/json"
-
-[package.metadata.docs.rs]
-features = [
-    "preserve_order",
-    "raw_value",
-    "unbounded_depth",
-]
-rustdoc-args = [
-    "--generate-link-to-definition",
-    "--extern-html-root-url=core=https://doc.rust-lang.org",
-    "--extern-html-root-url=alloc=https://doc.rust-lang.org",
-    "--extern-html-root-url=std=https://doc.rust-lang.org",
-]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[package.metadata.playground]
-features = [
-    "float_roundtrip",
-    "raw_value",
-    "unbounded_depth",
-]
-
-[features]
-alloc = ["serde/alloc"]
-arbitrary_precision = []
-default = ["std"]
-float_roundtrip = []
-preserve_order = [
-    "indexmap",
-    "std",
-]
-raw_value = []
-std = [
-    "memchr/std",
-    "serde/std",
-]
-unbounded_depth = []
-
-[lib]
-name = "serde_json"
-path = "src/lib.rs"
-
-[[test]]
-name = "compiletest"
-path = "tests/compiletest.rs"
-
-[[test]]
-name = "debug"
-path = "tests/debug.rs"
-
-[[test]]
-name = "lexical"
-path = "tests/lexical.rs"
-
-[[test]]
-name = "map"
-path = "tests/map.rs"
-
-[[test]]
-name = "regression"
-path = "tests/regression.rs"
-
-[[test]]
-name = "stream"
-path = "tests/stream.rs"
-
-[[test]]
-name = "test"
-path = "tests/test.rs"
-
-[dependencies.indexmap]
-version = "2.2.3"
-optional = true
-
-[dependencies.itoa]
-version = "1.0"
-
-[dependencies.memchr]
-version = "2"
-default-features = false
-
-[dependencies.ryu]
-version = "1.0"
-
-[dependencies.serde]
-version = "1.0.194"
-default-features = false
-
-[dev-dependencies.automod]
-version = "1.0.11"
-
-[dev-dependencies.indoc]
-version = "2.0.2"
-
-[dev-dependencies.ref-cast]
-version = "1.0.18"
-
-[dev-dependencies.rustversion]
-version = "1.0.13"
-
-[dev-dependencies.serde]
-version = "1.0.194"
-features = ["derive"]
-
-[dev-dependencies.serde_bytes]
-version = "0.11.10"
-
-[dev-dependencies.serde_derive]
-version = "1.0.166"
-
-[dev-dependencies.serde_stacker]
-version = "0.1.8"
-
-[dev-dependencies.trybuild]
-version = "1.0.81"
-features = ["diff"]
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.toml.orig
deleted file mode 100644
index 866c313..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/Cargo.toml.orig
+++ /dev/null
@@ -1,91 +0,0 @@
-[package]
-name = "serde_json"
-version = "1.0.140"
-authors = ["Erick Tryzelaar <erick.tryzelaar@gmail.com>", "David Tolnay <dtolnay@gmail.com>"]
-categories = ["encoding", "parser-implementations", "no-std"]
-description = "A JSON serialization file format"
-documentation = "https://docs.rs/serde_json"
-edition = "2021"
-keywords = ["json", "serde", "serialization"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/serde-rs/json"
-rust-version = "1.56"
-
-[dependencies]
-indexmap = { version = "2.2.3", optional = true }
-itoa = "1.0"
-memchr = { version = "2", default-features = false }
-ryu = "1.0"
-serde = { version = "1.0.194", default-features = false }
-
-[dev-dependencies]
-automod = "1.0.11"
-indoc = "2.0.2"
-ref-cast = "1.0.18"
-rustversion = "1.0.13"
-serde = { version = "1.0.194", features = ["derive"] }
-serde_bytes = "0.11.10"
-serde_derive = "1.0.166"
-serde_stacker = "0.1.8"
-trybuild = { version = "1.0.81", features = ["diff"] }
-
-[package.metadata.docs.rs]
-features = ["preserve_order", "raw_value", "unbounded_depth"]
-targets = ["x86_64-unknown-linux-gnu"]
-rustdoc-args = [
-    "--generate-link-to-definition",
-    "--extern-html-root-url=core=https://doc.rust-lang.org",
-    "--extern-html-root-url=alloc=https://doc.rust-lang.org",
-    "--extern-html-root-url=std=https://doc.rust-lang.org",
-]
-
-[package.metadata.playground]
-features = ["float_roundtrip", "raw_value", "unbounded_depth"]
-
-
-### FEATURES #################################################################
-
-[features]
-default = ["std"]
-
-std = ["memchr/std", "serde/std"]
-
-# Provide integration for heap-allocated collections without depending on the
-# rest of the Rust standard library.
-# NOTE: Disabling both `std` *and* `alloc` features is not supported yet.
-alloc = ["serde/alloc"]
-
-# Make serde_json::Map use a representation which maintains insertion order.
-# This allows data to be read into a Value and written back to a JSON string
-# while preserving the order of map keys in the input.
-preserve_order = ["indexmap", "std"]
-
-# Use sufficient precision when parsing fixed precision floats from JSON to
-# ensure that they maintain accuracy when round-tripped through JSON. This comes
-# at an approximately 2x performance cost for parsing floats compared to the
-# default best-effort precision.
-#
-# Unlike arbitrary_precision, this feature makes f64 -> JSON -> f64 produce
-# output identical to the input.
-float_roundtrip = []
-
-# Use an arbitrary precision number representation for serde_json::Number. This
-# allows JSON numbers of arbitrary size/precision to be read into a Number and
-# written back to a JSON string without loss of precision.
-#
-# Unlike float_roundtrip, this feature makes JSON -> serde_json::Number -> JSON
-# produce output identical to the input.
-arbitrary_precision = []
-
-# Provide a RawValue type that can hold unprocessed JSON during deserialization.
-raw_value = []
-
-# Provide a method disable_recursion_limit to parse arbitrarily deep JSON
-# structures without any consideration for overflowing the stack. When using
-# this feature, you will want to provide some other way to protect against stack
-# overflows, such as by wrapping your Deserializer in the dynamically growing
-# stack adapter provided by the serde_stacker crate. Additionally you will need
-# to be careful around other recursive operations on the parsed result which may
-# overflow the stack after deserialization has completed, including, but not
-# limited to, Display and Debug and Drop impls.
-unbounded_depth = []
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/LICENSE-APACHE
deleted file mode 100644
index 1b5ec8b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/LICENSE-APACHE
+++ /dev/null
@@ -1,176 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/LICENSE-MIT
deleted file mode 100644
index 31aa7938..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/LICENSE-MIT
+++ /dev/null
@@ -1,23 +0,0 @@
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/README.md b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/README.md
deleted file mode 100644
index be70b7b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/README.md
+++ /dev/null
@@ -1,390 +0,0 @@
-# Serde JSON &emsp; [![Build Status]][actions] [![Latest Version]][crates.io] [![Rustc Version 1.36+]][rustc]
-
-[Build Status]: https://img.shields.io/github/actions/workflow/status/serde-rs/json/ci.yml?branch=master
-[actions]: https://github.com/serde-rs/json/actions?query=branch%3Amaster
-[Latest Version]: https://img.shields.io/crates/v/serde_json.svg
-[crates.io]: https://crates.io/crates/serde\_json
-[Rustc Version 1.36+]: https://img.shields.io/badge/rustc-1.36+-lightgray.svg
-[rustc]: https://blog.rust-lang.org/2019/07/04/Rust-1.36.0.html
-
-**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.**
-
----
-
-```toml
-[dependencies]
-serde_json = "1.0"
-```
-
-You may be looking for:
-
-- [JSON API documentation](https://docs.rs/serde_json)
-- [Serde API documentation](https://docs.rs/serde)
-- [Detailed documentation about Serde](https://serde.rs/)
-- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/derive.html)
-- [Release notes](https://github.com/serde-rs/json/releases)
-
-JSON is a ubiquitous open-standard format that uses human-readable text to
-transmit data objects consisting of key-value pairs.
-
-```json
-{
-    "name": "John Doe",
-    "age": 43,
-    "address": {
-        "street": "10 Downing Street",
-        "city": "London"
-    },
-    "phones": [
-        "+44 1234567",
-        "+44 2345678"
-    ]
-}
-```
-
-There are three common ways that you might find yourself needing to work with
-JSON data in Rust.
-
- - **As text data.** An unprocessed string of JSON data that you receive on an
-   HTTP endpoint, read from a file, or prepare to send to a remote server.
- - **As an untyped or loosely typed representation.** Maybe you want to check
-   that some JSON data is valid before passing it on, but without knowing the
-   structure of what it contains. Or you want to do very basic manipulations
-   like insert a key in a particular spot.
- - **As a strongly typed Rust data structure.** When you expect all or most of
-   your data to conform to a particular structure and want to get real work done
-   without JSON's loosey-goosey nature tripping you up.
-
-Serde JSON provides efficient, flexible, safe ways of converting data between
-each of these representations.
-
-## Operating on untyped JSON values
-
-Any valid JSON data can be manipulated in the following recursive enum
-representation. This data structure is [`serde_json::Value`][value].
-
-```rust
-enum Value {
-    Null,
-    Bool(bool),
-    Number(Number),
-    String(String),
-    Array(Vec<Value>),
-    Object(Map<String, Value>),
-}
-```
-
-A string of JSON data can be parsed into a `serde_json::Value` by the
-[`serde_json::from_str`][from_str] function. There is also
-[`from_slice`][from_slice] for parsing from a byte slice `&[u8]` and
-[`from_reader`][from_reader] for parsing from any `io::Read` like a File or a
-TCP stream.
-
-<div align="right">
-<a href="https://play.rust-lang.org/?edition=2018&gist=d69d8e3156d4bb81c4461b60b772ab72" target="_blank">
-<img align="center" width="85" src="https://raw.githubusercontent.com/serde-rs/serde-rs.github.io/master/img/runtab.png">
-</a>
-</div>
-
-```rust
-use serde_json::{Result, Value};
-
-fn untyped_example() -> Result<()> {
-    // Some JSON input data as a &str. Maybe this comes from the user.
-    let data = r#"
-        {
-            "name": "John Doe",
-            "age": 43,
-            "phones": [
-                "+44 1234567",
-                "+44 2345678"
-            ]
-        }"#;
-
-    // Parse the string of data into serde_json::Value.
-    let v: Value = serde_json::from_str(data)?;
-
-    // Access parts of the data by indexing with square brackets.
-    println!("Please call {} at the number {}", v["name"], v["phones"][0]);
-
-    Ok(())
-}
-```
-
-The result of square bracket indexing like `v["name"]` is a borrow of the data
-at that index, so the type is `&Value`. A JSON map can be indexed with string
-keys, while a JSON array can be indexed with integer keys. If the type of the
-data is not right for the type with which it is being indexed, or if a map does
-not contain the key being indexed, or if the index into a vector is out of
-bounds, the returned element is `Value::Null`.
-
-When a `Value` is printed, it is printed as a JSON string. So in the code above,
-the output looks like `Please call "John Doe" at the number "+44 1234567"`. The
-quotation marks appear because `v["name"]` is a `&Value` containing a JSON
-string and its JSON representation is `"John Doe"`. Printing as a plain string
-without quotation marks involves converting from a JSON string to a Rust string
-with [`as_str()`] or avoiding the use of `Value` as described in the following
-section.
-
-[`as_str()`]: https://docs.rs/serde_json/1/serde_json/enum.Value.html#method.as_str
-
-The `Value` representation is sufficient for very basic tasks but can be tedious
-to work with for anything more significant. Error handling is verbose to
-implement correctly, for example imagine trying to detect the presence of
-unrecognized fields in the input data. The compiler is powerless to help you
-when you make a mistake, for example imagine typoing `v["name"]` as `v["nmae"]`
-in one of the dozens of places it is used in your code.
-
-## Parsing JSON as strongly typed data structures
-
-Serde provides a powerful way of mapping JSON data into Rust data structures
-largely automatically.
-
-<div align="right">
-<a href="https://play.rust-lang.org/?edition=2018&gist=15cfab66d38ff8a15a9cf1d8d897ac68" target="_blank">
-<img align="center" width="85" src="https://raw.githubusercontent.com/serde-rs/serde-rs.github.io/master/img/runtab.png">
-</a>
-</div>
-
-```rust
-use serde::{Deserialize, Serialize};
-use serde_json::Result;
-
-#[derive(Serialize, Deserialize)]
-struct Person {
-    name: String,
-    age: u8,
-    phones: Vec<String>,
-}
-
-fn typed_example() -> Result<()> {
-    // Some JSON input data as a &str. Maybe this comes from the user.
-    let data = r#"
-        {
-            "name": "John Doe",
-            "age": 43,
-            "phones": [
-                "+44 1234567",
-                "+44 2345678"
-            ]
-        }"#;
-
-    // Parse the string of data into a Person object. This is exactly the
-    // same function as the one that produced serde_json::Value above, but
-    // now we are asking it for a Person as output.
-    let p: Person = serde_json::from_str(data)?;
-
-    // Do things just like with any other Rust data structure.
-    println!("Please call {} at the number {}", p.name, p.phones[0]);
-
-    Ok(())
-}
-```
-
-This is the same `serde_json::from_str` function as before, but this time we
-assign the return value to a variable of type `Person` so Serde will
-automatically interpret the input data as a `Person` and produce informative
-error messages if the layout does not conform to what a `Person` is expected to
-look like.
-
-Any type that implements Serde's `Deserialize` trait can be deserialized this
-way. This includes built-in Rust standard library types like `Vec<T>` and
-`HashMap<K, V>`, as well as any structs or enums annotated with
-`#[derive(Deserialize)]`.
-
-Once we have `p` of type `Person`, our IDE and the Rust compiler can help us use
-it correctly like they do for any other Rust code. The IDE can autocomplete
-field names to prevent typos, which was impossible in the `serde_json::Value`
-representation. And the Rust compiler can check that when we write
-`p.phones[0]`, then `p.phones` is guaranteed to be a `Vec<String>` so indexing
-into it makes sense and produces a `String`.
-
-The necessary setup for using Serde's derive macros is explained on the *[Using
-derive]* page of the Serde site.
-
-[Using derive]: https://serde.rs/derive.html
-
-## Constructing JSON values
-
-Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value`
-objects with very natural JSON syntax.
-
-<div align="right">
-<a href="https://play.rust-lang.org/?edition=2018&gist=6ccafad431d72b62e77cc34c8e879b24" target="_blank">
-<img align="center" width="85" src="https://raw.githubusercontent.com/serde-rs/serde-rs.github.io/master/img/runtab.png">
-</a>
-</div>
-
-```rust
-use serde_json::json;
-
-fn main() {
-    // The type of `john` is `serde_json::Value`
-    let john = json!({
-        "name": "John Doe",
-        "age": 43,
-        "phones": [
-            "+44 1234567",
-            "+44 2345678"
-        ]
-    });
-
-    println!("first phone number: {}", john["phones"][0]);
-
-    // Convert to a string of JSON and print it out
-    println!("{}", john.to_string());
-}
-```
-
-The `Value::to_string()` function converts a `serde_json::Value` into a `String`
-of JSON text.
-
-One neat thing about the `json!` macro is that variables and expressions can be
-interpolated directly into the JSON value as you are building it. Serde will
-check at compile time that the value you are interpolating is able to be
-represented as JSON.
-
-<div align="right">
-<a href="https://play.rust-lang.org/?edition=2018&gist=f9101a6e61dfc9e02c6a67f315ed24f2" target="_blank">
-<img align="center" width="85" src="https://raw.githubusercontent.com/serde-rs/serde-rs.github.io/master/img/runtab.png">
-</a>
-</div>
-
-```rust
-let full_name = "John Doe";
-let age_last_year = 42;
-
-// The type of `john` is `serde_json::Value`
-let john = json!({
-    "name": full_name,
-    "age": age_last_year + 1,
-    "phones": [
-        format!("+44 {}", random_phone())
-    ]
-});
-```
-
-This is amazingly convenient, but we have the problem we had before with
-`Value`: the IDE and Rust compiler cannot help us if we get it wrong. Serde JSON
-provides a better way of serializing strongly-typed data structures into JSON
-text.
-
-## Creating JSON by serializing data structures
-
-A data structure can be converted to a JSON string by
-[`serde_json::to_string`][to_string]. There is also
-[`serde_json::to_vec`][to_vec] which serializes to a `Vec<u8>` and
-[`serde_json::to_writer`][to_writer] which serializes to any `io::Write`
-such as a File or a TCP stream.
-
-<div align="right">
-<a href="https://play.rust-lang.org/?edition=2018&gist=3472242a08ed2ff88a944f2a2283b0ee" target="_blank">
-<img align="center" width="85" src="https://raw.githubusercontent.com/serde-rs/serde-rs.github.io/master/img/runtab.png">
-</a>
-</div>
-
-```rust
-use serde::{Deserialize, Serialize};
-use serde_json::Result;
-
-#[derive(Serialize, Deserialize)]
-struct Address {
-    street: String,
-    city: String,
-}
-
-fn print_an_address() -> Result<()> {
-    // Some data structure.
-    let address = Address {
-        street: "10 Downing Street".to_owned(),
-        city: "London".to_owned(),
-    };
-
-    // Serialize it to a JSON string.
-    let j = serde_json::to_string(&address)?;
-
-    // Print, write to a file, or send to an HTTP server.
-    println!("{}", j);
-
-    Ok(())
-}
-```
-
-Any type that implements Serde's `Serialize` trait can be serialized this way.
-This includes built-in Rust standard library types like `Vec<T>` and `HashMap<K,
-V>`, as well as any structs or enums annotated with `#[derive(Serialize)]`.
-
-## Performance
-
-It is fast. You should expect in the ballpark of 500 to 1000 megabytes per
-second deserialization and 600 to 900 megabytes per second serialization,
-depending on the characteristics of your data. This is competitive with the
-fastest C and C++ JSON libraries or even 30% faster for many use cases.
-Benchmarks live in the [serde-rs/json-benchmark] repo.
-
-[serde-rs/json-benchmark]: https://github.com/serde-rs/json-benchmark
-
-## Getting help
-
-Serde is one of the most widely used Rust libraries, so any place that
-Rustaceans congregate will be able to help you out. For chat, consider trying
-the [#rust-questions] or [#rust-beginners] channels of the unofficial community
-Discord (invite: <https://discord.gg/rust-lang-community>), the [#rust-usage] or
-[#beginners] channels of the official Rust Project Discord (invite:
-<https://discord.gg/rust-lang>), or the [#general][zulip] stream in Zulip. For
-asynchronous, consider the [\[rust\] tag on StackOverflow][stackoverflow], the
-[/r/rust] subreddit which has a pinned weekly easy questions post, or the Rust
-[Discourse forum][discourse]. It's acceptable to file a support issue in this
-repo, but they tend not to get as many eyes as any of the above and may get
-closed without a response after some time.
-
-[#rust-questions]: https://discord.com/channels/273534239310479360/274215136414400513
-[#rust-beginners]: https://discord.com/channels/273534239310479360/273541522815713281
-[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848
-[#beginners]: https://discord.com/channels/442252698964721669/448238009733742612
-[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general
-[stackoverflow]: https://stackoverflow.com/questions/tagged/rust
-[/r/rust]: https://www.reddit.com/r/rust
-[discourse]: https://users.rust-lang.org
-
-## No-std support
-
-As long as there is a memory allocator, it is possible to use serde_json without
-the rest of the Rust standard library. Disable the default "std" feature and
-enable the "alloc" feature:
-
-```toml
-[dependencies]
-serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
-```
-
-For JSON support in Serde without a memory allocator, please see the
-[`serde-json-core`] crate.
-
-[`serde-json-core`]: https://github.com/rust-embedded-community/serde-json-core
-
-[value]: https://docs.rs/serde_json/1/serde_json/value/enum.Value.html
-[from_str]: https://docs.rs/serde_json/1/serde_json/de/fn.from_str.html
-[from_slice]: https://docs.rs/serde_json/1/serde_json/de/fn.from_slice.html
-[from_reader]: https://docs.rs/serde_json/1/serde_json/de/fn.from_reader.html
-[to_string]: https://docs.rs/serde_json/1/serde_json/ser/fn.to_string.html
-[to_vec]: https://docs.rs/serde_json/1/serde_json/ser/fn.to_vec.html
-[to_writer]: https://docs.rs/serde_json/1/serde_json/ser/fn.to_writer.html
-[macro]: https://docs.rs/serde_json/1/serde_json/macro.json.html
-
-<br>
-
-#### License
-
-<sup>
-Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
-2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
-</sup>
-
-<br>
-
-<sub>
-Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
-be dual licensed as above, without any additional terms or conditions.
-</sub>
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/build.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/build.rs
deleted file mode 100644
index 29907eaf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/build.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use std::env;
-
-fn main() {
-    println!("cargo:rerun-if-changed=build.rs");
-
-    println!("cargo:rustc-check-cfg=cfg(fast_arithmetic, values(\"32\", \"64\"))");
-
-    // Decide ideal limb width for arithmetic in the float parser and string
-    // parser.
-    let target_arch = env::var_os("CARGO_CFG_TARGET_ARCH").unwrap();
-    let target_pointer_width = env::var_os("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap();
-    if target_arch == "aarch64"
-        || target_arch == "loongarch64"
-        || target_arch == "mips64"
-        || target_arch == "powerpc64"
-        || target_arch == "wasm32"
-        || target_arch == "x86_64"
-        || target_pointer_width == "64"
-    {
-        // The above list of architectures are ones that have native support for
-        // 64-bit arithmetic, but which have some targets using a smaller
-        // pointer width. Examples include aarch64-unknown-linux-gnu_ilp32 and
-        // x86_64-unknown-linux-gnux32. So our choice of limb width is not
-        // equivalent to using usize everywhere.
-        println!("cargo:rustc-cfg=fast_arithmetic=\"64\"");
-    } else {
-        println!("cargo:rustc-cfg=fast_arithmetic=\"32\"");
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/de.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/de.rs
deleted file mode 100644
index 4080c54..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/de.rs
+++ /dev/null
@@ -1,2702 +0,0 @@
-//! Deserialize JSON data to a Rust data structure.
-
-use crate::error::{Error, ErrorCode, Result};
-#[cfg(feature = "float_roundtrip")]
-use crate::lexical;
-use crate::number::Number;
-use crate::read::{self, Fused, Reference};
-use alloc::string::String;
-use alloc::vec::Vec;
-#[cfg(feature = "float_roundtrip")]
-use core::iter;
-use core::iter::FusedIterator;
-use core::marker::PhantomData;
-use core::result;
-use core::str::FromStr;
-use serde::de::{self, Expected, Unexpected};
-use serde::forward_to_deserialize_any;
-
-#[cfg(feature = "arbitrary_precision")]
-use crate::number::NumberDeserializer;
-
-pub use crate::read::{Read, SliceRead, StrRead};
-
-#[cfg(feature = "std")]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-pub use crate::read::IoRead;
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// A structure that deserializes JSON into Rust values.
-pub struct Deserializer<R> {
-    read: R,
-    scratch: Vec<u8>,
-    remaining_depth: u8,
-    #[cfg(feature = "float_roundtrip")]
-    single_precision: bool,
-    #[cfg(feature = "unbounded_depth")]
-    disable_recursion_limit: bool,
-}
-
-impl<'de, R> Deserializer<R>
-where
-    R: read::Read<'de>,
-{
-    /// Create a JSON deserializer from one of the possible serde_json input
-    /// sources.
-    ///
-    /// When reading from a source against which short reads are not efficient, such
-    /// as a [`File`], you will want to apply your own buffering because serde_json
-    /// will not buffer the input. See [`std::io::BufReader`].
-    ///
-    /// Typically it is more convenient to use one of these methods instead:
-    ///
-    ///   - Deserializer::from_str
-    ///   - Deserializer::from_slice
-    ///   - Deserializer::from_reader
-    ///
-    /// [`File`]: std::fs::File
-    pub fn new(read: R) -> Self {
-        Deserializer {
-            read,
-            scratch: Vec::new(),
-            remaining_depth: 128,
-            #[cfg(feature = "float_roundtrip")]
-            single_precision: false,
-            #[cfg(feature = "unbounded_depth")]
-            disable_recursion_limit: false,
-        }
-    }
-}
-
-#[cfg(feature = "std")]
-impl<R> Deserializer<read::IoRead<R>>
-where
-    R: crate::io::Read,
-{
-    /// Creates a JSON deserializer from an `io::Read`.
-    ///
-    /// Reader-based deserializers do not support deserializing borrowed types
-    /// like `&str`, since the `std::io::Read` trait has no non-copying methods
-    /// -- everything it does involves copying bytes out of the data source.
-    pub fn from_reader(reader: R) -> Self {
-        Deserializer::new(read::IoRead::new(reader))
-    }
-}
-
-impl<'a> Deserializer<read::SliceRead<'a>> {
-    /// Creates a JSON deserializer from a `&[u8]`.
-    pub fn from_slice(bytes: &'a [u8]) -> Self {
-        Deserializer::new(read::SliceRead::new(bytes))
-    }
-}
-
-impl<'a> Deserializer<read::StrRead<'a>> {
-    /// Creates a JSON deserializer from a `&str`.
-    pub fn from_str(s: &'a str) -> Self {
-        Deserializer::new(read::StrRead::new(s))
-    }
-}
-
-macro_rules! overflow {
-    ($a:ident * 10 + $b:ident, $c:expr) => {
-        match $c {
-            c => $a >= c / 10 && ($a > c / 10 || $b > c % 10),
-        }
-    };
-}
-
-pub(crate) enum ParserNumber {
-    F64(f64),
-    U64(u64),
-    I64(i64),
-    #[cfg(feature = "arbitrary_precision")]
-    String(String),
-}
-
-impl ParserNumber {
-    fn visit<'de, V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        match self {
-            ParserNumber::F64(x) => visitor.visit_f64(x),
-            ParserNumber::U64(x) => visitor.visit_u64(x),
-            ParserNumber::I64(x) => visitor.visit_i64(x),
-            #[cfg(feature = "arbitrary_precision")]
-            ParserNumber::String(x) => visitor.visit_map(NumberDeserializer { number: x.into() }),
-        }
-    }
-
-    fn invalid_type(self, exp: &dyn Expected) -> Error {
-        match self {
-            ParserNumber::F64(x) => de::Error::invalid_type(Unexpected::Float(x), exp),
-            ParserNumber::U64(x) => de::Error::invalid_type(Unexpected::Unsigned(x), exp),
-            ParserNumber::I64(x) => de::Error::invalid_type(Unexpected::Signed(x), exp),
-            #[cfg(feature = "arbitrary_precision")]
-            ParserNumber::String(_) => de::Error::invalid_type(Unexpected::Other("number"), exp),
-        }
-    }
-}
-
-impl<'de, R: Read<'de>> Deserializer<R> {
-    /// The `Deserializer::end` method should be called after a value has been fully deserialized.
-    /// This allows the `Deserializer` to validate that the input stream is at the end or that it
-    /// only has trailing whitespace.
-    pub fn end(&mut self) -> Result<()> {
-        match tri!(self.parse_whitespace()) {
-            Some(_) => Err(self.peek_error(ErrorCode::TrailingCharacters)),
-            None => Ok(()),
-        }
-    }
-
-    /// Turn a JSON deserializer into an iterator over values of type T.
-    pub fn into_iter<T>(self) -> StreamDeserializer<'de, R, T>
-    where
-        T: de::Deserialize<'de>,
-    {
-        // This cannot be an implementation of std::iter::IntoIterator because
-        // we need the caller to choose what T is.
-        let offset = self.read.byte_offset();
-        StreamDeserializer {
-            de: self,
-            offset,
-            failed: false,
-            output: PhantomData,
-            lifetime: PhantomData,
-        }
-    }
-
-    /// Parse arbitrarily deep JSON structures without any consideration for
-    /// overflowing the stack.
-    ///
-    /// You will want to provide some other way to protect against stack
-    /// overflows, such as by wrapping your Deserializer in the dynamically
-    /// growing stack adapter provided by the serde_stacker crate. Additionally
-    /// you will need to be careful around other recursive operations on the
-    /// parsed result which may overflow the stack after deserialization has
-    /// completed, including, but not limited to, Display and Debug and Drop
-    /// impls.
-    ///
-    /// *This method is only available if serde_json is built with the
-    /// `"unbounded_depth"` feature.*
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde::Deserialize;
-    /// use serde_json::Value;
-    ///
-    /// fn main() {
-    ///     let mut json = String::new();
-    ///     for _ in 0..10000 {
-    ///         json = format!("[{}]", json);
-    ///     }
-    ///
-    ///     let mut deserializer = serde_json::Deserializer::from_str(&json);
-    ///     deserializer.disable_recursion_limit();
-    ///     let deserializer = serde_stacker::Deserializer::new(&mut deserializer);
-    ///     let value = Value::deserialize(deserializer).unwrap();
-    ///
-    ///     carefully_drop_nested_arrays(value);
-    /// }
-    ///
-    /// fn carefully_drop_nested_arrays(value: Value) {
-    ///     let mut stack = vec![value];
-    ///     while let Some(value) = stack.pop() {
-    ///         if let Value::Array(array) = value {
-    ///             stack.extend(array);
-    ///         }
-    ///     }
-    /// }
-    /// ```
-    #[cfg(feature = "unbounded_depth")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "unbounded_depth")))]
-    pub fn disable_recursion_limit(&mut self) {
-        self.disable_recursion_limit = true;
-    }
-
-    pub(crate) fn peek(&mut self) -> Result<Option<u8>> {
-        self.read.peek()
-    }
-
-    fn peek_or_null(&mut self) -> Result<u8> {
-        Ok(tri!(self.peek()).unwrap_or(b'\x00'))
-    }
-
-    fn eat_char(&mut self) {
-        self.read.discard();
-    }
-
-    fn next_char(&mut self) -> Result<Option<u8>> {
-        self.read.next()
-    }
-
-    fn next_char_or_null(&mut self) -> Result<u8> {
-        Ok(tri!(self.next_char()).unwrap_or(b'\x00'))
-    }
-
-    /// Error caused by a byte from next_char().
-    #[cold]
-    fn error(&self, reason: ErrorCode) -> Error {
-        let position = self.read.position();
-        Error::syntax(reason, position.line, position.column)
-    }
-
-    /// Error caused by a byte from peek().
-    #[cold]
-    fn peek_error(&self, reason: ErrorCode) -> Error {
-        let position = self.read.peek_position();
-        Error::syntax(reason, position.line, position.column)
-    }
-
-    /// Returns the first non-whitespace byte without consuming it, or `None` if
-    /// EOF is encountered.
-    fn parse_whitespace(&mut self) -> Result<Option<u8>> {
-        loop {
-            match tri!(self.peek()) {
-                Some(b' ' | b'\n' | b'\t' | b'\r') => {
-                    self.eat_char();
-                }
-                other => {
-                    return Ok(other);
-                }
-            }
-        }
-    }
-
-    #[cold]
-    fn peek_invalid_type(&mut self, exp: &dyn Expected) -> Error {
-        let err = match self.peek_or_null().unwrap_or(b'\x00') {
-            b'n' => {
-                self.eat_char();
-                if let Err(err) = self.parse_ident(b"ull") {
-                    return err;
-                }
-                de::Error::invalid_type(Unexpected::Unit, exp)
-            }
-            b't' => {
-                self.eat_char();
-                if let Err(err) = self.parse_ident(b"rue") {
-                    return err;
-                }
-                de::Error::invalid_type(Unexpected::Bool(true), exp)
-            }
-            b'f' => {
-                self.eat_char();
-                if let Err(err) = self.parse_ident(b"alse") {
-                    return err;
-                }
-                de::Error::invalid_type(Unexpected::Bool(false), exp)
-            }
-            b'-' => {
-                self.eat_char();
-                match self.parse_any_number(false) {
-                    Ok(n) => n.invalid_type(exp),
-                    Err(err) => return err,
-                }
-            }
-            b'0'..=b'9' => match self.parse_any_number(true) {
-                Ok(n) => n.invalid_type(exp),
-                Err(err) => return err,
-            },
-            b'"' => {
-                self.eat_char();
-                self.scratch.clear();
-                match self.read.parse_str(&mut self.scratch) {
-                    Ok(s) => de::Error::invalid_type(Unexpected::Str(&s), exp),
-                    Err(err) => return err,
-                }
-            }
-            b'[' => de::Error::invalid_type(Unexpected::Seq, exp),
-            b'{' => de::Error::invalid_type(Unexpected::Map, exp),
-            _ => self.peek_error(ErrorCode::ExpectedSomeValue),
-        };
-
-        self.fix_position(err)
-    }
-
-    pub(crate) fn deserialize_number<'any, V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'any>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'-' => {
-                self.eat_char();
-                tri!(self.parse_integer(false)).visit(visitor)
-            }
-            b'0'..=b'9' => tri!(self.parse_integer(true)).visit(visitor),
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    #[cfg(feature = "float_roundtrip")]
-    pub(crate) fn do_deserialize_f32<'any, V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'any>,
-    {
-        self.single_precision = true;
-        let val = self.deserialize_number(visitor);
-        self.single_precision = false;
-        val
-    }
-
-    pub(crate) fn do_deserialize_i128<'any, V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'any>,
-    {
-        let mut buf = String::new();
-
-        match tri!(self.parse_whitespace()) {
-            Some(b'-') => {
-                self.eat_char();
-                buf.push('-');
-            }
-            Some(_) => {}
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        }
-
-        tri!(self.scan_integer128(&mut buf));
-
-        let value = match buf.parse() {
-            Ok(int) => visitor.visit_i128(int),
-            Err(_) => {
-                return Err(self.error(ErrorCode::NumberOutOfRange));
-            }
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    pub(crate) fn do_deserialize_u128<'any, V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'any>,
-    {
-        match tri!(self.parse_whitespace()) {
-            Some(b'-') => {
-                return Err(self.peek_error(ErrorCode::NumberOutOfRange));
-            }
-            Some(_) => {}
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        }
-
-        let mut buf = String::new();
-        tri!(self.scan_integer128(&mut buf));
-
-        let value = match buf.parse() {
-            Ok(int) => visitor.visit_u128(int),
-            Err(_) => {
-                return Err(self.error(ErrorCode::NumberOutOfRange));
-            }
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    fn scan_integer128(&mut self, buf: &mut String) -> Result<()> {
-        match tri!(self.next_char_or_null()) {
-            b'0' => {
-                buf.push('0');
-                // There can be only one leading '0'.
-                match tri!(self.peek_or_null()) {
-                    b'0'..=b'9' => Err(self.peek_error(ErrorCode::InvalidNumber)),
-                    _ => Ok(()),
-                }
-            }
-            c @ b'1'..=b'9' => {
-                buf.push(c as char);
-                while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) {
-                    self.eat_char();
-                    buf.push(c as char);
-                }
-                Ok(())
-            }
-            _ => Err(self.error(ErrorCode::InvalidNumber)),
-        }
-    }
-
-    #[cold]
-    fn fix_position(&self, err: Error) -> Error {
-        err.fix_position(move |code| self.error(code))
-    }
-
-    fn parse_ident(&mut self, ident: &[u8]) -> Result<()> {
-        for expected in ident {
-            match tri!(self.next_char()) {
-                None => {
-                    return Err(self.error(ErrorCode::EofWhileParsingValue));
-                }
-                Some(next) => {
-                    if next != *expected {
-                        return Err(self.error(ErrorCode::ExpectedSomeIdent));
-                    }
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    fn parse_integer(&mut self, positive: bool) -> Result<ParserNumber> {
-        let next = match tri!(self.next_char()) {
-            Some(b) => b,
-            None => {
-                return Err(self.error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        match next {
-            b'0' => {
-                // There can be only one leading '0'.
-                match tri!(self.peek_or_null()) {
-                    b'0'..=b'9' => Err(self.peek_error(ErrorCode::InvalidNumber)),
-                    _ => self.parse_number(positive, 0),
-                }
-            }
-            c @ b'1'..=b'9' => {
-                let mut significand = (c - b'0') as u64;
-
-                loop {
-                    match tri!(self.peek_or_null()) {
-                        c @ b'0'..=b'9' => {
-                            let digit = (c - b'0') as u64;
-
-                            // We need to be careful with overflow. If we can,
-                            // try to keep the number as a `u64` until we grow
-                            // too large. At that point, switch to parsing the
-                            // value as a `f64`.
-                            if overflow!(significand * 10 + digit, u64::MAX) {
-                                return Ok(ParserNumber::F64(tri!(
-                                    self.parse_long_integer(positive, significand),
-                                )));
-                            }
-
-                            self.eat_char();
-                            significand = significand * 10 + digit;
-                        }
-                        _ => {
-                            return self.parse_number(positive, significand);
-                        }
-                    }
-                }
-            }
-            _ => Err(self.error(ErrorCode::InvalidNumber)),
-        }
-    }
-
-    fn parse_number(&mut self, positive: bool, significand: u64) -> Result<ParserNumber> {
-        Ok(match tri!(self.peek_or_null()) {
-            b'.' => ParserNumber::F64(tri!(self.parse_decimal(positive, significand, 0))),
-            b'e' | b'E' => ParserNumber::F64(tri!(self.parse_exponent(positive, significand, 0))),
-            _ => {
-                if positive {
-                    ParserNumber::U64(significand)
-                } else {
-                    let neg = (significand as i64).wrapping_neg();
-
-                    // Convert into a float if we underflow, or on `-0`.
-                    if neg >= 0 {
-                        ParserNumber::F64(-(significand as f64))
-                    } else {
-                        ParserNumber::I64(neg)
-                    }
-                }
-            }
-        })
-    }
-
-    fn parse_decimal(
-        &mut self,
-        positive: bool,
-        mut significand: u64,
-        exponent_before_decimal_point: i32,
-    ) -> Result<f64> {
-        self.eat_char();
-
-        let mut exponent_after_decimal_point = 0;
-        while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) {
-            let digit = (c - b'0') as u64;
-
-            if overflow!(significand * 10 + digit, u64::MAX) {
-                let exponent = exponent_before_decimal_point + exponent_after_decimal_point;
-                return self.parse_decimal_overflow(positive, significand, exponent);
-            }
-
-            self.eat_char();
-            significand = significand * 10 + digit;
-            exponent_after_decimal_point -= 1;
-        }
-
-        // Error if there is not at least one digit after the decimal point.
-        if exponent_after_decimal_point == 0 {
-            match tri!(self.peek()) {
-                Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)),
-                None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)),
-            }
-        }
-
-        let exponent = exponent_before_decimal_point + exponent_after_decimal_point;
-        match tri!(self.peek_or_null()) {
-            b'e' | b'E' => self.parse_exponent(positive, significand, exponent),
-            _ => self.f64_from_parts(positive, significand, exponent),
-        }
-    }
-
-    fn parse_exponent(
-        &mut self,
-        positive: bool,
-        significand: u64,
-        starting_exp: i32,
-    ) -> Result<f64> {
-        self.eat_char();
-
-        let positive_exp = match tri!(self.peek_or_null()) {
-            b'+' => {
-                self.eat_char();
-                true
-            }
-            b'-' => {
-                self.eat_char();
-                false
-            }
-            _ => true,
-        };
-
-        let next = match tri!(self.next_char()) {
-            Some(b) => b,
-            None => {
-                return Err(self.error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        // Make sure a digit follows the exponent place.
-        let mut exp = match next {
-            c @ b'0'..=b'9' => (c - b'0') as i32,
-            _ => {
-                return Err(self.error(ErrorCode::InvalidNumber));
-            }
-        };
-
-        while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-            let digit = (c - b'0') as i32;
-
-            if overflow!(exp * 10 + digit, i32::MAX) {
-                let zero_significand = significand == 0;
-                return self.parse_exponent_overflow(positive, zero_significand, positive_exp);
-            }
-
-            exp = exp * 10 + digit;
-        }
-
-        let final_exp = if positive_exp {
-            starting_exp.saturating_add(exp)
-        } else {
-            starting_exp.saturating_sub(exp)
-        };
-
-        self.f64_from_parts(positive, significand, final_exp)
-    }
-
-    #[cfg(feature = "float_roundtrip")]
-    fn f64_from_parts(&mut self, positive: bool, significand: u64, exponent: i32) -> Result<f64> {
-        let f = if self.single_precision {
-            lexical::parse_concise_float::<f32>(significand, exponent) as f64
-        } else {
-            lexical::parse_concise_float::<f64>(significand, exponent)
-        };
-
-        if f.is_infinite() {
-            Err(self.error(ErrorCode::NumberOutOfRange))
-        } else {
-            Ok(if positive { f } else { -f })
-        }
-    }
-
-    #[cfg(not(feature = "float_roundtrip"))]
-    fn f64_from_parts(
-        &mut self,
-        positive: bool,
-        significand: u64,
-        mut exponent: i32,
-    ) -> Result<f64> {
-        let mut f = significand as f64;
-        loop {
-            match POW10.get(exponent.wrapping_abs() as usize) {
-                Some(&pow) => {
-                    if exponent >= 0 {
-                        f *= pow;
-                        if f.is_infinite() {
-                            return Err(self.error(ErrorCode::NumberOutOfRange));
-                        }
-                    } else {
-                        f /= pow;
-                    }
-                    break;
-                }
-                None => {
-                    if f == 0.0 {
-                        break;
-                    }
-                    if exponent >= 0 {
-                        return Err(self.error(ErrorCode::NumberOutOfRange));
-                    }
-                    f /= 1e308;
-                    exponent += 308;
-                }
-            }
-        }
-        Ok(if positive { f } else { -f })
-    }
-
-    #[cfg(feature = "float_roundtrip")]
-    #[cold]
-    #[inline(never)]
-    fn parse_long_integer(&mut self, positive: bool, partial_significand: u64) -> Result<f64> {
-        // To deserialize floats we'll first push the integer and fraction
-        // parts, both as byte strings, into the scratch buffer and then feed
-        // both slices to lexical's parser. For example if the input is
-        // `12.34e5` we'll push b"1234" into scratch and then pass b"12" and
-        // b"34" to lexical. `integer_end` will be used to track where to split
-        // the scratch buffer.
-        //
-        // Note that lexical expects the integer part to contain *no* leading
-        // zeroes and the fraction part to contain *no* trailing zeroes. The
-        // first requirement is already handled by the integer parsing logic.
-        // The second requirement will be enforced just before passing the
-        // slices to lexical in f64_long_from_parts.
-        self.scratch.clear();
-        self.scratch
-            .extend_from_slice(itoa::Buffer::new().format(partial_significand).as_bytes());
-
-        loop {
-            match tri!(self.peek_or_null()) {
-                c @ b'0'..=b'9' => {
-                    self.scratch.push(c);
-                    self.eat_char();
-                }
-                b'.' => {
-                    self.eat_char();
-                    return self.parse_long_decimal(positive, self.scratch.len());
-                }
-                b'e' | b'E' => {
-                    return self.parse_long_exponent(positive, self.scratch.len());
-                }
-                _ => {
-                    return self.f64_long_from_parts(positive, self.scratch.len(), 0);
-                }
-            }
-        }
-    }
-
-    #[cfg(not(feature = "float_roundtrip"))]
-    #[cold]
-    #[inline(never)]
-    fn parse_long_integer(&mut self, positive: bool, significand: u64) -> Result<f64> {
-        let mut exponent = 0;
-        loop {
-            match tri!(self.peek_or_null()) {
-                b'0'..=b'9' => {
-                    self.eat_char();
-                    // This could overflow... if your integer is gigabytes long.
-                    // Ignore that possibility.
-                    exponent += 1;
-                }
-                b'.' => {
-                    return self.parse_decimal(positive, significand, exponent);
-                }
-                b'e' | b'E' => {
-                    return self.parse_exponent(positive, significand, exponent);
-                }
-                _ => {
-                    return self.f64_from_parts(positive, significand, exponent);
-                }
-            }
-        }
-    }
-
-    #[cfg(feature = "float_roundtrip")]
-    #[cold]
-    fn parse_long_decimal(&mut self, positive: bool, integer_end: usize) -> Result<f64> {
-        let mut at_least_one_digit = integer_end < self.scratch.len();
-        while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.scratch.push(c);
-            self.eat_char();
-            at_least_one_digit = true;
-        }
-
-        if !at_least_one_digit {
-            match tri!(self.peek()) {
-                Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)),
-                None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)),
-            }
-        }
-
-        match tri!(self.peek_or_null()) {
-            b'e' | b'E' => self.parse_long_exponent(positive, integer_end),
-            _ => self.f64_long_from_parts(positive, integer_end, 0),
-        }
-    }
-
-    #[cfg(feature = "float_roundtrip")]
-    fn parse_long_exponent(&mut self, positive: bool, integer_end: usize) -> Result<f64> {
-        self.eat_char();
-
-        let positive_exp = match tri!(self.peek_or_null()) {
-            b'+' => {
-                self.eat_char();
-                true
-            }
-            b'-' => {
-                self.eat_char();
-                false
-            }
-            _ => true,
-        };
-
-        let next = match tri!(self.next_char()) {
-            Some(b) => b,
-            None => {
-                return Err(self.error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        // Make sure a digit follows the exponent place.
-        let mut exp = match next {
-            c @ b'0'..=b'9' => (c - b'0') as i32,
-            _ => {
-                return Err(self.error(ErrorCode::InvalidNumber));
-            }
-        };
-
-        while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-            let digit = (c - b'0') as i32;
-
-            if overflow!(exp * 10 + digit, i32::MAX) {
-                let zero_significand = self.scratch.iter().all(|&digit| digit == b'0');
-                return self.parse_exponent_overflow(positive, zero_significand, positive_exp);
-            }
-
-            exp = exp * 10 + digit;
-        }
-
-        let final_exp = if positive_exp { exp } else { -exp };
-
-        self.f64_long_from_parts(positive, integer_end, final_exp)
-    }
-
-    // This cold code should not be inlined into the middle of the hot
-    // decimal-parsing loop above.
-    #[cfg(feature = "float_roundtrip")]
-    #[cold]
-    #[inline(never)]
-    fn parse_decimal_overflow(
-        &mut self,
-        positive: bool,
-        significand: u64,
-        exponent: i32,
-    ) -> Result<f64> {
-        let mut buffer = itoa::Buffer::new();
-        let significand = buffer.format(significand);
-        let fraction_digits = -exponent as usize;
-        self.scratch.clear();
-        if let Some(zeros) = fraction_digits.checked_sub(significand.len() + 1) {
-            self.scratch.extend(iter::repeat(b'0').take(zeros + 1));
-        }
-        self.scratch.extend_from_slice(significand.as_bytes());
-        let integer_end = self.scratch.len() - fraction_digits;
-        self.parse_long_decimal(positive, integer_end)
-    }
-
-    #[cfg(not(feature = "float_roundtrip"))]
-    #[cold]
-    #[inline(never)]
-    fn parse_decimal_overflow(
-        &mut self,
-        positive: bool,
-        significand: u64,
-        exponent: i32,
-    ) -> Result<f64> {
-        // The next multiply/add would overflow, so just ignore all further
-        // digits.
-        while let b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-        }
-
-        match tri!(self.peek_or_null()) {
-            b'e' | b'E' => self.parse_exponent(positive, significand, exponent),
-            _ => self.f64_from_parts(positive, significand, exponent),
-        }
-    }
-
-    // This cold code should not be inlined into the middle of the hot
-    // exponent-parsing loop above.
-    #[cold]
-    #[inline(never)]
-    fn parse_exponent_overflow(
-        &mut self,
-        positive: bool,
-        zero_significand: bool,
-        positive_exp: bool,
-    ) -> Result<f64> {
-        // Error instead of +/- infinity.
-        if !zero_significand && positive_exp {
-            return Err(self.error(ErrorCode::NumberOutOfRange));
-        }
-
-        while let b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-        }
-        Ok(if positive { 0.0 } else { -0.0 })
-    }
-
-    #[cfg(feature = "float_roundtrip")]
-    fn f64_long_from_parts(
-        &mut self,
-        positive: bool,
-        integer_end: usize,
-        exponent: i32,
-    ) -> Result<f64> {
-        let integer = &self.scratch[..integer_end];
-        let fraction = &self.scratch[integer_end..];
-
-        let f = if self.single_precision {
-            lexical::parse_truncated_float::<f32>(integer, fraction, exponent) as f64
-        } else {
-            lexical::parse_truncated_float::<f64>(integer, fraction, exponent)
-        };
-
-        if f.is_infinite() {
-            Err(self.error(ErrorCode::NumberOutOfRange))
-        } else {
-            Ok(if positive { f } else { -f })
-        }
-    }
-
-    fn parse_any_signed_number(&mut self) -> Result<ParserNumber> {
-        let peek = match tri!(self.peek()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'-' => {
-                self.eat_char();
-                self.parse_any_number(false)
-            }
-            b'0'..=b'9' => self.parse_any_number(true),
-            _ => Err(self.peek_error(ErrorCode::InvalidNumber)),
-        };
-
-        let value = match tri!(self.peek()) {
-            Some(_) => Err(self.peek_error(ErrorCode::InvalidNumber)),
-            None => value,
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            // The de::Error impl creates errors with unknown line and column.
-            // Fill in the position here by looking at the current index in the
-            // input. There is no way to tell whether this should call `error`
-            // or `peek_error` so pick the one that seems correct more often.
-            // Worst case, the position is off by one character.
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    #[cfg(not(feature = "arbitrary_precision"))]
-    fn parse_any_number(&mut self, positive: bool) -> Result<ParserNumber> {
-        self.parse_integer(positive)
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn parse_any_number(&mut self, positive: bool) -> Result<ParserNumber> {
-        let mut buf = String::with_capacity(16);
-        if !positive {
-            buf.push('-');
-        }
-        tri!(self.scan_integer(&mut buf));
-        if positive {
-            if let Ok(unsigned) = buf.parse() {
-                return Ok(ParserNumber::U64(unsigned));
-            }
-        } else {
-            if let Ok(signed) = buf.parse() {
-                return Ok(ParserNumber::I64(signed));
-            }
-        }
-        Ok(ParserNumber::String(buf))
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn scan_or_eof(&mut self, buf: &mut String) -> Result<u8> {
-        match tri!(self.next_char()) {
-            Some(b) => {
-                buf.push(b as char);
-                Ok(b)
-            }
-            None => Err(self.error(ErrorCode::EofWhileParsingValue)),
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn scan_integer(&mut self, buf: &mut String) -> Result<()> {
-        match tri!(self.scan_or_eof(buf)) {
-            b'0' => {
-                // There can be only one leading '0'.
-                match tri!(self.peek_or_null()) {
-                    b'0'..=b'9' => Err(self.peek_error(ErrorCode::InvalidNumber)),
-                    _ => self.scan_number(buf),
-                }
-            }
-            b'1'..=b'9' => loop {
-                match tri!(self.peek_or_null()) {
-                    c @ b'0'..=b'9' => {
-                        self.eat_char();
-                        buf.push(c as char);
-                    }
-                    _ => {
-                        return self.scan_number(buf);
-                    }
-                }
-            },
-            _ => Err(self.error(ErrorCode::InvalidNumber)),
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn scan_number(&mut self, buf: &mut String) -> Result<()> {
-        match tri!(self.peek_or_null()) {
-            b'.' => self.scan_decimal(buf),
-            e @ (b'e' | b'E') => self.scan_exponent(e as char, buf),
-            _ => Ok(()),
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn scan_decimal(&mut self, buf: &mut String) -> Result<()> {
-        self.eat_char();
-        buf.push('.');
-
-        let mut at_least_one_digit = false;
-        while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-            buf.push(c as char);
-            at_least_one_digit = true;
-        }
-
-        if !at_least_one_digit {
-            match tri!(self.peek()) {
-                Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)),
-                None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)),
-            }
-        }
-
-        match tri!(self.peek_or_null()) {
-            e @ (b'e' | b'E') => self.scan_exponent(e as char, buf),
-            _ => Ok(()),
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn scan_exponent(&mut self, e: char, buf: &mut String) -> Result<()> {
-        self.eat_char();
-        buf.push(e);
-
-        match tri!(self.peek_or_null()) {
-            b'+' => {
-                self.eat_char();
-                buf.push('+');
-            }
-            b'-' => {
-                self.eat_char();
-                buf.push('-');
-            }
-            _ => {}
-        }
-
-        // Make sure a digit follows the exponent place.
-        match tri!(self.scan_or_eof(buf)) {
-            b'0'..=b'9' => {}
-            _ => {
-                return Err(self.error(ErrorCode::InvalidNumber));
-            }
-        }
-
-        while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-            buf.push(c as char);
-        }
-
-        Ok(())
-    }
-
-    fn parse_object_colon(&mut self) -> Result<()> {
-        match tri!(self.parse_whitespace()) {
-            Some(b':') => {
-                self.eat_char();
-                Ok(())
-            }
-            Some(_) => Err(self.peek_error(ErrorCode::ExpectedColon)),
-            None => Err(self.peek_error(ErrorCode::EofWhileParsingObject)),
-        }
-    }
-
-    fn end_seq(&mut self) -> Result<()> {
-        match tri!(self.parse_whitespace()) {
-            Some(b']') => {
-                self.eat_char();
-                Ok(())
-            }
-            Some(b',') => {
-                self.eat_char();
-                match self.parse_whitespace() {
-                    Ok(Some(b']')) => Err(self.peek_error(ErrorCode::TrailingComma)),
-                    _ => Err(self.peek_error(ErrorCode::TrailingCharacters)),
-                }
-            }
-            Some(_) => Err(self.peek_error(ErrorCode::TrailingCharacters)),
-            None => Err(self.peek_error(ErrorCode::EofWhileParsingList)),
-        }
-    }
-
-    fn end_map(&mut self) -> Result<()> {
-        match tri!(self.parse_whitespace()) {
-            Some(b'}') => {
-                self.eat_char();
-                Ok(())
-            }
-            Some(b',') => Err(self.peek_error(ErrorCode::TrailingComma)),
-            Some(_) => Err(self.peek_error(ErrorCode::TrailingCharacters)),
-            None => Err(self.peek_error(ErrorCode::EofWhileParsingObject)),
-        }
-    }
-
-    fn ignore_value(&mut self) -> Result<()> {
-        self.scratch.clear();
-        let mut enclosing = None;
-
-        loop {
-            let peek = match tri!(self.parse_whitespace()) {
-                Some(b) => b,
-                None => {
-                    return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-                }
-            };
-
-            let frame = match peek {
-                b'n' => {
-                    self.eat_char();
-                    tri!(self.parse_ident(b"ull"));
-                    None
-                }
-                b't' => {
-                    self.eat_char();
-                    tri!(self.parse_ident(b"rue"));
-                    None
-                }
-                b'f' => {
-                    self.eat_char();
-                    tri!(self.parse_ident(b"alse"));
-                    None
-                }
-                b'-' => {
-                    self.eat_char();
-                    tri!(self.ignore_integer());
-                    None
-                }
-                b'0'..=b'9' => {
-                    tri!(self.ignore_integer());
-                    None
-                }
-                b'"' => {
-                    self.eat_char();
-                    tri!(self.read.ignore_str());
-                    None
-                }
-                frame @ (b'[' | b'{') => {
-                    self.scratch.extend(enclosing.take());
-                    self.eat_char();
-                    Some(frame)
-                }
-                _ => return Err(self.peek_error(ErrorCode::ExpectedSomeValue)),
-            };
-
-            let (mut accept_comma, mut frame) = match frame {
-                Some(frame) => (false, frame),
-                None => match enclosing.take() {
-                    Some(frame) => (true, frame),
-                    None => match self.scratch.pop() {
-                        Some(frame) => (true, frame),
-                        None => return Ok(()),
-                    },
-                },
-            };
-
-            loop {
-                match tri!(self.parse_whitespace()) {
-                    Some(b',') if accept_comma => {
-                        self.eat_char();
-                        break;
-                    }
-                    Some(b']') if frame == b'[' => {}
-                    Some(b'}') if frame == b'{' => {}
-                    Some(_) => {
-                        if accept_comma {
-                            return Err(self.peek_error(match frame {
-                                b'[' => ErrorCode::ExpectedListCommaOrEnd,
-                                b'{' => ErrorCode::ExpectedObjectCommaOrEnd,
-                                _ => unreachable!(),
-                            }));
-                        } else {
-                            break;
-                        }
-                    }
-                    None => {
-                        return Err(self.peek_error(match frame {
-                            b'[' => ErrorCode::EofWhileParsingList,
-                            b'{' => ErrorCode::EofWhileParsingObject,
-                            _ => unreachable!(),
-                        }));
-                    }
-                }
-
-                self.eat_char();
-                frame = match self.scratch.pop() {
-                    Some(frame) => frame,
-                    None => return Ok(()),
-                };
-                accept_comma = true;
-            }
-
-            if frame == b'{' {
-                match tri!(self.parse_whitespace()) {
-                    Some(b'"') => self.eat_char(),
-                    Some(_) => return Err(self.peek_error(ErrorCode::KeyMustBeAString)),
-                    None => return Err(self.peek_error(ErrorCode::EofWhileParsingObject)),
-                }
-                tri!(self.read.ignore_str());
-                match tri!(self.parse_whitespace()) {
-                    Some(b':') => self.eat_char(),
-                    Some(_) => return Err(self.peek_error(ErrorCode::ExpectedColon)),
-                    None => return Err(self.peek_error(ErrorCode::EofWhileParsingObject)),
-                }
-            }
-
-            enclosing = Some(frame);
-        }
-    }
-
-    fn ignore_integer(&mut self) -> Result<()> {
-        match tri!(self.next_char_or_null()) {
-            b'0' => {
-                // There can be only one leading '0'.
-                if let b'0'..=b'9' = tri!(self.peek_or_null()) {
-                    return Err(self.peek_error(ErrorCode::InvalidNumber));
-                }
-            }
-            b'1'..=b'9' => {
-                while let b'0'..=b'9' = tri!(self.peek_or_null()) {
-                    self.eat_char();
-                }
-            }
-            _ => {
-                return Err(self.error(ErrorCode::InvalidNumber));
-            }
-        }
-
-        match tri!(self.peek_or_null()) {
-            b'.' => self.ignore_decimal(),
-            b'e' | b'E' => self.ignore_exponent(),
-            _ => Ok(()),
-        }
-    }
-
-    fn ignore_decimal(&mut self) -> Result<()> {
-        self.eat_char();
-
-        let mut at_least_one_digit = false;
-        while let b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-            at_least_one_digit = true;
-        }
-
-        if !at_least_one_digit {
-            return Err(self.peek_error(ErrorCode::InvalidNumber));
-        }
-
-        match tri!(self.peek_or_null()) {
-            b'e' | b'E' => self.ignore_exponent(),
-            _ => Ok(()),
-        }
-    }
-
-    fn ignore_exponent(&mut self) -> Result<()> {
-        self.eat_char();
-
-        match tri!(self.peek_or_null()) {
-            b'+' | b'-' => self.eat_char(),
-            _ => {}
-        }
-
-        // Make sure a digit follows the exponent place.
-        match tri!(self.next_char_or_null()) {
-            b'0'..=b'9' => {}
-            _ => {
-                return Err(self.error(ErrorCode::InvalidNumber));
-            }
-        }
-
-        while let b'0'..=b'9' = tri!(self.peek_or_null()) {
-            self.eat_char();
-        }
-
-        Ok(())
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn deserialize_raw_value<V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        tri!(self.parse_whitespace());
-        self.read.begin_raw_buffering();
-        tri!(self.ignore_value());
-        self.read.end_raw_buffering(visitor)
-    }
-}
-
-impl FromStr for Number {
-    type Err = Error;
-
-    fn from_str(s: &str) -> result::Result<Self, Self::Err> {
-        Deserializer::from_str(s)
-            .parse_any_signed_number()
-            .map(Into::into)
-    }
-}
-
-#[cfg(not(feature = "float_roundtrip"))]
-static POW10: [f64; 309] = [
-    1e000, 1e001, 1e002, 1e003, 1e004, 1e005, 1e006, 1e007, 1e008, 1e009, //
-    1e010, 1e011, 1e012, 1e013, 1e014, 1e015, 1e016, 1e017, 1e018, 1e019, //
-    1e020, 1e021, 1e022, 1e023, 1e024, 1e025, 1e026, 1e027, 1e028, 1e029, //
-    1e030, 1e031, 1e032, 1e033, 1e034, 1e035, 1e036, 1e037, 1e038, 1e039, //
-    1e040, 1e041, 1e042, 1e043, 1e044, 1e045, 1e046, 1e047, 1e048, 1e049, //
-    1e050, 1e051, 1e052, 1e053, 1e054, 1e055, 1e056, 1e057, 1e058, 1e059, //
-    1e060, 1e061, 1e062, 1e063, 1e064, 1e065, 1e066, 1e067, 1e068, 1e069, //
-    1e070, 1e071, 1e072, 1e073, 1e074, 1e075, 1e076, 1e077, 1e078, 1e079, //
-    1e080, 1e081, 1e082, 1e083, 1e084, 1e085, 1e086, 1e087, 1e088, 1e089, //
-    1e090, 1e091, 1e092, 1e093, 1e094, 1e095, 1e096, 1e097, 1e098, 1e099, //
-    1e100, 1e101, 1e102, 1e103, 1e104, 1e105, 1e106, 1e107, 1e108, 1e109, //
-    1e110, 1e111, 1e112, 1e113, 1e114, 1e115, 1e116, 1e117, 1e118, 1e119, //
-    1e120, 1e121, 1e122, 1e123, 1e124, 1e125, 1e126, 1e127, 1e128, 1e129, //
-    1e130, 1e131, 1e132, 1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, //
-    1e140, 1e141, 1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, //
-    1e150, 1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159, //
-    1e160, 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 1e168, 1e169, //
-    1e170, 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177, 1e178, 1e179, //
-    1e180, 1e181, 1e182, 1e183, 1e184, 1e185, 1e186, 1e187, 1e188, 1e189, //
-    1e190, 1e191, 1e192, 1e193, 1e194, 1e195, 1e196, 1e197, 1e198, 1e199, //
-    1e200, 1e201, 1e202, 1e203, 1e204, 1e205, 1e206, 1e207, 1e208, 1e209, //
-    1e210, 1e211, 1e212, 1e213, 1e214, 1e215, 1e216, 1e217, 1e218, 1e219, //
-    1e220, 1e221, 1e222, 1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, //
-    1e230, 1e231, 1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, //
-    1e240, 1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249, //
-    1e250, 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258, 1e259, //
-    1e260, 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267, 1e268, 1e269, //
-    1e270, 1e271, 1e272, 1e273, 1e274, 1e275, 1e276, 1e277, 1e278, 1e279, //
-    1e280, 1e281, 1e282, 1e283, 1e284, 1e285, 1e286, 1e287, 1e288, 1e289, //
-    1e290, 1e291, 1e292, 1e293, 1e294, 1e295, 1e296, 1e297, 1e298, 1e299, //
-    1e300, 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308,
-];
-
-macro_rules! deserialize_number {
-    ($method:ident) => {
-        deserialize_number!($method, deserialize_number);
-    };
-
-    ($method:ident, $using:ident) => {
-        fn $method<V>(self, visitor: V) -> Result<V::Value>
-        where
-            V: de::Visitor<'de>,
-        {
-            self.$using(visitor)
-        }
-    };
-}
-
-#[cfg(not(feature = "unbounded_depth"))]
-macro_rules! if_checking_recursion_limit {
-    ($($body:tt)*) => {
-        $($body)*
-    };
-}
-
-#[cfg(feature = "unbounded_depth")]
-macro_rules! if_checking_recursion_limit {
-    ($this:ident $($body:tt)*) => {
-        if !$this.disable_recursion_limit {
-            $this $($body)*
-        }
-    };
-}
-
-macro_rules! check_recursion {
-    ($this:ident $($body:tt)*) => {
-        if_checking_recursion_limit! {
-            $this.remaining_depth -= 1;
-            if $this.remaining_depth == 0 {
-                return Err($this.peek_error(ErrorCode::RecursionLimitExceeded));
-            }
-        }
-
-        $this $($body)*
-
-        if_checking_recursion_limit! {
-            $this.remaining_depth += 1;
-        }
-    };
-}
-
-impl<'de, R: Read<'de>> de::Deserializer<'de> for &mut Deserializer<R> {
-    type Error = Error;
-
-    #[inline]
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'n' => {
-                self.eat_char();
-                tri!(self.parse_ident(b"ull"));
-                visitor.visit_unit()
-            }
-            b't' => {
-                self.eat_char();
-                tri!(self.parse_ident(b"rue"));
-                visitor.visit_bool(true)
-            }
-            b'f' => {
-                self.eat_char();
-                tri!(self.parse_ident(b"alse"));
-                visitor.visit_bool(false)
-            }
-            b'-' => {
-                self.eat_char();
-                tri!(self.parse_any_number(false)).visit(visitor)
-            }
-            b'0'..=b'9' => tri!(self.parse_any_number(true)).visit(visitor),
-            b'"' => {
-                self.eat_char();
-                self.scratch.clear();
-                match tri!(self.read.parse_str(&mut self.scratch)) {
-                    Reference::Borrowed(s) => visitor.visit_borrowed_str(s),
-                    Reference::Copied(s) => visitor.visit_str(s),
-                }
-            }
-            b'[' => {
-                check_recursion! {
-                    self.eat_char();
-                    let ret = visitor.visit_seq(SeqAccess::new(self));
-                }
-
-                match (ret, self.end_seq()) {
-                    (Ok(ret), Ok(())) => Ok(ret),
-                    (Err(err), _) | (_, Err(err)) => Err(err),
-                }
-            }
-            b'{' => {
-                check_recursion! {
-                    self.eat_char();
-                    let ret = visitor.visit_map(MapAccess::new(self));
-                }
-
-                match (ret, self.end_map()) {
-                    (Ok(ret), Ok(())) => Ok(ret),
-                    (Err(err), _) | (_, Err(err)) => Err(err),
-                }
-            }
-            _ => Err(self.peek_error(ErrorCode::ExpectedSomeValue)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            // The de::Error impl creates errors with unknown line and column.
-            // Fill in the position here by looking at the current index in the
-            // input. There is no way to tell whether this should call `error`
-            // or `peek_error` so pick the one that seems correct more often.
-            // Worst case, the position is off by one character.
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b't' => {
-                self.eat_char();
-                tri!(self.parse_ident(b"rue"));
-                visitor.visit_bool(true)
-            }
-            b'f' => {
-                self.eat_char();
-                tri!(self.parse_ident(b"alse"));
-                visitor.visit_bool(false)
-            }
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    deserialize_number!(deserialize_i8);
-    deserialize_number!(deserialize_i16);
-    deserialize_number!(deserialize_i32);
-    deserialize_number!(deserialize_i64);
-    deserialize_number!(deserialize_u8);
-    deserialize_number!(deserialize_u16);
-    deserialize_number!(deserialize_u32);
-    deserialize_number!(deserialize_u64);
-    #[cfg(not(feature = "float_roundtrip"))]
-    deserialize_number!(deserialize_f32);
-    deserialize_number!(deserialize_f64);
-
-    #[cfg(feature = "float_roundtrip")]
-    deserialize_number!(deserialize_f32, do_deserialize_f32);
-    deserialize_number!(deserialize_i128, do_deserialize_i128);
-    deserialize_number!(deserialize_u128, do_deserialize_u128);
-
-    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.deserialize_str(visitor)
-    }
-
-    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'"' => {
-                self.eat_char();
-                self.scratch.clear();
-                match tri!(self.read.parse_str(&mut self.scratch)) {
-                    Reference::Borrowed(s) => visitor.visit_borrowed_str(s),
-                    Reference::Copied(s) => visitor.visit_str(s),
-                }
-            }
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.deserialize_str(visitor)
-    }
-
-    /// Parses a JSON string as bytes. Note that this function does not check
-    /// whether the bytes represent a valid UTF-8 string.
-    ///
-    /// The relevant part of the JSON specification is Section 8.2 of [RFC
-    /// 7159]:
-    ///
-    /// > When all the strings represented in a JSON text are composed entirely
-    /// > of Unicode characters (however escaped), then that JSON text is
-    /// > interoperable in the sense that all software implementations that
-    /// > parse it will agree on the contents of names and of string values in
-    /// > objects and arrays.
-    /// >
-    /// > However, the ABNF in this specification allows member names and string
-    /// > values to contain bit sequences that cannot encode Unicode characters;
-    /// > for example, "\uDEAD" (a single unpaired UTF-16 surrogate). Instances
-    /// > of this have been observed, for example, when a library truncates a
-    /// > UTF-16 string without checking whether the truncation split a
-    /// > surrogate pair.  The behavior of software that receives JSON texts
-    /// > containing such values is unpredictable; for example, implementations
-    /// > might return different values for the length of a string value or even
-    /// > suffer fatal runtime exceptions.
-    ///
-    /// [RFC 7159]: https://tools.ietf.org/html/rfc7159
-    ///
-    /// The behavior of serde_json is specified to fail on non-UTF-8 strings
-    /// when deserializing into Rust UTF-8 string types such as String, and
-    /// succeed with the bytes representing the [WTF-8] encoding of code points
-    /// when deserializing using this method.
-    ///
-    /// [WTF-8]: https://simonsapin.github.io/wtf-8
-    ///
-    /// Escape sequences are processed as usual, and for `\uXXXX` escapes it is
-    /// still checked if the hex number represents a valid Unicode code point.
-    ///
-    /// # Examples
-    ///
-    /// You can use this to parse JSON strings containing invalid UTF-8 bytes,
-    /// or unpaired surrogates.
-    ///
-    /// ```
-    /// use serde_bytes::ByteBuf;
-    ///
-    /// fn look_at_bytes() -> Result<(), serde_json::Error> {
-    ///     let json_data = b"\"some bytes: \xe5\x00\xe5\"";
-    ///     let bytes: ByteBuf = serde_json::from_slice(json_data)?;
-    ///
-    ///     assert_eq!(b'\xe5', bytes[12]);
-    ///     assert_eq!(b'\0', bytes[13]);
-    ///     assert_eq!(b'\xe5', bytes[14]);
-    ///
-    ///     Ok(())
-    /// }
-    /// #
-    /// # look_at_bytes().unwrap();
-    /// ```
-    ///
-    /// Backslash escape sequences like `\n` are still interpreted and required
-    /// to be valid. `\u` escape sequences are required to represent a valid
-    /// Unicode code point or lone surrogate.
-    ///
-    /// ```
-    /// use serde_bytes::ByteBuf;
-    ///
-    /// fn look_at_bytes() -> Result<(), serde_json::Error> {
-    ///     let json_data = b"\"lone surrogate: \\uD801\"";
-    ///     let bytes: ByteBuf = serde_json::from_slice(json_data)?;
-    ///     let expected = b"lone surrogate: \xED\xA0\x81";
-    ///     assert_eq!(expected, bytes.as_slice());
-    ///     Ok(())
-    /// }
-    /// #
-    /// # look_at_bytes();
-    /// ```
-    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'"' => {
-                self.eat_char();
-                self.scratch.clear();
-                match tri!(self.read.parse_str_raw(&mut self.scratch)) {
-                    Reference::Borrowed(b) => visitor.visit_borrowed_bytes(b),
-                    Reference::Copied(b) => visitor.visit_bytes(b),
-                }
-            }
-            b'[' => self.deserialize_seq(visitor),
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    #[inline]
-    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.deserialize_bytes(visitor)
-    }
-
-    /// Parses a `null` as a None, and any other values as a `Some(...)`.
-    #[inline]
-    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        match tri!(self.parse_whitespace()) {
-            Some(b'n') => {
-                self.eat_char();
-                tri!(self.parse_ident(b"ull"));
-                visitor.visit_none()
-            }
-            _ => visitor.visit_some(self),
-        }
-    }
-
-    fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'n' => {
-                self.eat_char();
-                tri!(self.parse_ident(b"ull"));
-                visitor.visit_unit()
-            }
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    fn deserialize_unit_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.deserialize_unit(visitor)
-    }
-
-    /// Parses a newtype struct as the underlying value.
-    #[inline]
-    fn deserialize_newtype_struct<V>(self, name: &str, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        #[cfg(feature = "raw_value")]
-        {
-            if name == crate::raw::TOKEN {
-                return self.deserialize_raw_value(visitor);
-            }
-        }
-
-        let _ = name;
-        visitor.visit_newtype_struct(self)
-    }
-
-    fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'[' => {
-                check_recursion! {
-                    self.eat_char();
-                    let ret = visitor.visit_seq(SeqAccess::new(self));
-                }
-
-                match (ret, self.end_seq()) {
-                    (Ok(ret), Ok(())) => Ok(ret),
-                    (Err(err), _) | (_, Err(err)) => Err(err),
-                }
-            }
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    fn deserialize_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.deserialize_seq(visitor)
-    }
-
-    fn deserialize_tuple_struct<V>(
-        self,
-        _name: &'static str,
-        _len: usize,
-        visitor: V,
-    ) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.deserialize_seq(visitor)
-    }
-
-    fn deserialize_map<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'{' => {
-                check_recursion! {
-                    self.eat_char();
-                    let ret = visitor.visit_map(MapAccess::new(self));
-                }
-
-                match (ret, self.end_map()) {
-                    (Ok(ret), Ok(())) => Ok(ret),
-                    (Err(err), _) | (_, Err(err)) => Err(err),
-                }
-            }
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    fn deserialize_struct<V>(
-        self,
-        _name: &'static str,
-        _fields: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        let peek = match tri!(self.parse_whitespace()) {
-            Some(b) => b,
-            None => {
-                return Err(self.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b'[' => {
-                check_recursion! {
-                    self.eat_char();
-                    let ret = visitor.visit_seq(SeqAccess::new(self));
-                }
-
-                match (ret, self.end_seq()) {
-                    (Ok(ret), Ok(())) => Ok(ret),
-                    (Err(err), _) | (_, Err(err)) => Err(err),
-                }
-            }
-            b'{' => {
-                check_recursion! {
-                    self.eat_char();
-                    let ret = visitor.visit_map(MapAccess::new(self));
-                }
-
-                match (ret, self.end_map()) {
-                    (Ok(ret), Ok(())) => Ok(ret),
-                    (Err(err), _) | (_, Err(err)) => Err(err),
-                }
-            }
-            _ => Err(self.peek_invalid_type(&visitor)),
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.fix_position(err)),
-        }
-    }
-
-    /// Parses an enum as an object like `{"$KEY":$VALUE}`, where $VALUE is either a straight
-    /// value, a `[..]`, or a `{..}`.
-    #[inline]
-    fn deserialize_enum<V>(
-        self,
-        _name: &str,
-        _variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        match tri!(self.parse_whitespace()) {
-            Some(b'{') => {
-                check_recursion! {
-                    self.eat_char();
-                    let ret = visitor.visit_enum(VariantAccess::new(self));
-                }
-                let value = tri!(ret);
-
-                match tri!(self.parse_whitespace()) {
-                    Some(b'}') => {
-                        self.eat_char();
-                        Ok(value)
-                    }
-                    Some(_) => Err(self.error(ErrorCode::ExpectedSomeValue)),
-                    None => Err(self.error(ErrorCode::EofWhileParsingObject)),
-                }
-            }
-            Some(b'"') => visitor.visit_enum(UnitVariantAccess::new(self)),
-            Some(_) => Err(self.peek_error(ErrorCode::ExpectedSomeValue)),
-            None => Err(self.peek_error(ErrorCode::EofWhileParsingValue)),
-        }
-    }
-
-    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.deserialize_str(visitor)
-    }
-
-    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        tri!(self.ignore_value());
-        visitor.visit_unit()
-    }
-}
-
-struct SeqAccess<'a, R: 'a> {
-    de: &'a mut Deserializer<R>,
-    first: bool,
-}
-
-impl<'a, R: 'a> SeqAccess<'a, R> {
-    fn new(de: &'a mut Deserializer<R>) -> Self {
-        SeqAccess { de, first: true }
-    }
-}
-
-impl<'de, 'a, R: Read<'de> + 'a> de::SeqAccess<'de> for SeqAccess<'a, R> {
-    type Error = Error;
-
-    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
-    where
-        T: de::DeserializeSeed<'de>,
-    {
-        fn has_next_element<'de, 'a, R: Read<'de> + 'a>(
-            seq: &mut SeqAccess<'a, R>,
-        ) -> Result<bool> {
-            let peek = match tri!(seq.de.parse_whitespace()) {
-                Some(b) => b,
-                None => {
-                    return Err(seq.de.peek_error(ErrorCode::EofWhileParsingList));
-                }
-            };
-
-            if peek == b']' {
-                Ok(false)
-            } else if seq.first {
-                seq.first = false;
-                Ok(true)
-            } else if peek == b',' {
-                seq.de.eat_char();
-                match tri!(seq.de.parse_whitespace()) {
-                    Some(b']') => Err(seq.de.peek_error(ErrorCode::TrailingComma)),
-                    Some(_) => Ok(true),
-                    None => Err(seq.de.peek_error(ErrorCode::EofWhileParsingValue)),
-                }
-            } else {
-                Err(seq.de.peek_error(ErrorCode::ExpectedListCommaOrEnd))
-            }
-        }
-
-        if tri!(has_next_element(self)) {
-            Ok(Some(tri!(seed.deserialize(&mut *self.de))))
-        } else {
-            Ok(None)
-        }
-    }
-}
-
-struct MapAccess<'a, R: 'a> {
-    de: &'a mut Deserializer<R>,
-    first: bool,
-}
-
-impl<'a, R: 'a> MapAccess<'a, R> {
-    fn new(de: &'a mut Deserializer<R>) -> Self {
-        MapAccess { de, first: true }
-    }
-}
-
-impl<'de, 'a, R: Read<'de> + 'a> de::MapAccess<'de> for MapAccess<'a, R> {
-    type Error = Error;
-
-    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
-    where
-        K: de::DeserializeSeed<'de>,
-    {
-        fn has_next_key<'de, 'a, R: Read<'de> + 'a>(map: &mut MapAccess<'a, R>) -> Result<bool> {
-            let peek = match tri!(map.de.parse_whitespace()) {
-                Some(b) => b,
-                None => {
-                    return Err(map.de.peek_error(ErrorCode::EofWhileParsingObject));
-                }
-            };
-
-            if peek == b'}' {
-                Ok(false)
-            } else if map.first {
-                map.first = false;
-                if peek == b'"' {
-                    Ok(true)
-                } else {
-                    Err(map.de.peek_error(ErrorCode::KeyMustBeAString))
-                }
-            } else if peek == b',' {
-                map.de.eat_char();
-                match tri!(map.de.parse_whitespace()) {
-                    Some(b'"') => Ok(true),
-                    Some(b'}') => Err(map.de.peek_error(ErrorCode::TrailingComma)),
-                    Some(_) => Err(map.de.peek_error(ErrorCode::KeyMustBeAString)),
-                    None => Err(map.de.peek_error(ErrorCode::EofWhileParsingValue)),
-                }
-            } else {
-                Err(map.de.peek_error(ErrorCode::ExpectedObjectCommaOrEnd))
-            }
-        }
-
-        if tri!(has_next_key(self)) {
-            Ok(Some(tri!(seed.deserialize(MapKey { de: &mut *self.de }))))
-        } else {
-            Ok(None)
-        }
-    }
-
-    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
-    where
-        V: de::DeserializeSeed<'de>,
-    {
-        tri!(self.de.parse_object_colon());
-
-        seed.deserialize(&mut *self.de)
-    }
-}
-
-struct VariantAccess<'a, R: 'a> {
-    de: &'a mut Deserializer<R>,
-}
-
-impl<'a, R: 'a> VariantAccess<'a, R> {
-    fn new(de: &'a mut Deserializer<R>) -> Self {
-        VariantAccess { de }
-    }
-}
-
-impl<'de, 'a, R: Read<'de> + 'a> de::EnumAccess<'de> for VariantAccess<'a, R> {
-    type Error = Error;
-    type Variant = Self;
-
-    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self)>
-    where
-        V: de::DeserializeSeed<'de>,
-    {
-        let val = tri!(seed.deserialize(&mut *self.de));
-        tri!(self.de.parse_object_colon());
-        Ok((val, self))
-    }
-}
-
-impl<'de, 'a, R: Read<'de> + 'a> de::VariantAccess<'de> for VariantAccess<'a, R> {
-    type Error = Error;
-
-    fn unit_variant(self) -> Result<()> {
-        de::Deserialize::deserialize(self.de)
-    }
-
-    fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value>
-    where
-        T: de::DeserializeSeed<'de>,
-    {
-        seed.deserialize(self.de)
-    }
-
-    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        de::Deserializer::deserialize_seq(self.de, visitor)
-    }
-
-    fn struct_variant<V>(self, fields: &'static [&'static str], visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        de::Deserializer::deserialize_struct(self.de, "", fields, visitor)
-    }
-}
-
-struct UnitVariantAccess<'a, R: 'a> {
-    de: &'a mut Deserializer<R>,
-}
-
-impl<'a, R: 'a> UnitVariantAccess<'a, R> {
-    fn new(de: &'a mut Deserializer<R>) -> Self {
-        UnitVariantAccess { de }
-    }
-}
-
-impl<'de, 'a, R: Read<'de> + 'a> de::EnumAccess<'de> for UnitVariantAccess<'a, R> {
-    type Error = Error;
-    type Variant = Self;
-
-    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self)>
-    where
-        V: de::DeserializeSeed<'de>,
-    {
-        let variant = tri!(seed.deserialize(&mut *self.de));
-        Ok((variant, self))
-    }
-}
-
-impl<'de, 'a, R: Read<'de> + 'a> de::VariantAccess<'de> for UnitVariantAccess<'a, R> {
-    type Error = Error;
-
-    fn unit_variant(self) -> Result<()> {
-        Ok(())
-    }
-
-    fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value>
-    where
-        T: de::DeserializeSeed<'de>,
-    {
-        Err(de::Error::invalid_type(
-            Unexpected::UnitVariant,
-            &"newtype variant",
-        ))
-    }
-
-    fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        Err(de::Error::invalid_type(
-            Unexpected::UnitVariant,
-            &"tuple variant",
-        ))
-    }
-
-    fn struct_variant<V>(self, _fields: &'static [&'static str], _visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        Err(de::Error::invalid_type(
-            Unexpected::UnitVariant,
-            &"struct variant",
-        ))
-    }
-}
-
-/// Only deserialize from this after peeking a '"' byte! Otherwise it may
-/// deserialize invalid JSON successfully.
-struct MapKey<'a, R: 'a> {
-    de: &'a mut Deserializer<R>,
-}
-
-macro_rules! deserialize_numeric_key {
-    ($method:ident) => {
-        fn $method<V>(self, visitor: V) -> Result<V::Value>
-        where
-            V: de::Visitor<'de>,
-        {
-            self.deserialize_number(visitor)
-        }
-    };
-
-    ($method:ident, $delegate:ident) => {
-        fn $method<V>(self, visitor: V) -> Result<V::Value>
-        where
-            V: de::Visitor<'de>,
-        {
-            self.de.eat_char();
-
-            match tri!(self.de.peek()) {
-                Some(b'0'..=b'9' | b'-') => {}
-                _ => return Err(self.de.error(ErrorCode::ExpectedNumericKey)),
-            }
-
-            let value = tri!(self.de.$delegate(visitor));
-
-            match tri!(self.de.peek()) {
-                Some(b'"') => self.de.eat_char(),
-                _ => return Err(self.de.peek_error(ErrorCode::ExpectedDoubleQuote)),
-            }
-
-            Ok(value)
-        }
-    };
-}
-
-impl<'de, 'a, R> MapKey<'a, R>
-where
-    R: Read<'de>,
-{
-    deserialize_numeric_key!(deserialize_number, deserialize_number);
-}
-
-impl<'de, 'a, R> de::Deserializer<'de> for MapKey<'a, R>
-where
-    R: Read<'de>,
-{
-    type Error = Error;
-
-    #[inline]
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.de.eat_char();
-        self.de.scratch.clear();
-        match tri!(self.de.read.parse_str(&mut self.de.scratch)) {
-            Reference::Borrowed(s) => visitor.visit_borrowed_str(s),
-            Reference::Copied(s) => visitor.visit_str(s),
-        }
-    }
-
-    deserialize_numeric_key!(deserialize_i8);
-    deserialize_numeric_key!(deserialize_i16);
-    deserialize_numeric_key!(deserialize_i32);
-    deserialize_numeric_key!(deserialize_i64);
-    deserialize_numeric_key!(deserialize_i128, deserialize_i128);
-    deserialize_numeric_key!(deserialize_u8);
-    deserialize_numeric_key!(deserialize_u16);
-    deserialize_numeric_key!(deserialize_u32);
-    deserialize_numeric_key!(deserialize_u64);
-    deserialize_numeric_key!(deserialize_u128, deserialize_u128);
-    #[cfg(not(feature = "float_roundtrip"))]
-    deserialize_numeric_key!(deserialize_f32);
-    #[cfg(feature = "float_roundtrip")]
-    deserialize_numeric_key!(deserialize_f32, deserialize_f32);
-    deserialize_numeric_key!(deserialize_f64);
-
-    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.de.eat_char();
-
-        let peek = match tri!(self.de.next_char()) {
-            Some(b) => b,
-            None => {
-                return Err(self.de.peek_error(ErrorCode::EofWhileParsingValue));
-            }
-        };
-
-        let value = match peek {
-            b't' => {
-                tri!(self.de.parse_ident(b"rue\""));
-                visitor.visit_bool(true)
-            }
-            b'f' => {
-                tri!(self.de.parse_ident(b"alse\""));
-                visitor.visit_bool(false)
-            }
-            _ => {
-                self.de.scratch.clear();
-                let s = tri!(self.de.read.parse_str(&mut self.de.scratch));
-                Err(de::Error::invalid_type(Unexpected::Str(&s), &visitor))
-            }
-        };
-
-        match value {
-            Ok(value) => Ok(value),
-            Err(err) => Err(self.de.fix_position(err)),
-        }
-    }
-
-    #[inline]
-    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        // Map keys cannot be null.
-        visitor.visit_some(self)
-    }
-
-    #[inline]
-    fn deserialize_newtype_struct<V>(self, name: &'static str, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        #[cfg(feature = "raw_value")]
-        {
-            if name == crate::raw::TOKEN {
-                return self.de.deserialize_raw_value(visitor);
-            }
-        }
-
-        let _ = name;
-        visitor.visit_newtype_struct(self)
-    }
-
-    #[inline]
-    fn deserialize_enum<V>(
-        self,
-        name: &'static str,
-        variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.de.deserialize_enum(name, variants, visitor)
-    }
-
-    #[inline]
-    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.de.deserialize_bytes(visitor)
-    }
-
-    #[inline]
-    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value>
-    where
-        V: de::Visitor<'de>,
-    {
-        self.de.deserialize_bytes(visitor)
-    }
-
-    forward_to_deserialize_any! {
-        char str string unit unit_struct seq tuple tuple_struct map struct
-        identifier ignored_any
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// Iterator that deserializes a stream into multiple JSON values.
-///
-/// A stream deserializer can be created from any JSON deserializer using the
-/// `Deserializer::into_iter` method.
-///
-/// The data can consist of any JSON value. Values need to be a self-delineating value e.g.
-/// arrays, objects, or strings, or be followed by whitespace or a self-delineating value.
-///
-/// ```
-/// use serde_json::{Deserializer, Value};
-///
-/// fn main() {
-///     let data = "{\"k\": 3}1\"cool\"\"stuff\" 3{}  [0, 1, 2]";
-///
-///     let stream = Deserializer::from_str(data).into_iter::<Value>();
-///
-///     for value in stream {
-///         println!("{}", value.unwrap());
-///     }
-/// }
-/// ```
-pub struct StreamDeserializer<'de, R, T> {
-    de: Deserializer<R>,
-    offset: usize,
-    failed: bool,
-    output: PhantomData<T>,
-    lifetime: PhantomData<&'de ()>,
-}
-
-impl<'de, R, T> StreamDeserializer<'de, R, T>
-where
-    R: read::Read<'de>,
-    T: de::Deserialize<'de>,
-{
-    /// Create a JSON stream deserializer from one of the possible serde_json
-    /// input sources.
-    ///
-    /// Typically it is more convenient to use one of these methods instead:
-    ///
-    ///   - Deserializer::from_str(...).into_iter()
-    ///   - Deserializer::from_slice(...).into_iter()
-    ///   - Deserializer::from_reader(...).into_iter()
-    pub fn new(read: R) -> Self {
-        let offset = read.byte_offset();
-        StreamDeserializer {
-            de: Deserializer::new(read),
-            offset,
-            failed: false,
-            output: PhantomData,
-            lifetime: PhantomData,
-        }
-    }
-
-    /// Returns the number of bytes so far deserialized into a successful `T`.
-    ///
-    /// If a stream deserializer returns an EOF error, new data can be joined to
-    /// `old_data[stream.byte_offset()..]` to try again.
-    ///
-    /// ```
-    /// let data = b"[0] [1] [";
-    ///
-    /// let de = serde_json::Deserializer::from_slice(data);
-    /// let mut stream = de.into_iter::<Vec<i32>>();
-    /// assert_eq!(0, stream.byte_offset());
-    ///
-    /// println!("{:?}", stream.next()); // [0]
-    /// assert_eq!(3, stream.byte_offset());
-    ///
-    /// println!("{:?}", stream.next()); // [1]
-    /// assert_eq!(7, stream.byte_offset());
-    ///
-    /// println!("{:?}", stream.next()); // error
-    /// assert_eq!(8, stream.byte_offset());
-    ///
-    /// // If err.is_eof(), can join the remaining data to new data and continue.
-    /// let remaining = &data[stream.byte_offset()..];
-    /// ```
-    ///
-    /// *Note:* In the future this method may be changed to return the number of
-    /// bytes so far deserialized into a successful T *or* syntactically valid
-    /// JSON skipped over due to a type error. See [serde-rs/json#70] for an
-    /// example illustrating this.
-    ///
-    /// [serde-rs/json#70]: https://github.com/serde-rs/json/issues/70
-    pub fn byte_offset(&self) -> usize {
-        self.offset
-    }
-
-    fn peek_end_of_value(&mut self) -> Result<()> {
-        match tri!(self.de.peek()) {
-            Some(b' ' | b'\n' | b'\t' | b'\r' | b'"' | b'[' | b']' | b'{' | b'}' | b',' | b':')
-            | None => Ok(()),
-            Some(_) => {
-                let position = self.de.read.peek_position();
-                Err(Error::syntax(
-                    ErrorCode::TrailingCharacters,
-                    position.line,
-                    position.column,
-                ))
-            }
-        }
-    }
-}
-
-impl<'de, R, T> Iterator for StreamDeserializer<'de, R, T>
-where
-    R: Read<'de>,
-    T: de::Deserialize<'de>,
-{
-    type Item = Result<T>;
-
-    fn next(&mut self) -> Option<Result<T>> {
-        if R::should_early_return_if_failed && self.failed {
-            return None;
-        }
-
-        // skip whitespaces, if any
-        // this helps with trailing whitespaces, since whitespaces between
-        // values are handled for us.
-        match self.de.parse_whitespace() {
-            Ok(None) => {
-                self.offset = self.de.read.byte_offset();
-                None
-            }
-            Ok(Some(b)) => {
-                // If the value does not have a clear way to show the end of the value
-                // (like numbers, null, true etc.) we have to look for whitespace or
-                // the beginning of a self-delineated value.
-                let self_delineated_value = match b {
-                    b'[' | b'"' | b'{' => true,
-                    _ => false,
-                };
-                self.offset = self.de.read.byte_offset();
-                let result = de::Deserialize::deserialize(&mut self.de);
-
-                Some(match result {
-                    Ok(value) => {
-                        self.offset = self.de.read.byte_offset();
-                        if self_delineated_value {
-                            Ok(value)
-                        } else {
-                            self.peek_end_of_value().map(|()| value)
-                        }
-                    }
-                    Err(e) => {
-                        self.de.read.set_failed(&mut self.failed);
-                        Err(e)
-                    }
-                })
-            }
-            Err(e) => {
-                self.de.read.set_failed(&mut self.failed);
-                Some(Err(e))
-            }
-        }
-    }
-}
-
-impl<'de, R, T> FusedIterator for StreamDeserializer<'de, R, T>
-where
-    R: Read<'de> + Fused,
-    T: de::Deserialize<'de>,
-{
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-fn from_trait<'de, R, T>(read: R) -> Result<T>
-where
-    R: Read<'de>,
-    T: de::Deserialize<'de>,
-{
-    let mut de = Deserializer::new(read);
-    let value = tri!(de::Deserialize::deserialize(&mut de));
-
-    // Make sure the whole stream has been consumed.
-    tri!(de.end());
-    Ok(value)
-}
-
-/// Deserialize an instance of type `T` from an I/O stream of JSON.
-///
-/// The content of the I/O stream is deserialized directly from the stream
-/// without being buffered in memory by serde_json.
-///
-/// When reading from a source against which short reads are not efficient, such
-/// as a [`File`], you will want to apply your own buffering because serde_json
-/// will not buffer the input. See [`std::io::BufReader`].
-///
-/// It is expected that the input stream ends after the deserialized object.
-/// If the stream does not end, such as in the case of a persistent socket connection,
-/// this function will not return. It is possible instead to deserialize from a prefix of an input
-/// stream without looking for EOF by managing your own [`Deserializer`].
-///
-/// Note that counter to intuition, this function is usually slower than
-/// reading a file completely into memory and then applying [`from_str`]
-/// or [`from_slice`] on it. See [issue #160].
-///
-/// [`File`]: std::fs::File
-/// [issue #160]: https://github.com/serde-rs/json/issues/160
-///
-/// # Example
-///
-/// Reading the contents of a file.
-///
-/// ```
-/// use serde::Deserialize;
-///
-/// use std::error::Error;
-/// use std::fs::File;
-/// use std::io::BufReader;
-/// use std::path::Path;
-///
-/// #[derive(Deserialize, Debug)]
-/// struct User {
-///     fingerprint: String,
-///     location: String,
-/// }
-///
-/// fn read_user_from_file<P: AsRef<Path>>(path: P) -> Result<User, Box<dyn Error>> {
-///     // Open the file in read-only mode with buffer.
-///     let file = File::open(path)?;
-///     let reader = BufReader::new(file);
-///
-///     // Read the JSON contents of the file as an instance of `User`.
-///     let u = serde_json::from_reader(reader)?;
-///
-///     // Return the `User`.
-///     Ok(u)
-/// }
-///
-/// fn main() {
-/// # }
-/// # fn fake_main() {
-///     let u = read_user_from_file("test.json").unwrap();
-///     println!("{:#?}", u);
-/// }
-/// ```
-///
-/// Reading from a persistent socket connection.
-///
-/// ```
-/// use serde::Deserialize;
-///
-/// use std::error::Error;
-/// use std::io::BufReader;
-/// use std::net::{TcpListener, TcpStream};
-///
-/// #[derive(Deserialize, Debug)]
-/// struct User {
-///     fingerprint: String,
-///     location: String,
-/// }
-///
-/// fn read_user_from_stream(stream: &mut BufReader<TcpStream>) -> Result<User, Box<dyn Error>> {
-///     let mut de = serde_json::Deserializer::from_reader(stream);
-///     let u = User::deserialize(&mut de)?;
-///
-///     Ok(u)
-/// }
-///
-/// fn main() {
-/// # }
-/// # fn fake_main() {
-///     let listener = TcpListener::bind("127.0.0.1:4000").unwrap();
-///
-///     for tcp_stream in listener.incoming() {
-///         let mut buffered = BufReader::new(tcp_stream.unwrap());
-///         println!("{:#?}", read_user_from_stream(&mut buffered));
-///     }
-/// }
-/// ```
-///
-/// # Errors
-///
-/// This conversion can fail if the structure of the input does not match the
-/// structure expected by `T`, for example if `T` is a struct type but the input
-/// contains something other than a JSON map. It can also fail if the structure
-/// is correct but `T`'s implementation of `Deserialize` decides that something
-/// is wrong with the data, for example required struct fields are missing from
-/// the JSON map or some number is too big to fit in the expected primitive
-/// type.
-#[cfg(feature = "std")]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-pub fn from_reader<R, T>(rdr: R) -> Result<T>
-where
-    R: crate::io::Read,
-    T: de::DeserializeOwned,
-{
-    from_trait(read::IoRead::new(rdr))
-}
-
-/// Deserialize an instance of type `T` from bytes of JSON text.
-///
-/// # Example
-///
-/// ```
-/// use serde::Deserialize;
-///
-/// #[derive(Deserialize, Debug)]
-/// struct User {
-///     fingerprint: String,
-///     location: String,
-/// }
-///
-/// fn main() {
-///     // The type of `j` is `&[u8]`
-///     let j = b"
-///         {
-///             \"fingerprint\": \"0xF9BA143B95FF6D82\",
-///             \"location\": \"Menlo Park, CA\"
-///         }";
-///
-///     let u: User = serde_json::from_slice(j).unwrap();
-///     println!("{:#?}", u);
-/// }
-/// ```
-///
-/// # Errors
-///
-/// This conversion can fail if the structure of the input does not match the
-/// structure expected by `T`, for example if `T` is a struct type but the input
-/// contains something other than a JSON map. It can also fail if the structure
-/// is correct but `T`'s implementation of `Deserialize` decides that something
-/// is wrong with the data, for example required struct fields are missing from
-/// the JSON map or some number is too big to fit in the expected primitive
-/// type.
-pub fn from_slice<'a, T>(v: &'a [u8]) -> Result<T>
-where
-    T: de::Deserialize<'a>,
-{
-    from_trait(read::SliceRead::new(v))
-}
-
-/// Deserialize an instance of type `T` from a string of JSON text.
-///
-/// # Example
-///
-/// ```
-/// use serde::Deserialize;
-///
-/// #[derive(Deserialize, Debug)]
-/// struct User {
-///     fingerprint: String,
-///     location: String,
-/// }
-///
-/// fn main() {
-///     // The type of `j` is `&str`
-///     let j = "
-///         {
-///             \"fingerprint\": \"0xF9BA143B95FF6D82\",
-///             \"location\": \"Menlo Park, CA\"
-///         }";
-///
-///     let u: User = serde_json::from_str(j).unwrap();
-///     println!("{:#?}", u);
-/// }
-/// ```
-///
-/// # Errors
-///
-/// This conversion can fail if the structure of the input does not match the
-/// structure expected by `T`, for example if `T` is a struct type but the input
-/// contains something other than a JSON map. It can also fail if the structure
-/// is correct but `T`'s implementation of `Deserialize` decides that something
-/// is wrong with the data, for example required struct fields are missing from
-/// the JSON map or some number is too big to fit in the expected primitive
-/// type.
-pub fn from_str<'a, T>(s: &'a str) -> Result<T>
-where
-    T: de::Deserialize<'a>,
-{
-    from_trait(read::StrRead::new(s))
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/error.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/error.rs
deleted file mode 100644
index fbf9eb1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/error.rs
+++ /dev/null
@@ -1,541 +0,0 @@
-//! When serializing or deserializing JSON goes wrong.
-
-use crate::io;
-use alloc::boxed::Box;
-use alloc::string::{String, ToString};
-use core::fmt::{self, Debug, Display};
-use core::result;
-use core::str::FromStr;
-use serde::{de, ser};
-#[cfg(feature = "std")]
-use std::error;
-#[cfg(feature = "std")]
-use std::io::ErrorKind;
-
-/// This type represents all possible errors that can occur when serializing or
-/// deserializing JSON data.
-pub struct Error {
-    /// This `Box` allows us to keep the size of `Error` as small as possible. A
-    /// larger `Error` type was substantially slower due to all the functions
-    /// that pass around `Result<T, Error>`.
-    err: Box<ErrorImpl>,
-}
-
-/// Alias for a `Result` with the error type `serde_json::Error`.
-pub type Result<T> = result::Result<T, Error>;
-
-impl Error {
-    /// One-based line number at which the error was detected.
-    ///
-    /// Characters in the first line of the input (before the first newline
-    /// character) are in line 1.
-    pub fn line(&self) -> usize {
-        self.err.line
-    }
-
-    /// One-based column number at which the error was detected.
-    ///
-    /// The first character in the input and any characters immediately
-    /// following a newline character are in column 1.
-    ///
-    /// Note that errors may occur in column 0, for example if a read from an
-    /// I/O stream fails immediately following a previously read newline
-    /// character.
-    pub fn column(&self) -> usize {
-        self.err.column
-    }
-
-    /// Categorizes the cause of this error.
-    ///
-    /// - `Category::Io` - failure to read or write bytes on an I/O stream
-    /// - `Category::Syntax` - input that is not syntactically valid JSON
-    /// - `Category::Data` - input data that is semantically incorrect
-    /// - `Category::Eof` - unexpected end of the input data
-    pub fn classify(&self) -> Category {
-        match self.err.code {
-            ErrorCode::Message(_) => Category::Data,
-            ErrorCode::Io(_) => Category::Io,
-            ErrorCode::EofWhileParsingList
-            | ErrorCode::EofWhileParsingObject
-            | ErrorCode::EofWhileParsingString
-            | ErrorCode::EofWhileParsingValue => Category::Eof,
-            ErrorCode::ExpectedColon
-            | ErrorCode::ExpectedListCommaOrEnd
-            | ErrorCode::ExpectedObjectCommaOrEnd
-            | ErrorCode::ExpectedSomeIdent
-            | ErrorCode::ExpectedSomeValue
-            | ErrorCode::ExpectedDoubleQuote
-            | ErrorCode::InvalidEscape
-            | ErrorCode::InvalidNumber
-            | ErrorCode::NumberOutOfRange
-            | ErrorCode::InvalidUnicodeCodePoint
-            | ErrorCode::ControlCharacterWhileParsingString
-            | ErrorCode::KeyMustBeAString
-            | ErrorCode::ExpectedNumericKey
-            | ErrorCode::FloatKeyMustBeFinite
-            | ErrorCode::LoneLeadingSurrogateInHexEscape
-            | ErrorCode::TrailingComma
-            | ErrorCode::TrailingCharacters
-            | ErrorCode::UnexpectedEndOfHexEscape
-            | ErrorCode::RecursionLimitExceeded => Category::Syntax,
-        }
-    }
-
-    /// Returns true if this error was caused by a failure to read or write
-    /// bytes on an I/O stream.
-    pub fn is_io(&self) -> bool {
-        self.classify() == Category::Io
-    }
-
-    /// Returns true if this error was caused by input that was not
-    /// syntactically valid JSON.
-    pub fn is_syntax(&self) -> bool {
-        self.classify() == Category::Syntax
-    }
-
-    /// Returns true if this error was caused by input data that was
-    /// semantically incorrect.
-    ///
-    /// For example, JSON containing a number is semantically incorrect when the
-    /// type being deserialized into holds a String.
-    pub fn is_data(&self) -> bool {
-        self.classify() == Category::Data
-    }
-
-    /// Returns true if this error was caused by prematurely reaching the end of
-    /// the input data.
-    ///
-    /// Callers that process streaming input may be interested in retrying the
-    /// deserialization once more data is available.
-    pub fn is_eof(&self) -> bool {
-        self.classify() == Category::Eof
-    }
-
-    /// The kind reported by the underlying standard library I/O error, if this
-    /// error was caused by a failure to read or write bytes on an I/O stream.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use serde_json::Value;
-    /// use std::io::{self, ErrorKind, Read};
-    /// use std::process;
-    ///
-    /// struct ReaderThatWillTimeOut<'a>(&'a [u8]);
-    ///
-    /// impl<'a> Read for ReaderThatWillTimeOut<'a> {
-    ///     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-    ///         if self.0.is_empty() {
-    ///             Err(io::Error::new(ErrorKind::TimedOut, "timed out"))
-    ///         } else {
-    ///             self.0.read(buf)
-    ///         }
-    ///     }
-    /// }
-    ///
-    /// fn main() {
-    ///     let reader = ReaderThatWillTimeOut(br#" {"k": "#);
-    ///
-    ///     let _: Value = match serde_json::from_reader(reader) {
-    ///         Ok(value) => value,
-    ///         Err(error) => {
-    ///             if error.io_error_kind() == Some(ErrorKind::TimedOut) {
-    ///                 // Maybe this application needs to retry certain kinds of errors.
-    ///
-    ///                 # return;
-    ///             } else {
-    ///                 eprintln!("error: {}", error);
-    ///                 process::exit(1);
-    ///             }
-    ///         }
-    ///     };
-    /// }
-    /// ```
-    #[cfg(feature = "std")]
-    pub fn io_error_kind(&self) -> Option<ErrorKind> {
-        if let ErrorCode::Io(io_error) = &self.err.code {
-            Some(io_error.kind())
-        } else {
-            None
-        }
-    }
-}
-
-/// Categorizes the cause of a `serde_json::Error`.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum Category {
-    /// The error was caused by a failure to read or write bytes on an I/O
-    /// stream.
-    Io,
-
-    /// The error was caused by input that was not syntactically valid JSON.
-    Syntax,
-
-    /// The error was caused by input data that was semantically incorrect.
-    ///
-    /// For example, JSON containing a number is semantically incorrect when the
-    /// type being deserialized into holds a String.
-    Data,
-
-    /// The error was caused by prematurely reaching the end of the input data.
-    ///
-    /// Callers that process streaming input may be interested in retrying the
-    /// deserialization once more data is available.
-    Eof,
-}
-
-#[cfg(feature = "std")]
-#[allow(clippy::fallible_impl_from)]
-impl From<Error> for io::Error {
-    /// Convert a `serde_json::Error` into an `io::Error`.
-    ///
-    /// JSON syntax and data errors are turned into `InvalidData` I/O errors.
-    /// EOF errors are turned into `UnexpectedEof` I/O errors.
-    ///
-    /// ```
-    /// use std::io;
-    ///
-    /// enum MyError {
-    ///     Io(io::Error),
-    ///     Json(serde_json::Error),
-    /// }
-    ///
-    /// impl From<serde_json::Error> for MyError {
-    ///     fn from(err: serde_json::Error) -> MyError {
-    ///         use serde_json::error::Category;
-    ///         match err.classify() {
-    ///             Category::Io => {
-    ///                 MyError::Io(err.into())
-    ///             }
-    ///             Category::Syntax | Category::Data | Category::Eof => {
-    ///                 MyError::Json(err)
-    ///             }
-    ///         }
-    ///     }
-    /// }
-    /// ```
-    fn from(j: Error) -> Self {
-        if let ErrorCode::Io(err) = j.err.code {
-            err
-        } else {
-            match j.classify() {
-                Category::Io => unreachable!(),
-                Category::Syntax | Category::Data => io::Error::new(ErrorKind::InvalidData, j),
-                Category::Eof => io::Error::new(ErrorKind::UnexpectedEof, j),
-            }
-        }
-    }
-}
-
-struct ErrorImpl {
-    code: ErrorCode,
-    line: usize,
-    column: usize,
-}
-
-pub(crate) enum ErrorCode {
-    /// Catchall for syntax error messages
-    Message(Box<str>),
-
-    /// Some I/O error occurred while serializing or deserializing.
-    Io(io::Error),
-
-    /// EOF while parsing a list.
-    EofWhileParsingList,
-
-    /// EOF while parsing an object.
-    EofWhileParsingObject,
-
-    /// EOF while parsing a string.
-    EofWhileParsingString,
-
-    /// EOF while parsing a JSON value.
-    EofWhileParsingValue,
-
-    /// Expected this character to be a `':'`.
-    ExpectedColon,
-
-    /// Expected this character to be either a `','` or a `']'`.
-    ExpectedListCommaOrEnd,
-
-    /// Expected this character to be either a `','` or a `'}'`.
-    ExpectedObjectCommaOrEnd,
-
-    /// Expected to parse either a `true`, `false`, or a `null`.
-    ExpectedSomeIdent,
-
-    /// Expected this character to start a JSON value.
-    ExpectedSomeValue,
-
-    /// Expected this character to be a `"`.
-    ExpectedDoubleQuote,
-
-    /// Invalid hex escape code.
-    InvalidEscape,
-
-    /// Invalid number.
-    InvalidNumber,
-
-    /// Number is bigger than the maximum value of its type.
-    NumberOutOfRange,
-
-    /// Invalid unicode code point.
-    InvalidUnicodeCodePoint,
-
-    /// Control character found while parsing a string.
-    ControlCharacterWhileParsingString,
-
-    /// Object key is not a string.
-    KeyMustBeAString,
-
-    /// Contents of key were supposed to be a number.
-    ExpectedNumericKey,
-
-    /// Object key is a non-finite float value.
-    FloatKeyMustBeFinite,
-
-    /// Lone leading surrogate in hex escape.
-    LoneLeadingSurrogateInHexEscape,
-
-    /// JSON has a comma after the last value in an array or map.
-    TrailingComma,
-
-    /// JSON has non-whitespace trailing characters after the value.
-    TrailingCharacters,
-
-    /// Unexpected end of hex escape.
-    UnexpectedEndOfHexEscape,
-
-    /// Encountered nesting of JSON maps and arrays more than 128 layers deep.
-    RecursionLimitExceeded,
-}
-
-impl Error {
-    #[cold]
-    pub(crate) fn syntax(code: ErrorCode, line: usize, column: usize) -> Self {
-        Error {
-            err: Box::new(ErrorImpl { code, line, column }),
-        }
-    }
-
-    // Not public API. Should be pub(crate).
-    //
-    // Update `eager_json` crate when this function changes.
-    #[doc(hidden)]
-    #[cold]
-    pub fn io(error: io::Error) -> Self {
-        Error {
-            err: Box::new(ErrorImpl {
-                code: ErrorCode::Io(error),
-                line: 0,
-                column: 0,
-            }),
-        }
-    }
-
-    #[cold]
-    pub(crate) fn fix_position<F>(self, f: F) -> Self
-    where
-        F: FnOnce(ErrorCode) -> Error,
-    {
-        if self.err.line == 0 {
-            f(self.err.code)
-        } else {
-            self
-        }
-    }
-}
-
-impl Display for ErrorCode {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self {
-            ErrorCode::Message(msg) => f.write_str(msg),
-            ErrorCode::Io(err) => Display::fmt(err, f),
-            ErrorCode::EofWhileParsingList => f.write_str("EOF while parsing a list"),
-            ErrorCode::EofWhileParsingObject => f.write_str("EOF while parsing an object"),
-            ErrorCode::EofWhileParsingString => f.write_str("EOF while parsing a string"),
-            ErrorCode::EofWhileParsingValue => f.write_str("EOF while parsing a value"),
-            ErrorCode::ExpectedColon => f.write_str("expected `:`"),
-            ErrorCode::ExpectedListCommaOrEnd => f.write_str("expected `,` or `]`"),
-            ErrorCode::ExpectedObjectCommaOrEnd => f.write_str("expected `,` or `}`"),
-            ErrorCode::ExpectedSomeIdent => f.write_str("expected ident"),
-            ErrorCode::ExpectedSomeValue => f.write_str("expected value"),
-            ErrorCode::ExpectedDoubleQuote => f.write_str("expected `\"`"),
-            ErrorCode::InvalidEscape => f.write_str("invalid escape"),
-            ErrorCode::InvalidNumber => f.write_str("invalid number"),
-            ErrorCode::NumberOutOfRange => f.write_str("number out of range"),
-            ErrorCode::InvalidUnicodeCodePoint => f.write_str("invalid unicode code point"),
-            ErrorCode::ControlCharacterWhileParsingString => {
-                f.write_str("control character (\\u0000-\\u001F) found while parsing a string")
-            }
-            ErrorCode::KeyMustBeAString => f.write_str("key must be a string"),
-            ErrorCode::ExpectedNumericKey => {
-                f.write_str("invalid value: expected key to be a number in quotes")
-            }
-            ErrorCode::FloatKeyMustBeFinite => {
-                f.write_str("float key must be finite (got NaN or +/-inf)")
-            }
-            ErrorCode::LoneLeadingSurrogateInHexEscape => {
-                f.write_str("lone leading surrogate in hex escape")
-            }
-            ErrorCode::TrailingComma => f.write_str("trailing comma"),
-            ErrorCode::TrailingCharacters => f.write_str("trailing characters"),
-            ErrorCode::UnexpectedEndOfHexEscape => f.write_str("unexpected end of hex escape"),
-            ErrorCode::RecursionLimitExceeded => f.write_str("recursion limit exceeded"),
-        }
-    }
-}
-
-impl serde::de::StdError for Error {
-    #[cfg(feature = "std")]
-    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
-        match &self.err.code {
-            ErrorCode::Io(err) => err.source(),
-            _ => None,
-        }
-    }
-}
-
-impl Display for Error {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        Display::fmt(&*self.err, f)
-    }
-}
-
-impl Display for ErrorImpl {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        if self.line == 0 {
-            Display::fmt(&self.code, f)
-        } else {
-            write!(
-                f,
-                "{} at line {} column {}",
-                self.code, self.line, self.column
-            )
-        }
-    }
-}
-
-// Remove two layers of verbosity from the debug representation. Humans often
-// end up seeing this representation because it is what unwrap() shows.
-impl Debug for Error {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "Error({:?}, line: {}, column: {})",
-            self.err.code.to_string(),
-            self.err.line,
-            self.err.column
-        )
-    }
-}
-
-impl de::Error for Error {
-    #[cold]
-    fn custom<T: Display>(msg: T) -> Error {
-        make_error(msg.to_string())
-    }
-
-    #[cold]
-    fn invalid_type(unexp: de::Unexpected, exp: &dyn de::Expected) -> Self {
-        Error::custom(format_args!(
-            "invalid type: {}, expected {}",
-            JsonUnexpected(unexp),
-            exp,
-        ))
-    }
-
-    #[cold]
-    fn invalid_value(unexp: de::Unexpected, exp: &dyn de::Expected) -> Self {
-        Error::custom(format_args!(
-            "invalid value: {}, expected {}",
-            JsonUnexpected(unexp),
-            exp,
-        ))
-    }
-}
-
-impl ser::Error for Error {
-    #[cold]
-    fn custom<T: Display>(msg: T) -> Error {
-        make_error(msg.to_string())
-    }
-}
-
-struct JsonUnexpected<'a>(de::Unexpected<'a>);
-
-impl<'a> Display for JsonUnexpected<'a> {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        match self.0 {
-            de::Unexpected::Unit => formatter.write_str("null"),
-            de::Unexpected::Float(value) => write!(
-                formatter,
-                "floating point `{}`",
-                ryu::Buffer::new().format(value),
-            ),
-            unexp => Display::fmt(&unexp, formatter),
-        }
-    }
-}
-
-// Parse our own error message that looks like "{} at line {} column {}" to work
-// around erased-serde round-tripping the error through de::Error::custom.
-fn make_error(mut msg: String) -> Error {
-    let (line, column) = parse_line_col(&mut msg).unwrap_or((0, 0));
-    Error {
-        err: Box::new(ErrorImpl {
-            code: ErrorCode::Message(msg.into_boxed_str()),
-            line,
-            column,
-        }),
-    }
-}
-
-fn parse_line_col(msg: &mut String) -> Option<(usize, usize)> {
-    let start_of_suffix = match msg.rfind(" at line ") {
-        Some(index) => index,
-        None => return None,
-    };
-
-    // Find start and end of line number.
-    let start_of_line = start_of_suffix + " at line ".len();
-    let mut end_of_line = start_of_line;
-    while starts_with_digit(&msg[end_of_line..]) {
-        end_of_line += 1;
-    }
-
-    if !msg[end_of_line..].starts_with(" column ") {
-        return None;
-    }
-
-    // Find start and end of column number.
-    let start_of_column = end_of_line + " column ".len();
-    let mut end_of_column = start_of_column;
-    while starts_with_digit(&msg[end_of_column..]) {
-        end_of_column += 1;
-    }
-
-    if end_of_column < msg.len() {
-        return None;
-    }
-
-    // Parse numbers.
-    let line = match usize::from_str(&msg[start_of_line..end_of_line]) {
-        Ok(line) => line,
-        Err(_) => return None,
-    };
-    let column = match usize::from_str(&msg[start_of_column..end_of_column]) {
-        Ok(column) => column,
-        Err(_) => return None,
-    };
-
-    msg.truncate(start_of_suffix);
-    Some((line, column))
-}
-
-fn starts_with_digit(slice: &str) -> bool {
-    match slice.as_bytes().first() {
-        None => false,
-        Some(&byte) => byte >= b'0' && byte <= b'9',
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/core.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/core.rs
deleted file mode 100644
index 54c8ddf..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/core.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-//! Reimplements core logic and types from `std::io` in an `alloc`-friendly
-//! fashion.
-
-use alloc::vec::Vec;
-use core::fmt::{self, Display};
-use core::result;
-
-pub enum ErrorKind {
-    Other,
-}
-
-// I/O errors can never occur in no-std mode. All our no-std I/O implementations
-// are infallible.
-pub struct Error;
-
-impl Display for Error {
-    fn fmt(&self, _formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        unreachable!()
-    }
-}
-
-impl Error {
-    pub(crate) fn new(_kind: ErrorKind, _error: &'static str) -> Error {
-        Error
-    }
-}
-
-pub type Result<T> = result::Result<T, Error>;
-
-pub trait Write {
-    fn write(&mut self, buf: &[u8]) -> Result<usize>;
-
-    fn write_all(&mut self, buf: &[u8]) -> Result<()> {
-        // All our Write impls in no_std mode always write the whole buffer in
-        // one call infallibly.
-        let result = self.write(buf);
-        debug_assert!(result.is_ok());
-        debug_assert_eq!(result.unwrap_or(0), buf.len());
-        Ok(())
-    }
-
-    fn flush(&mut self) -> Result<()>;
-}
-
-impl<W: Write> Write for &mut W {
-    #[inline]
-    fn write(&mut self, buf: &[u8]) -> Result<usize> {
-        (*self).write(buf)
-    }
-
-    #[inline]
-    fn write_all(&mut self, buf: &[u8]) -> Result<()> {
-        (*self).write_all(buf)
-    }
-
-    #[inline]
-    fn flush(&mut self) -> Result<()> {
-        (*self).flush()
-    }
-}
-
-impl Write for Vec<u8> {
-    #[inline]
-    fn write(&mut self, buf: &[u8]) -> Result<usize> {
-        self.extend_from_slice(buf);
-        Ok(buf.len())
-    }
-
-    #[inline]
-    fn write_all(&mut self, buf: &[u8]) -> Result<()> {
-        self.extend_from_slice(buf);
-        Ok(())
-    }
-
-    #[inline]
-    fn flush(&mut self) -> Result<()> {
-        Ok(())
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/mod.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/mod.rs
deleted file mode 100644
index 9dee4a0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/mod.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-//! A tiny, `no_std`-friendly facade around `std::io`.
-//! Reexports types from `std` when available; otherwise reimplements and
-//! provides some of the core logic.
-//!
-//! The main reason that `std::io` hasn't found itself reexported as part of
-//! the `core` crate is the `std::io::{Read, Write}` traits' reliance on
-//! `std::io::Error`, which may contain internally a heap-allocated `Box<Error>`
-//! and/or now relying on OS-specific `std::backtrace::Backtrace`.
-
-pub use self::imp::{Error, ErrorKind, Result, Write};
-
-#[cfg(not(feature = "std"))]
-#[path = "core.rs"]
-mod imp;
-
-#[cfg(feature = "std")]
-use std::io as imp;
-
-#[cfg(feature = "std")]
-pub use std::io::{Bytes, Read};
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/iter.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/iter.rs
deleted file mode 100644
index 9792916d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/iter.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-use crate::io;
-
-pub struct LineColIterator<I> {
-    iter: I,
-
-    /// Index of the current line. Characters in the first line of the input
-    /// (before the first newline character) are in line 1.
-    line: usize,
-
-    /// Index of the current column. The first character in the input and any
-    /// characters immediately following a newline character are in column 1.
-    /// The column is 0 immediately after a newline character has been read.
-    col: usize,
-
-    /// Byte offset of the start of the current line. This is the sum of lengths
-    /// of all previous lines. Keeping track of things this way allows efficient
-    /// computation of the current line, column, and byte offset while only
-    /// updating one of the counters in `next()` in the common case.
-    start_of_line: usize,
-}
-
-impl<I> LineColIterator<I>
-where
-    I: Iterator<Item = io::Result<u8>>,
-{
-    pub fn new(iter: I) -> LineColIterator<I> {
-        LineColIterator {
-            iter,
-            line: 1,
-            col: 0,
-            start_of_line: 0,
-        }
-    }
-
-    pub fn line(&self) -> usize {
-        self.line
-    }
-
-    pub fn col(&self) -> usize {
-        self.col
-    }
-
-    pub fn byte_offset(&self) -> usize {
-        self.start_of_line + self.col
-    }
-}
-
-impl<I> Iterator for LineColIterator<I>
-where
-    I: Iterator<Item = io::Result<u8>>,
-{
-    type Item = io::Result<u8>;
-
-    fn next(&mut self) -> Option<io::Result<u8>> {
-        match self.iter.next() {
-            None => None,
-            Some(Ok(b'\n')) => {
-                self.start_of_line += self.col + 1;
-                self.line += 1;
-                self.col = 0;
-                Some(Ok(b'\n'))
-            }
-            Some(Ok(c)) => {
-                self.col += 1;
-                Some(Ok(c))
-            }
-            Some(Err(e)) => Some(Err(e)),
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/algorithm.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/algorithm.rs
deleted file mode 100644
index eaa5e7e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/algorithm.rs
+++ /dev/null
@@ -1,196 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Algorithms to efficiently convert strings to floats.
-
-use super::bhcomp::*;
-use super::cached::*;
-use super::errors::*;
-use super::float::ExtendedFloat;
-use super::num::*;
-use super::small_powers::*;
-
-// FAST
-// ----
-
-/// Convert mantissa to exact value for a non-base2 power.
-///
-/// Returns the resulting float and if the value can be represented exactly.
-pub(crate) fn fast_path<F>(mantissa: u64, exponent: i32) -> Option<F>
-where
-    F: Float,
-{
-    // `mantissa >> (F::MANTISSA_SIZE+1) != 0` effectively checks if the
-    // value has a no bits above the hidden bit, which is what we want.
-    let (min_exp, max_exp) = F::exponent_limit();
-    let shift_exp = F::mantissa_limit();
-    let mantissa_size = F::MANTISSA_SIZE + 1;
-    if mantissa == 0 {
-        Some(F::ZERO)
-    } else if mantissa >> mantissa_size != 0 {
-        // Would require truncation of the mantissa.
-        None
-    } else if exponent == 0 {
-        // 0 exponent, same as value, exact representation.
-        let float = F::as_cast(mantissa);
-        Some(float)
-    } else if exponent >= min_exp && exponent <= max_exp {
-        // Value can be exactly represented, return the value.
-        // Do not use powi, since powi can incrementally introduce
-        // error.
-        let float = F::as_cast(mantissa);
-        Some(float.pow10(exponent))
-    } else if exponent >= 0 && exponent <= max_exp + shift_exp {
-        // Check to see if we have a disguised fast-path, where the
-        // number of digits in the mantissa is very small, but and
-        // so digits can be shifted from the exponent to the mantissa.
-        // https://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
-        let small_powers = POW10_64;
-        let shift = exponent - max_exp;
-        let power = small_powers[shift as usize];
-
-        // Compute the product of the power, if it overflows,
-        // prematurely return early, otherwise, if we didn't overshoot,
-        // we can get an exact value.
-        let value = match mantissa.checked_mul(power) {
-            None => return None,
-            Some(value) => value,
-        };
-        if value >> mantissa_size != 0 {
-            None
-        } else {
-            // Use powi, since it's correct, and faster on
-            // the fast-path.
-            let float = F::as_cast(value);
-            Some(float.pow10(max_exp))
-        }
-    } else {
-        // Cannot be exactly represented, exponent too small or too big,
-        // would require truncation.
-        None
-    }
-}
-
-// MODERATE
-// --------
-
-/// Multiply the floating-point by the exponent.
-///
-/// Multiply by pre-calculated powers of the base, modify the extended-
-/// float, and return if new value and if the value can be represented
-/// accurately.
-fn multiply_exponent_extended<F>(fp: &mut ExtendedFloat, exponent: i32, truncated: bool) -> bool
-where
-    F: Float,
-{
-    let powers = ExtendedFloat::get_powers();
-    let exponent = exponent.saturating_add(powers.bias);
-    let small_index = exponent % powers.step;
-    let large_index = exponent / powers.step;
-    if exponent < 0 {
-        // Guaranteed underflow (assign 0).
-        fp.mant = 0;
-        true
-    } else if large_index as usize >= powers.large.len() {
-        // Overflow (assign infinity)
-        fp.mant = 1 << 63;
-        fp.exp = 0x7FF;
-        true
-    } else {
-        // Within the valid exponent range, multiply by the large and small
-        // exponents and return the resulting value.
-
-        // Track errors to as a factor of unit in last-precision.
-        let mut errors: u32 = 0;
-        if truncated {
-            errors += u64::error_halfscale();
-        }
-
-        // Multiply by the small power.
-        // Check if we can directly multiply by an integer, if not,
-        // use extended-precision multiplication.
-        match fp
-            .mant
-            .overflowing_mul(powers.get_small_int(small_index as usize))
-        {
-            // Overflow, multiplication unsuccessful, go slow path.
-            (_, true) => {
-                fp.normalize();
-                fp.imul(&powers.get_small(small_index as usize));
-                errors += u64::error_halfscale();
-            }
-            // No overflow, multiplication successful.
-            (mant, false) => {
-                fp.mant = mant;
-                fp.normalize();
-            }
-        }
-
-        // Multiply by the large power
-        fp.imul(&powers.get_large(large_index as usize));
-        if errors > 0 {
-            errors += 1;
-        }
-        errors += u64::error_halfscale();
-
-        // Normalize the floating point (and the errors).
-        let shift = fp.normalize();
-        errors <<= shift;
-
-        u64::error_is_accurate::<F>(errors, fp)
-    }
-}
-
-/// Create a precise native float using an intermediate extended-precision float.
-///
-/// Return the float approximation and if the value can be accurately
-/// represented with mantissa bits of precision.
-#[inline]
-pub(crate) fn moderate_path<F>(
-    mantissa: u64,
-    exponent: i32,
-    truncated: bool,
-) -> (ExtendedFloat, bool)
-where
-    F: Float,
-{
-    let mut fp = ExtendedFloat {
-        mant: mantissa,
-        exp: 0,
-    };
-    let valid = multiply_exponent_extended::<F>(&mut fp, exponent, truncated);
-    (fp, valid)
-}
-
-// FALLBACK
-// --------
-
-/// Fallback path when the fast path does not work.
-///
-/// Uses the moderate path, if applicable, otherwise, uses the slow path
-/// as required.
-pub(crate) fn fallback_path<F>(
-    integer: &[u8],
-    fraction: &[u8],
-    mantissa: u64,
-    exponent: i32,
-    mantissa_exponent: i32,
-    truncated: bool,
-) -> F
-where
-    F: Float,
-{
-    // Moderate path (use an extended 80-bit representation).
-    let (fp, valid) = moderate_path::<F>(mantissa, mantissa_exponent, truncated);
-    if valid {
-        return fp.into_float::<F>();
-    }
-
-    // Slow path, fast path didn't work.
-    let b = fp.into_downward_float::<F>();
-    if b.is_special() {
-        // We have a non-finite number, we get to leave early.
-        b
-    } else {
-        bhcomp(b, integer, fraction, exponent)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bhcomp.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bhcomp.rs
deleted file mode 100644
index e52b1c9b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bhcomp.rs
+++ /dev/null
@@ -1,218 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Compare the mantissa to the halfway representation of the float.
-//!
-//! Compares the actual significant digits of the mantissa to the
-//! theoretical digits from `b+h`, scaled into the proper range.
-
-use super::bignum::*;
-use super::digit::*;
-use super::exponent::*;
-use super::float::*;
-use super::math::*;
-use super::num::*;
-use super::rounding::*;
-use core::{cmp, mem};
-
-// MANTISSA
-
-/// Parse the full mantissa into a big integer.
-///
-/// Max digits is the maximum number of digits plus one.
-fn parse_mantissa<F>(integer: &[u8], fraction: &[u8]) -> Bigint
-where
-    F: Float,
-{
-    // Main loop
-    let small_powers = POW10_LIMB;
-    let step = small_powers.len() - 2;
-    let max_digits = F::MAX_DIGITS - 1;
-    let mut counter = 0;
-    let mut value: Limb = 0;
-    let mut i: usize = 0;
-    let mut result = Bigint::default();
-
-    // Iteratively process all the data in the mantissa.
-    for &digit in integer.iter().chain(fraction) {
-        // We've parsed the max digits using small values, add to bignum
-        if counter == step {
-            result.imul_small(small_powers[counter]);
-            result.iadd_small(value);
-            counter = 0;
-            value = 0;
-        }
-
-        value *= 10;
-        value += as_limb(to_digit(digit).unwrap());
-
-        i += 1;
-        counter += 1;
-        if i == max_digits {
-            break;
-        }
-    }
-
-    // We will always have a remainder, as long as we entered the loop
-    // once, or counter % step is 0.
-    if counter != 0 {
-        result.imul_small(small_powers[counter]);
-        result.iadd_small(value);
-    }
-
-    // If we have any remaining digits after the last value, we need
-    // to add a 1 after the rest of the array, it doesn't matter where,
-    // just move it up. This is good for the worst-possible float
-    // representation. We also need to return an index.
-    // Since we already trimmed trailing zeros, we know there has
-    // to be a non-zero digit if there are any left.
-    if i < integer.len() + fraction.len() {
-        result.imul_small(10);
-        result.iadd_small(1);
-    }
-
-    result
-}
-
-// FLOAT OPS
-
-/// Calculate `b` from a representation of `b` as a float.
-#[inline]
-pub(super) fn b_extended<F: Float>(f: F) -> ExtendedFloat {
-    ExtendedFloat::from_float(f)
-}
-
-/// Calculate `b+h` from a representation of `b` as a float.
-#[inline]
-pub(super) fn bh_extended<F: Float>(f: F) -> ExtendedFloat {
-    // None of these can overflow.
-    let b = b_extended(f);
-    ExtendedFloat {
-        mant: (b.mant << 1) + 1,
-        exp: b.exp - 1,
-    }
-}
-
-// ROUNDING
-
-/// Custom round-nearest, tie-event algorithm for bhcomp.
-#[inline]
-fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32, is_truncated: bool) {
-    let (mut is_above, mut is_halfway) = round_nearest(fp, shift);
-    if is_halfway && is_truncated {
-        is_above = true;
-        is_halfway = false;
-    }
-    tie_even(fp, is_above, is_halfway);
-}
-
-// BHCOMP
-
-/// Calculate the mantissa for a big integer with a positive exponent.
-fn large_atof<F>(mantissa: Bigint, exponent: i32) -> F
-where
-    F: Float,
-{
-    let bits = mem::size_of::<u64>() * 8;
-
-    // Simple, we just need to multiply by the power of the radix.
-    // Now, we can calculate the mantissa and the exponent from this.
-    // The binary exponent is the binary exponent for the mantissa
-    // shifted to the hidden bit.
-    let mut bigmant = mantissa;
-    bigmant.imul_pow10(exponent as u32);
-
-    // Get the exact representation of the float from the big integer.
-    let (mant, is_truncated) = bigmant.hi64();
-    let exp = bigmant.bit_length() as i32 - bits as i32;
-    let mut fp = ExtendedFloat { mant, exp };
-    fp.round_to_native::<F, _>(|fp, shift| round_nearest_tie_even(fp, shift, is_truncated));
-    into_float(fp)
-}
-
-/// Calculate the mantissa for a big integer with a negative exponent.
-///
-/// This invokes the comparison with `b+h`.
-fn small_atof<F>(mantissa: Bigint, exponent: i32, f: F) -> F
-where
-    F: Float,
-{
-    // Get the significant digits and radix exponent for the real digits.
-    let mut real_digits = mantissa;
-    let real_exp = exponent;
-    debug_assert!(real_exp < 0);
-
-    // Get the significant digits and the binary exponent for `b+h`.
-    let theor = bh_extended(f);
-    let mut theor_digits = Bigint::from_u64(theor.mant);
-    let theor_exp = theor.exp;
-
-    // We need to scale the real digits and `b+h` digits to be the same
-    // order. We currently have `real_exp`, in `radix`, that needs to be
-    // shifted to `theor_digits` (since it is negative), and `theor_exp`
-    // to either `theor_digits` or `real_digits` as a power of 2 (since it
-    // may be positive or negative). Try to remove as many powers of 2
-    // as possible. All values are relative to `theor_digits`, that is,
-    // reflect the power you need to multiply `theor_digits` by.
-
-    // Can remove a power-of-two, since the radix is 10.
-    // Both are on opposite-sides of equation, can factor out a
-    // power of two.
-    //
-    // Example: 10^-10, 2^-10   -> ( 0, 10, 0)
-    // Example: 10^-10, 2^-15   -> (-5, 10, 0)
-    // Example: 10^-10, 2^-5    -> ( 5, 10, 0)
-    // Example: 10^-10, 2^5 -> (15, 10, 0)
-    let binary_exp = theor_exp - real_exp;
-    let halfradix_exp = -real_exp;
-    let radix_exp = 0;
-
-    // Carry out our multiplication.
-    if halfradix_exp != 0 {
-        theor_digits.imul_pow5(halfradix_exp as u32);
-    }
-    if radix_exp != 0 {
-        theor_digits.imul_pow10(radix_exp as u32);
-    }
-    if binary_exp > 0 {
-        theor_digits.imul_pow2(binary_exp as u32);
-    } else if binary_exp < 0 {
-        real_digits.imul_pow2(-binary_exp as u32);
-    }
-
-    // Compare real digits to theoretical digits and round the float.
-    match real_digits.compare(&theor_digits) {
-        cmp::Ordering::Greater => f.next_positive(),
-        cmp::Ordering::Less => f,
-        cmp::Ordering::Equal => f.round_positive_even(),
-    }
-}
-
-/// Calculate the exact value of the float.
-///
-/// Note: fraction must not have trailing zeros.
-pub(crate) fn bhcomp<F>(b: F, integer: &[u8], mut fraction: &[u8], exponent: i32) -> F
-where
-    F: Float,
-{
-    // Calculate the number of integer digits and use that to determine
-    // where the significant digits start in the fraction.
-    let integer_digits = integer.len();
-    let fraction_digits = fraction.len();
-    let digits_start = if integer_digits == 0 {
-        let start = fraction.iter().take_while(|&x| *x == b'0').count();
-        fraction = &fraction[start..];
-        start
-    } else {
-        0
-    };
-    let sci_exp = scientific_exponent(exponent, integer_digits, digits_start);
-    let count = F::MAX_DIGITS.min(integer_digits + fraction_digits - digits_start);
-    let scaled_exponent = sci_exp + 1 - count as i32;
-
-    let mantissa = parse_mantissa::<F>(integer, fraction);
-    if scaled_exponent >= 0 {
-        large_atof(mantissa, scaled_exponent)
-    } else {
-        small_atof(mantissa, scaled_exponent, b)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bignum.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bignum.rs
deleted file mode 100644
index 4fa7eed6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bignum.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Big integer type definition.
-
-use super::math::*;
-#[allow(unused_imports)]
-use alloc::vec::Vec;
-
-/// Storage for a big integer type.
-#[derive(Clone, PartialEq, Eq)]
-pub(crate) struct Bigint {
-    /// Internal storage for the Bigint, in little-endian order.
-    pub(crate) data: Vec<Limb>,
-}
-
-impl Default for Bigint {
-    fn default() -> Self {
-        Bigint {
-            data: Vec::with_capacity(20),
-        }
-    }
-}
-
-impl Math for Bigint {
-    #[inline]
-    fn data(&self) -> &Vec<Limb> {
-        &self.data
-    }
-
-    #[inline]
-    fn data_mut(&mut self) -> &mut Vec<Limb> {
-        &mut self.data
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached.rs
deleted file mode 100644
index ef5a9fe..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Cached powers trait for extended-precision floats.
-
-use super::cached_float80;
-use super::float::ExtendedFloat;
-
-// POWERS
-
-/// Precalculated powers that uses two-separate arrays for memory-efficiency.
-#[doc(hidden)]
-pub(crate) struct ExtendedFloatArray {
-    // Pre-calculated mantissa for the powers.
-    pub mant: &'static [u64],
-    // Pre-calculated binary exponents for the powers.
-    pub exp: &'static [i32],
-}
-
-/// Allow indexing of values without bounds checking
-impl ExtendedFloatArray {
-    #[inline]
-    pub fn get_extended_float(&self, index: usize) -> ExtendedFloat {
-        let mant = self.mant[index];
-        let exp = self.exp[index];
-        ExtendedFloat { mant, exp }
-    }
-
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.mant.len()
-    }
-}
-
-// MODERATE PATH POWERS
-
-/// Precalculated powers of base N for the moderate path.
-#[doc(hidden)]
-pub(crate) struct ModeratePathPowers {
-    // Pre-calculated small powers.
-    pub small: ExtendedFloatArray,
-    // Pre-calculated large powers.
-    pub large: ExtendedFloatArray,
-    /// Pre-calculated small powers as 64-bit integers
-    pub small_int: &'static [u64],
-    // Step between large powers and number of small powers.
-    pub step: i32,
-    // Exponent bias for the large powers.
-    pub bias: i32,
-}
-
-/// Allow indexing of values without bounds checking
-impl ModeratePathPowers {
-    #[inline]
-    pub fn get_small(&self, index: usize) -> ExtendedFloat {
-        self.small.get_extended_float(index)
-    }
-
-    #[inline]
-    pub fn get_large(&self, index: usize) -> ExtendedFloat {
-        self.large.get_extended_float(index)
-    }
-
-    #[inline]
-    pub fn get_small_int(&self, index: usize) -> u64 {
-        self.small_int[index]
-    }
-}
-
-// CACHED EXTENDED POWERS
-
-/// Cached powers as a trait for a floating-point type.
-pub(crate) trait ModeratePathCache {
-    /// Get cached powers.
-    fn get_powers() -> &'static ModeratePathPowers;
-}
-
-impl ModeratePathCache for ExtendedFloat {
-    #[inline]
-    fn get_powers() -> &'static ModeratePathPowers {
-        cached_float80::get_powers()
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached_float80.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached_float80.rs
deleted file mode 100644
index 9beda3d..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached_float80.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Cached exponents for basen values with 80-bit extended floats.
-//!
-//! Exact versions of base**n as an extended-precision float, with both
-//! large and small powers. Use the large powers to minimize the amount
-//! of compounded error.
-//!
-//! These values were calculated using Python, using the arbitrary-precision
-//! integer to calculate exact extended-representation of each value.
-//! These values are all normalized.
-
-use super::cached::{ExtendedFloatArray, ModeratePathPowers};
-
-// LOW-LEVEL
-// ---------
-
-// BASE10
-
-const BASE10_SMALL_MANTISSA: [u64; 10] = [
-    9223372036854775808,  // 10^0
-    11529215046068469760, // 10^1
-    14411518807585587200, // 10^2
-    18014398509481984000, // 10^3
-    11258999068426240000, // 10^4
-    14073748835532800000, // 10^5
-    17592186044416000000, // 10^6
-    10995116277760000000, // 10^7
-    13743895347200000000, // 10^8
-    17179869184000000000, // 10^9
-];
-const BASE10_SMALL_EXPONENT: [i32; 10] = [
-    -63, // 10^0
-    -60, // 10^1
-    -57, // 10^2
-    -54, // 10^3
-    -50, // 10^4
-    -47, // 10^5
-    -44, // 10^6
-    -40, // 10^7
-    -37, // 10^8
-    -34, // 10^9
-];
-const BASE10_LARGE_MANTISSA: [u64; 66] = [
-    11555125961253852697, // 10^-350
-    13451937075301367670, // 10^-340
-    15660115838168849784, // 10^-330
-    18230774251475056848, // 10^-320
-    10611707258198326947, // 10^-310
-    12353653155963782858, // 10^-300
-    14381545078898527261, // 10^-290
-    16742321987285426889, // 10^-280
-    9745314011399999080,  // 10^-270
-    11345038669416679861, // 10^-260
-    13207363278391631158, // 10^-250
-    15375394465392026070, // 10^-240
-    17899314949046850752, // 10^-230
-    10418772551374772303, // 10^-220
-    12129047596099288555, // 10^-210
-    14120069793541087484, // 10^-200
-    16437924692338667210, // 10^-190
-    9568131466127621947,  // 10^-180
-    11138771039116687545, // 10^-170
-    12967236152753102995, // 10^-160
-    15095849699286165408, // 10^-150
-    17573882009934360870, // 10^-140
-    10229345649675443343, // 10^-130
-    11908525658859223294, // 10^-120
-    13863348470604074297, // 10^-110
-    16139061738043178685, // 10^-100
-    9394170331095332911,  // 10^-90
-    10936253623915059621, // 10^-80
-    12731474852090538039, // 10^-70
-    14821387422376473014, // 10^-60
-    17254365866976409468, // 10^-50
-    10043362776618689222, // 10^-40
-    11692013098647223345, // 10^-30
-    13611294676837538538, // 10^-20
-    15845632502852867518, // 10^-10
-    9223372036854775808,  // 10^0
-    10737418240000000000, // 10^10
-    12500000000000000000, // 10^20
-    14551915228366851806, // 10^30
-    16940658945086006781, // 10^40
-    9860761315262647567,  // 10^50
-    11479437019748901445, // 10^60
-    13363823550460978230, // 10^70
-    15557538194652854267, // 10^80
-    18111358157653424735, // 10^90
-    10542197943230523224, // 10^100
-    12272733663244316382, // 10^110
-    14287342391028437277, // 10^120
-    16632655625031838749, // 10^130
-    9681479787123295682,  // 10^140
-    11270725851789228247, // 10^150
-    13120851772591970218, // 10^160
-    15274681817498023410, // 10^170
-    17782069995880619867, // 10^180
-    10350527006597618960, // 10^190
-    12049599325514420588, // 10^200
-    14027579833653779454, // 10^210
-    16330252207878254650, // 10^220
-    9505457831475799117,  // 10^230
-    11065809325636130661, // 10^240
-    12882297539194266616, // 10^250
-    14996968138956309548, // 10^260
-    17458768723248864463, // 10^270
-    10162340898095201970, // 10^280
-    11830521861667747109, // 10^290
-    13772540099066387756, // 10^300
-];
-const BASE10_LARGE_EXPONENT: [i32; 66] = [
-    -1226, // 10^-350
-    -1193, // 10^-340
-    -1160, // 10^-330
-    -1127, // 10^-320
-    -1093, // 10^-310
-    -1060, // 10^-300
-    -1027, // 10^-290
-    -994,  // 10^-280
-    -960,  // 10^-270
-    -927,  // 10^-260
-    -894,  // 10^-250
-    -861,  // 10^-240
-    -828,  // 10^-230
-    -794,  // 10^-220
-    -761,  // 10^-210
-    -728,  // 10^-200
-    -695,  // 10^-190
-    -661,  // 10^-180
-    -628,  // 10^-170
-    -595,  // 10^-160
-    -562,  // 10^-150
-    -529,  // 10^-140
-    -495,  // 10^-130
-    -462,  // 10^-120
-    -429,  // 10^-110
-    -396,  // 10^-100
-    -362,  // 10^-90
-    -329,  // 10^-80
-    -296,  // 10^-70
-    -263,  // 10^-60
-    -230,  // 10^-50
-    -196,  // 10^-40
-    -163,  // 10^-30
-    -130,  // 10^-20
-    -97,   // 10^-10
-    -63,   // 10^0
-    -30,   // 10^10
-    3,     // 10^20
-    36,    // 10^30
-    69,    // 10^40
-    103,   // 10^50
-    136,   // 10^60
-    169,   // 10^70
-    202,   // 10^80
-    235,   // 10^90
-    269,   // 10^100
-    302,   // 10^110
-    335,   // 10^120
-    368,   // 10^130
-    402,   // 10^140
-    435,   // 10^150
-    468,   // 10^160
-    501,   // 10^170
-    534,   // 10^180
-    568,   // 10^190
-    601,   // 10^200
-    634,   // 10^210
-    667,   // 10^220
-    701,   // 10^230
-    734,   // 10^240
-    767,   // 10^250
-    800,   // 10^260
-    833,   // 10^270
-    867,   // 10^280
-    900,   // 10^290
-    933,   // 10^300
-];
-const BASE10_SMALL_INT_POWERS: [u64; 10] = [
-    1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
-];
-const BASE10_STEP: i32 = 10;
-const BASE10_BIAS: i32 = 350;
-
-// HIGH LEVEL
-// ----------
-
-const BASE10_POWERS: ModeratePathPowers = ModeratePathPowers {
-    small: ExtendedFloatArray {
-        mant: &BASE10_SMALL_MANTISSA,
-        exp: &BASE10_SMALL_EXPONENT,
-    },
-    large: ExtendedFloatArray {
-        mant: &BASE10_LARGE_MANTISSA,
-        exp: &BASE10_LARGE_EXPONENT,
-    },
-    small_int: &BASE10_SMALL_INT_POWERS,
-    step: BASE10_STEP,
-    bias: BASE10_BIAS,
-};
-
-/// Get powers from base.
-pub(crate) fn get_powers() -> &'static ModeratePathPowers {
-    &BASE10_POWERS
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/digit.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/digit.rs
deleted file mode 100644
index 3d150a1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/digit.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Helpers to convert and add digits from characters.
-
-// Convert u8 to digit.
-#[inline]
-pub(crate) fn to_digit(c: u8) -> Option<u32> {
-    (c as char).to_digit(10)
-}
-
-// Add digit to mantissa.
-#[inline]
-pub(crate) fn add_digit(value: u64, digit: u32) -> Option<u64> {
-    match value.checked_mul(10) {
-        None => None,
-        Some(n) => n.checked_add(digit as u64),
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/errors.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/errors.rs
deleted file mode 100644
index f4f41cd..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/errors.rs
+++ /dev/null
@@ -1,132 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Estimate the error in an 80-bit approximation of a float.
-//!
-//! This estimates the error in a floating-point representation.
-//!
-//! This implementation is loosely based off the Golang implementation,
-//! found here: <https://golang.org/src/strconv/atof.go>
-
-use super::float::*;
-use super::num::*;
-use super::rounding::*;
-
-pub(crate) trait FloatErrors {
-    /// Get the full error scale.
-    fn error_scale() -> u32;
-    /// Get the half error scale.
-    fn error_halfscale() -> u32;
-    /// Determine if the number of errors is tolerable for float precision.
-    fn error_is_accurate<F: Float>(count: u32, fp: &ExtendedFloat) -> bool;
-}
-
-/// Check if the error is accurate with a round-nearest rounding scheme.
-#[inline]
-fn nearest_error_is_accurate(errors: u64, fp: &ExtendedFloat, extrabits: u64) -> bool {
-    // Round-to-nearest, need to use the halfway point.
-    if extrabits == 65 {
-        // Underflow, we have a shift larger than the mantissa.
-        // Representation is valid **only** if the value is close enough
-        // overflow to the next bit within errors. If it overflows,
-        // the representation is **not** valid.
-        !fp.mant.overflowing_add(errors).1
-    } else {
-        let mask: u64 = lower_n_mask(extrabits);
-        let extra: u64 = fp.mant & mask;
-
-        // Round-to-nearest, need to check if we're close to halfway.
-        // IE, b10100 | 100000, where `|` signifies the truncation point.
-        let halfway: u64 = lower_n_halfway(extrabits);
-        let cmp1 = halfway.wrapping_sub(errors) < extra;
-        let cmp2 = extra < halfway.wrapping_add(errors);
-
-        // If both comparisons are true, we have significant rounding error,
-        // and the value cannot be exactly represented. Otherwise, the
-        // representation is valid.
-        !(cmp1 && cmp2)
-    }
-}
-
-impl FloatErrors for u64 {
-    #[inline]
-    fn error_scale() -> u32 {
-        8
-    }
-
-    #[inline]
-    fn error_halfscale() -> u32 {
-        u64::error_scale() / 2
-    }
-
-    #[inline]
-    fn error_is_accurate<F: Float>(count: u32, fp: &ExtendedFloat) -> bool {
-        // Determine if extended-precision float is a good approximation.
-        // If the error has affected too many units, the float will be
-        // inaccurate, or if the representation is too close to halfway
-        // that any operations could affect this halfway representation.
-        // See the documentation for dtoa for more information.
-        let bias = -(F::EXPONENT_BIAS - F::MANTISSA_SIZE);
-        let denormal_exp = bias - 63;
-        // This is always a valid u32, since (denormal_exp - fp.exp)
-        // will always be positive and the significand size is {23, 52}.
-        let extrabits = if fp.exp <= denormal_exp {
-            64 - F::MANTISSA_SIZE + denormal_exp - fp.exp
-        } else {
-            63 - F::MANTISSA_SIZE
-        };
-
-        // Our logic is as follows: we want to determine if the actual
-        // mantissa and the errors during calculation differ significantly
-        // from the rounding point. The rounding point for round-nearest
-        // is the halfway point, IE, this when the truncated bits start
-        // with b1000..., while the rounding point for the round-toward
-        // is when the truncated bits are equal to 0.
-        // To do so, we can check whether the rounding point +/- the error
-        // are >/< the actual lower n bits.
-        //
-        // For whether we need to use signed or unsigned types for this
-        // analysis, see this example, using u8 rather than u64 to simplify
-        // things.
-        //
-        // # Comparisons
-        //      cmp1 = (halfway - errors) < extra
-        //      cmp1 = extra < (halfway + errors)
-        //
-        // # Large Extrabits, Low Errors
-        //
-        //      extrabits = 8
-        //      halfway          =  0b10000000
-        //      extra            =  0b10000010
-        //      errors           =  0b00000100
-        //      halfway - errors =  0b01111100
-        //      halfway + errors =  0b10000100
-        //
-        //      Unsigned:
-        //          halfway - errors = 124
-        //          halfway + errors = 132
-        //          extra            = 130
-        //          cmp1             = true
-        //          cmp2             = true
-        //      Signed:
-        //          halfway - errors = 124
-        //          halfway + errors = -124
-        //          extra            = -126
-        //          cmp1             = false
-        //          cmp2             = true
-        //
-        // # Conclusion
-        //
-        // Since errors will always be small, and since we want to detect
-        // if the representation is accurate, we need to use an **unsigned**
-        // type for comparisons.
-
-        let extrabits = extrabits as u64;
-        let errors = count as u64;
-        if extrabits > 65 {
-            // Underflow, we have a literal 0.
-            return true;
-        }
-
-        nearest_error_is_accurate(errors, fp, extrabits)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/exponent.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/exponent.rs
deleted file mode 100644
index 5e27de8..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/exponent.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Utilities to calculate exponents.
-
-/// Convert usize into i32 without overflow.
-///
-/// This is needed to ensure when adjusting the exponent relative to
-/// the mantissa we do not overflow for comically-long exponents.
-#[inline]
-fn into_i32(value: usize) -> i32 {
-    if value > i32::MAX as usize {
-        i32::MAX
-    } else {
-        value as i32
-    }
-}
-
-// EXPONENT CALCULATION
-
-// Calculate the scientific notation exponent without overflow.
-//
-// For example, 0.1 would be -1, and 10 would be 1 in base 10.
-#[inline]
-pub(crate) fn scientific_exponent(
-    exponent: i32,
-    integer_digits: usize,
-    fraction_start: usize,
-) -> i32 {
-    if integer_digits == 0 {
-        let fraction_start = into_i32(fraction_start);
-        exponent.saturating_sub(fraction_start).saturating_sub(1)
-    } else {
-        let integer_shift = into_i32(integer_digits - 1);
-        exponent.saturating_add(integer_shift)
-    }
-}
-
-// Calculate the mantissa exponent without overflow.
-//
-// Remove the number of digits that contributed to the mantissa past
-// the dot, and add the number of truncated digits from the mantissa,
-// to calculate the scaling factor for the mantissa from a raw exponent.
-#[inline]
-pub(crate) fn mantissa_exponent(exponent: i32, fraction_digits: usize, truncated: usize) -> i32 {
-    if fraction_digits > truncated {
-        exponent.saturating_sub(into_i32(fraction_digits - truncated))
-    } else {
-        exponent.saturating_add(into_i32(truncated - fraction_digits))
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/float.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/float.rs
deleted file mode 100644
index 2d434a2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/float.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-// FLOAT TYPE
-
-use super::num::*;
-use super::rounding::*;
-use super::shift::*;
-
-/// Extended precision floating-point type.
-///
-/// Private implementation, exposed only for testing purposes.
-#[doc(hidden)]
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub(crate) struct ExtendedFloat {
-    /// Mantissa for the extended-precision float.
-    pub mant: u64,
-    /// Binary exponent for the extended-precision float.
-    pub exp: i32,
-}
-
-impl ExtendedFloat {
-    // PROPERTIES
-
-    // OPERATIONS
-
-    /// Multiply two normalized extended-precision floats, as if by `a*b`.
-    ///
-    /// The precision is maximal when the numbers are normalized, however,
-    /// decent precision will occur as long as both values have high bits
-    /// set. The result is not normalized.
-    ///
-    /// Algorithm:
-    ///     1. Non-signed multiplication of mantissas (requires 2x as many bits as input).
-    ///     2. Normalization of the result (not done here).
-    ///     3. Addition of exponents.
-    pub(crate) fn mul(&self, b: &ExtendedFloat) -> ExtendedFloat {
-        // Logic check, values must be decently normalized prior to multiplication.
-        debug_assert!((self.mant & u64::HIMASK != 0) && (b.mant & u64::HIMASK != 0));
-
-        // Extract high-and-low masks.
-        let ah = self.mant >> u64::HALF;
-        let al = self.mant & u64::LOMASK;
-        let bh = b.mant >> u64::HALF;
-        let bl = b.mant & u64::LOMASK;
-
-        // Get our products
-        let ah_bl = ah * bl;
-        let al_bh = al * bh;
-        let al_bl = al * bl;
-        let ah_bh = ah * bh;
-
-        let mut tmp = (ah_bl & u64::LOMASK) + (al_bh & u64::LOMASK) + (al_bl >> u64::HALF);
-        // round up
-        tmp += 1 << (u64::HALF - 1);
-
-        ExtendedFloat {
-            mant: ah_bh + (ah_bl >> u64::HALF) + (al_bh >> u64::HALF) + (tmp >> u64::HALF),
-            exp: self.exp + b.exp + u64::FULL,
-        }
-    }
-
-    /// Multiply in-place, as if by `a*b`.
-    ///
-    /// The result is not normalized.
-    #[inline]
-    pub(crate) fn imul(&mut self, b: &ExtendedFloat) {
-        *self = self.mul(b);
-    }
-
-    // NORMALIZE
-
-    /// Normalize float-point number.
-    ///
-    /// Shift the mantissa so the number of leading zeros is 0, or the value
-    /// itself is 0.
-    ///
-    /// Get the number of bytes shifted.
-    #[inline]
-    pub(crate) fn normalize(&mut self) -> u32 {
-        // Note:
-        // Using the cltz intrinsic via leading_zeros is way faster (~10x)
-        // than shifting 1-bit at a time, via while loop, and also way
-        // faster (~2x) than an unrolled loop that checks at 32, 16, 4,
-        // 2, and 1 bit.
-        //
-        // Using a modulus of pow2 (which will get optimized to a bitwise
-        // and with 0x3F or faster) is slightly slower than an if/then,
-        // however, removing the if/then will likely optimize more branched
-        // code as it removes conditional logic.
-
-        // Calculate the number of leading zeros, and then zero-out
-        // any overflowing bits, to avoid shl overflow when self.mant == 0.
-        let shift = if self.mant == 0 {
-            0
-        } else {
-            self.mant.leading_zeros()
-        };
-        shl(self, shift as i32);
-        shift
-    }
-
-    // ROUND
-
-    /// Lossy round float-point number to native mantissa boundaries.
-    #[inline]
-    pub(crate) fn round_to_native<F, Algorithm>(&mut self, algorithm: Algorithm)
-    where
-        F: Float,
-        Algorithm: FnOnce(&mut ExtendedFloat, i32),
-    {
-        round_to_native::<F, _>(self, algorithm);
-    }
-
-    // FROM
-
-    /// Create extended float from native float.
-    #[inline]
-    pub fn from_float<F: Float>(f: F) -> ExtendedFloat {
-        from_float(f)
-    }
-
-    // INTO
-
-    /// Convert into default-rounded, lower-precision native float.
-    #[inline]
-    pub(crate) fn into_float<F: Float>(mut self) -> F {
-        self.round_to_native::<F, _>(round_nearest_tie_even);
-        into_float(self)
-    }
-
-    /// Convert into downward-rounded, lower-precision native float.
-    #[inline]
-    pub(crate) fn into_downward_float<F: Float>(mut self) -> F {
-        self.round_to_native::<F, _>(round_downward);
-        into_float(self)
-    }
-}
-
-// FROM FLOAT
-
-// Import ExtendedFloat from native float.
-#[inline]
-pub(crate) fn from_float<F>(f: F) -> ExtendedFloat
-where
-    F: Float,
-{
-    ExtendedFloat {
-        mant: u64::as_cast(f.mantissa()),
-        exp: f.exponent(),
-    }
-}
-
-// INTO FLOAT
-
-// Export extended-precision float to native float.
-//
-// The extended-precision float must be in native float representation,
-// with overflow/underflow appropriately handled.
-#[inline]
-pub(crate) fn into_float<F>(fp: ExtendedFloat) -> F
-where
-    F: Float,
-{
-    // Export floating-point number.
-    if fp.mant == 0 || fp.exp < F::DENORMAL_EXPONENT {
-        // sub-denormal, underflow
-        F::ZERO
-    } else if fp.exp >= F::MAX_EXPONENT {
-        // overflow
-        F::from_bits(F::INFINITY_BITS)
-    } else {
-        // calculate the exp and fraction bits, and return a float from bits.
-        let exp: u64;
-        if (fp.exp == F::DENORMAL_EXPONENT) && (fp.mant & F::HIDDEN_BIT_MASK.as_u64()) == 0 {
-            exp = 0;
-        } else {
-            exp = (fp.exp + F::EXPONENT_BIAS) as u64;
-        }
-        let exp = exp << F::MANTISSA_SIZE;
-        let mant = fp.mant & F::MANTISSA_MASK.as_u64();
-        F::from_bits(F::Unsigned::as_cast(mant | exp))
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers.rs
deleted file mode 100644
index af2c8a6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Precalculated large powers for limbs.
-
-#[cfg(fast_arithmetic = "32")]
-pub(crate) use super::large_powers32::*;
-
-#[cfg(fast_arithmetic = "64")]
-pub(crate) use super::large_powers64::*;
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers32.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers32.rs
deleted file mode 100644
index eb8582f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers32.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Precalculated large powers for 32-bit limbs.
-
-/// Large powers (`&[u32]`) for base5 operations.
-const POW5_1: [u32; 1] = [5];
-const POW5_2: [u32; 1] = [25];
-const POW5_3: [u32; 1] = [625];
-const POW5_4: [u32; 1] = [390625];
-const POW5_5: [u32; 2] = [2264035265, 35];
-const POW5_6: [u32; 3] = [2242703233, 762134875, 1262];
-const POW5_7: [u32; 5] = [3211403009, 1849224548, 3668416493, 3913284084, 1593091];
-const POW5_8: [u32; 10] = [
-    781532673, 64985353, 253049085, 594863151, 3553621484, 3288652808, 3167596762, 2788392729,
-    3911132675, 590,
-];
-const POW5_9: [u32; 19] = [
-    2553183233, 3201533787, 3638140786, 303378311, 1809731782, 3477761648, 3583367183, 649228654,
-    2915460784, 487929380, 1011012442, 1677677582, 3428152256, 1710878487, 1438394610, 2161952759,
-    4100910556, 1608314830, 349175,
-];
-const POW5_10: [u32; 38] = [
-    4234999809, 2012377703, 2408924892, 1570150255, 3090844311, 3273530073, 1187251475, 2498123591,
-    3364452033, 1148564857, 687371067, 2854068671, 1883165473, 505794538, 2988060450, 3159489326,
-    2531348317, 3215191468, 849106862, 3892080979, 3288073877, 2242451748, 4183778142, 2995818208,
-    2477501924, 325481258, 2487842652, 1774082830, 1933815724, 2962865281, 1168579910, 2724829000,
-    2360374019, 2315984659, 2360052375, 3251779801, 1664357844, 28,
-];
-const POW5_11: [u32; 75] = [
-    689565697, 4116392818, 1853628763, 516071302, 2568769159, 365238920, 336250165, 1283268122,
-    3425490969, 248595470, 2305176814, 2111925499, 507770399, 2681111421, 589114268, 591287751,
-    1708941527, 4098957707, 475844916, 3378731398, 2452339615, 2817037361, 2678008327, 1656645978,
-    2383430340, 73103988, 448667107, 2329420453, 3124020241, 3625235717, 3208634035, 2412059158,
-    2981664444, 4117622508, 838560765, 3069470027, 270153238, 1802868219, 3692709886, 2161737865,
-    2159912357, 2585798786, 837488486, 4237238160, 2540319504, 3798629246, 3748148874, 1021550776,
-    2386715342, 1973637538, 1823520457, 1146713475, 833971519, 3277251466, 905620390, 26278816,
-    2680483154, 2294040859, 373297482, 5996609, 4109575006, 512575049, 917036550, 1942311753,
-    2816916778, 3248920332, 1192784020, 3537586671, 2456567643, 2925660628, 759380297, 888447942,
-    3559939476, 3654687237, 805,
-];
-const POW5_12: [u32; 149] = [
-    322166785, 3809044581, 2994556223, 1239584207, 3962455841, 4001882964, 3053876612, 915114683,
-    2783289745, 785739093, 4253185907, 3931164994, 1370983858, 2553556126, 3360742076, 2255410929,
-    422849554, 2457422215, 3539495362, 1720790602, 1908931983, 1470596141, 592794347, 4219465164,
-    4085652704, 941661409, 2534650953, 885063988, 2355909854, 2812815516, 767256131, 3821757683,
-    2155151105, 3817418473, 281116564, 2834395026, 2821201622, 2524625843, 1511330880, 2572352493,
-    330571332, 2951088579, 2730271766, 4044456479, 4212286644, 2444937588, 3603420843, 2387148597,
-    1142537539, 3299235429, 1751012624, 861228086, 2873722519, 230498814, 1023297821, 2553128038,
-    3421129895, 2651917435, 2042981258, 1606787143, 2228751918, 447345732, 1930371132, 1784132011,
-    3612538790, 2275925090, 2487567871, 1080427616, 2009179183, 3383506781, 3899054063, 1950782960,
-    2168622213, 2717674390, 3616636027, 2079341593, 1530129217, 1461057425, 2406264415, 3674671357,
-    2972036238, 2019354295, 1455849819, 1866918619, 1324269294, 424891864, 2722422332, 2641594816,
-    1400249021, 3482963993, 3734946379, 225889849, 1891545473, 777383150, 3589824633, 4117601611,
-    4220028667, 334453379, 1083130821, 1060342180, 4208163139, 1489826908, 4163762246, 1096580926,
-    689301528, 2336054516, 1782865703, 4175148410, 3398369392, 2329412588, 3001580596, 59740741,
-    3202189932, 3351895776, 246185302, 718535188, 3772647488, 4151666556, 4055698133, 2461934110,
-    2281316281, 3466396836, 3536023465, 1064267812, 2955456354, 2423805422, 3627960790, 1325057500,
-    3876919979, 2009959531, 175455101, 184092852, 2358785571, 3842977831, 2485266289, 487121622,
-    4159252710, 4075707558, 459389244, 300652075, 2521346588, 3458976673, 888631636, 2076098096,
-    3844514585, 2363697580, 3729421522, 3051115477, 649395,
-];
-const POW5_13: [u32; 298] = [
-    711442433, 3564261005, 2399042279, 4170849936, 4010295575, 1423987028, 330414929, 1349249065,
-    4213813618, 3852031822, 4040843590, 2154565331, 3094013374, 1159028371, 3227065538, 2115927092,
-    2085102554, 488590542, 2609619432, 3602898805, 3812736528, 3269439096, 23816114, 253984538,
-    1035905997, 2942969204, 3400787671, 338562688, 1637191975, 740509713, 2264962817, 3410753922,
-    4162231428, 2282041228, 1759373012, 3155367777, 4278913285, 1420532801, 1981002276, 438054990,
-    1006507643, 1142697287, 1332538012, 2029019521, 3949305784, 818392641, 2491288846, 2716584663,
-    3648886102, 556814413, 444795339, 4071412999, 1066321706, 4253169466, 2510832316, 672091442,
-    4083256000, 2165985028, 1841538484, 3549854235, 364431512, 3707648143, 1162785440, 2268641545,
-    281340310, 735693841, 848809228, 1700785200, 2919703985, 4094234344, 58530286, 965505005,
-    1000010347, 3381961808, 3040089923, 1973852082, 2890971585, 1019960210, 4292895237, 2821887841,
-    3756675650, 3951282907, 3885870583, 1008791145, 503998487, 1881258362, 1949332730, 392996726,
-    2012973814, 3970014187, 2461725150, 2942547730, 3728066699, 2766901132, 3778532841, 1085564064,
-    2278673896, 1116879805, 3448726271, 774279411, 157211670, 1506320155, 531168605, 1362654525,
-    956967721, 2148871960, 769186085, 4186232894, 2055679604, 3248365487, 3981268013, 3975787984,
-    2489510517, 3309046495, 212771124, 933418041, 3371839114, 562115198, 1853601831, 757336096,
-    1354633440, 1486083256, 2872126393, 522920738, 1141587749, 3210903262, 1926940553, 3054024853,
-    2021162538, 2262742000, 1877899947, 3147002868, 669840763, 4158174590, 4238502559, 1023731922,
-    3386840011, 829588074, 3449720188, 2835142880, 2999162007, 813056473, 482949569, 638108879,
-    3067201471, 1026714238, 4004452838, 2383667807, 3999477803, 771648919, 630660440, 3827121348,
-    176185980, 2878191002, 2666149832, 3909811063, 2429163983, 2665690412, 907266128, 4269332098,
-    2022665808, 1527122180, 3072053668, 1072477492, 3006022924, 549664855, 2800340954, 37352654,
-    1212772743, 2711280533, 3029527946, 2511120040, 1305308377, 3474662224, 4226330922, 442988428,
-    954940108, 3274548099, 4212288177, 2688499880, 3982226758, 3922609956, 1279948029, 1939943640,
-    3650489901, 2733364929, 2494263275, 1864579964, 1225941120, 2390465139, 1267503249, 3533240729,
-    904410805, 2842550015, 2517736241, 1796069820, 3335274381, 673539835, 1924694759, 3598098235,
-    2792633405, 16535707, 3703535497, 3592841791, 2929082877, 1317622811, 294990855, 1396706563,
-    2383271770, 3853857605, 277813677, 277580220, 1101318484, 3761974115, 1132150143, 2544692622,
-    3419825776, 743770306, 1695464553, 1548693232, 2421159615, 2575672031, 2678971806, 1591267897,
-    626546738, 3823443129, 267710932, 1455435162, 2353985540, 3248523795, 335348168, 3872552561,
-    2814522612, 2634118860, 3503767026, 1301019273, 1414467789, 722985138, 3070909565, 4253482569,
-    3744939841, 558142907, 2229819389, 13833173, 77003966, 2763671364, 3905603970, 2931990126,
-    2280419384, 1879090457, 2934846267, 4284933164, 2331863845, 62191163, 3178861020, 1522063815,
-    785672270, 1215568492, 2936443917, 802972489, 2956820173, 3916732783, 2893572089, 1391232801,
-    3168640330, 2396859648, 894950918, 1103583736, 961991865, 2807302642, 305977505, 3054505899,
-    1048256994, 781017659, 2459278754, 3164823415, 537658277, 905753687, 464963300, 4149131560,
-    1029507924, 2278300961, 1231291503, 414073408, 3630740085, 2345841814, 475358196, 3258243317,
-    4167625072, 4178911231, 2927355042, 655438830, 3138378018, 623200562, 2785714112, 273403236,
-    807993669, 98,
-];
-const POW5_14: [u32; 595] = [
-    1691320321, 2671006246, 1682531301, 2072858707, 1240508969, 3108358191, 1125119096, 2470144952,
-    1610099978, 1690632660, 1941696884, 2663506355, 1006364675, 3909158537, 4147711374, 1072663936,
-    4078768933, 745751659, 4123687570, 471458681, 655028926, 4113407388, 3945524552, 985625313,
-    1254424514, 2127508744, 570530434, 945388122, 3194649404, 2589065070, 2731705399, 202030749,
-    2090780394, 3348662271, 1481754777, 1130635472, 4025144705, 1924486271, 2578567861, 125491448,
-    1558036315, 994248173, 3817216711, 763950077, 1030439870, 959586474, 3845661701, 483795093,
-    1637944470, 2275463649, 3398804829, 1758016486, 2665513698, 2004912571, 1094885097, 4223064276,
-    3307819021, 651121777, 1757003305, 3603542336, 129917786, 2215974994, 3042386306, 2205352757,
-    3944939700, 3710987569, 97967515, 1217242524, 930630949, 3660328512, 1787663098, 1784141600,
-    2500542892, 4034561586, 3444961378, 785043562, 3869499367, 885623728, 2625011087, 3053789617,
-    1965731793, 3900511934, 2648823592, 3851062028, 3321968688, 799195417, 1011847510, 1369129160,
-    1348009103, 2876796955, 2915408967, 3305284948, 263399535, 1715990604, 2645821294, 1587844552,
-    2624912049, 3035631499, 2306636348, 3499275462, 675152704, 854794152, 4004972748, 1739996642,
-    1333476491, 4012621867, 3658792931, 3297985728, 2864481726, 3066357406, 785287846, 1671499798,
-    433044045, 1919608025, 264833858, 3999983367, 1116778570, 1301982149, 4213901070, 4081649357,
-    536169226, 1389008649, 188923873, 373495152, 2551132278, 1800758715, 3951840330, 2632334454,
-    3118778225, 1034046547, 1862428410, 3037609062, 1994608505, 29051798, 2571685694, 264151332,
-    2260643090, 2717535964, 3508441116, 3283713017, 1903365635, 923575694, 1219598101, 2288281570,
-    3676533911, 1014136356, 555142354, 2389170030, 4185108175, 884862419, 836141292, 2957159173,
-    1997444768, 4233903127, 2876184692, 3089125070, 1480848293, 1097600237, 299700527, 2507669891,
-    2982628312, 2114881043, 2529576251, 2812279824, 2987750993, 4241938954, 2204775591, 1037094060,
-    829315638, 1231047149, 52608178, 3735136637, 3455232602, 962039123, 488286513, 50685385,
-    3516451821, 843975207, 1572355722, 675489076, 2428445672, 1555117248, 3708476086, 10375249,
-    4172112346, 2117510871, 2227658327, 3187664554, 3050656558, 328034318, 3179601324, 1247769761,
-    3439263953, 1431538938, 2962525068, 1213366289, 3813013550, 2651093719, 1860661503, 3933716208,
-    264320617, 789980519, 2257856172, 102000748, 977269860, 1113845122, 3008928583, 1461738106,
-    557786285, 2926560363, 1038106190, 3643478847, 828004507, 457818698, 1933056971, 373408056,
-    2076808229, 3160935130, 2781854874, 2519636100, 177606000, 4237103862, 3977834316, 1621936232,
-    2599050516, 319893558, 3343370366, 765044144, 976657331, 7026264, 294277429, 3829376742,
-    3029627280, 2705178718, 3614653880, 230519152, 3288033233, 293525479, 3805751881, 3227511198,
-    2520308544, 3648103003, 1111086184, 437622105, 2232033852, 3239146386, 584244184, 1450926016,
-    2462430443, 3226534010, 298582169, 4214576928, 1762099469, 964985185, 1585788148, 1641127666,
-    787006566, 2315956284, 3258232694, 2275058964, 2541003317, 1508235863, 2613339827, 4080647514,
-    1152057965, 3149266279, 731345410, 914737650, 65395712, 1884566942, 1379520432, 2611027720,
-    4163073378, 2619704967, 2746552541, 1388822415, 3005141199, 843440249, 4288674003, 3136174279,
-    4051522914, 4144149433, 3427566947, 3419023197, 3758479825, 3893877676, 96899594, 1657725776,
-    253618880, 434129337, 1499045748, 2996992534, 4036042074, 2110713869, 906222950, 928326225,
-    2541827893, 1604330202, 226792470, 4022228930, 815850898, 1466012310, 3377712199, 292769859,
-    2822055597, 3225701344, 3052947004, 385831222, 705324593, 4030158636, 3540280538, 2982120874,
-    2136414455, 255762046, 3852783591, 3262064164, 2358991588, 3756586117, 4143612643, 3326743817,
-    2897365738, 807711264, 3719310016, 3721264861, 3627337076, 944539331, 3640975513, 3712525681,
-    1162911839, 2008243316, 2179489649, 2867584109, 261861553, 3570253908, 2062868357, 2220328623,
-    3857004679, 3744109002, 4138041873, 1451860932, 2364975637, 2802161722, 2680106834, 753401584,
-    1223182946, 1245401957, 4163377735, 3565815922, 2216942838, 4036140094, 71979081, 3924559643,
-    400477238, 551750683, 1174153235, 859969898, 1185921017, 1711399735, 812991545, 4051735761,
-    3549118738, 1631653329, 3631835958, 3648867800, 1206500363, 2155893137, 361030362, 3454286017,
-    2505909489, 1083595169, 453595313, 1510564703, 1706163902, 1632924345, 1381875722, 1661526119,
-    1082778324, 3571910052, 1140625929, 851544870, 1145546234, 2938573139, 907528924, 1304752338,
-    1764668294, 1788942063, 1700368828, 104979467, 1413911959, 3327497828, 1956384744, 1272712474,
-    2815637534, 3307809377, 1320574940, 1111968962, 4073107827, 434096622, 169451929, 3201183459,
-    3331028877, 2852366972, 3369830128, 2924794558, 3106537952, 3739481231, 1612955817, 4138608722,
-    2721281595, 2755775390, 843505117, 982234295, 1157276611, 814674632, 4246504726, 3532006708,
-    992340967, 1647538031, 204696133, 193866982, 3899126129, 300851698, 1379496684, 1759463683,
-    1354782756, 1374637239, 3410883240, 1073406229, 3038431791, 1053909855, 3607043270, 173719711,
-    3733903830, 171820911, 1573050589, 932781534, 4183534770, 2158849555, 372245998, 3573073830,
-    841339264, 2759200520, 1610547277, 2603293319, 3890906486, 1557138278, 3964109906, 677238797,
-    537994297, 1124184993, 4287078344, 4207654540, 2943022776, 2977947524, 3255359985, 4098397558,
-    2274666217, 2915862060, 243524940, 2467726756, 2869020032, 507521339, 3403121914, 522051455,
-    1803903108, 3471254194, 473535371, 1948602036, 3352095732, 3116527002, 1795743673, 775867940,
-    2551469548, 3757442064, 3162525227, 3765412747, 3040105484, 1927625810, 48214767, 2997207130,
-    1342349989, 2536583992, 1501320191, 3592287317, 887432730, 967585477, 3334212779, 948663609,
-    1064513472, 15386372, 2465931737, 3230242590, 3036652803, 2063155087, 1927500726, 2821790499,
-    2187774383, 501520074, 3688568496, 3606711121, 2576459247, 3176542345, 378322447, 156541411,
-    1400607301, 1406179107, 677848877, 2253753529, 193196070, 4207435024, 4166396241, 509467541,
-    2906024136, 1221753746, 3375413222, 431327897, 2749265123, 2848827671, 3412997614, 2051920238,
-    1283516885, 1300498239, 1957256104, 2634010560, 3531900395, 360276850, 1461184973, 2012063967,
-    2873572430, 2914608609, 4289554777, 1539331673, 1859532928, 4213441063, 538215691, 3512720863,
-    4258743698, 3040408445, 982396546, 343095663, 4138069496, 1021581857, 214185242, 1968079460,
-    2864275059, 3347192726, 4096783459, 3259169450, 3707808869, 142485006, 399610869, 230556456,
-    2219467721, 4191227798, 2242548189, 3136366572, 179755707, 3464881829, 452317775, 3887426070,
-    3446430233, 1473370015, 1576807208, 3964523248, 419325089, 2373067114, 1596072055, 1928415752,
-    3635452689, 1005598891, 3335462724, 3290848636, 3669078247, 1178176812, 2110774376, 3068593619,
-    1253036518, 908857731, 3631223047, 4138506423, 2903592318, 3596915748, 3289036113, 3721512676,
-    2704409359, 3386016968, 3676268074, 2185259502, 1096257611, 3360076717, 3548676554, 170167319,
-    3360064287, 3899940843, 9640,
-];
-
-pub(crate) const POW5: [&'static [u32]; 14] = [
-    &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10,
-    &POW5_11, &POW5_12, &POW5_13, &POW5_14,
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers64.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers64.rs
deleted file mode 100644
index 96554eac..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers64.rs
+++ /dev/null
@@ -1,625 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Precalculated large powers for 64-bit limbs.
-
-/// Large powers (`&[u64]`) for base5 operations.
-const POW5_1: [u64; 1] = [5];
-const POW5_2: [u64; 1] = [25];
-const POW5_3: [u64; 1] = [625];
-const POW5_4: [u64; 1] = [390625];
-const POW5_5: [u64; 1] = [152587890625];
-const POW5_6: [u64; 2] = [3273344365508751233, 1262];
-const POW5_7: [u64; 3] = [7942358959831785217, 16807427164405733357, 1593091];
-const POW5_8: [u64; 5] = [
-    279109966635548161,
-    2554917779393558781,
-    14124656261812188652,
-    11976055582626787546,
-    2537941837315,
-];
-const POW5_9: [u64; 10] = [
-    13750482914757213185,
-    1302999927698857842,
-    14936872543252795590,
-    2788415840139466767,
-    2095640732773017264,
-    7205570348933370714,
-    7348167152523113408,
-    9285516396840364274,
-    6907659600622710236,
-    349175,
-];
-const POW5_10: [u64; 19] = [
-    8643096425819600897,
-    6743743997439985372,
-    14059704609098336919,
-    10729359125898331411,
-    4933048501514368705,
-    12258131603170554683,
-    2172371001088594721,
-    13569903330219142946,
-    13809142207969578845,
-    16716360519037769646,
-    9631256923806107285,
-    12866941232305103710,
-    1397931361048440292,
-    7619627737732970332,
-    12725409486282665900,
-    11703051443360963910,
-    9947078370803086083,
-    13966287901448440471,
-    121923442132,
-];
-const POW5_11: [u64; 38] = [
-    17679772531488845825,
-    2216509366347768155,
-    1568689219195129479,
-    5511594616325588277,
-    1067709417009240089,
-    9070650952098657518,
-    11515285870634858015,
-    2539561553659505564,
-    17604889300961091799,
-    14511540856854204724,
-    12099083339557485471,
-    7115240299237943815,
-    313979240050606788,
-    10004784664717172195,
-    15570268847930131473,
-    10359715202835930803,
-    17685054012115162812,
-    13183273382855797757,
-    7743260039872919062,
-    9284593436392572926,
-    11105921222066415013,
-    18198799323400703846,
-    16314988383739458320,
-    4387527177871570570,
-    8476708682254672590,
-    4925096874831034057,
-    14075687868072027455,
-    112866656203221926,
-    9852830467773230418,
-    25755239915196746,
-    2201493076310172510,
-    8342165458688466438,
-    13954006576066379050,
-    15193819059903295636,
-    12565616718911389531,
-    3815854855847885129,
-    15696762163583540628,
-    805,
-];
-const POW5_12: [u64; 75] = [
-    16359721904723189761,
-    5323973632697650495,
-    17187956456762001185,
-    3930387638628283780,
-    3374723710406992273,
-    16884225088663222131,
-    10967440051041439154,
-    9686916182456720060,
-    10554548046311730194,
-    7390739362393647554,
-    6316162333127736719,
-    18122464886584070891,
-    4044404959645932768,
-    3801320885861987401,
-    12080950653257274590,
-    16414324262488991299,
-    16395687498836410113,
-    12173633940896186260,
-    10843185433142632150,
-    11048169832730399808,
-    12674828934734683716,
-    17370808310130582550,
-    10500926985433408692,
-    10252725158410704555,
-    14170108270502067523,
-    3698946465517688080,
-    989984870770509463,
-    10965601426733943069,
-    11389898658438335655,
-    6901098232861256586,
-    1921335291173932590,
-    7662788640922083388,
-    9775023833308395430,
-    4640401278902814207,
-    14532050972198413359,
-    8378549018693130223,
-    11672322628395371653,
-    8930704142764178555,
-    6275193859483102017,
-    15782593304269205087,
-    8673060659034172558,
-    8018354414354334043,
-    1824896661540749038,
-    11345563346725559868,
-    14959216444480821949,
-    970189517688324683,
-    3338835207603007873,
-    17684964260791738489,
-    1436466329061721851,
-    4554134986752476101,
-    6398757850768963907,
-    4709779218751158342,
-    10033277748582410264,
-    17932125878679265063,
-    10004750887749091440,
-    256584531835386932,
-    14396282740722731628,
-    3086085133731396950,
-    17831272085689600064,
-    10573926491412564693,
-    14888061047859191737,
-    4570995450261499817,
-    10410165022312935266,
-    5691078631447480790,
-    8632710455805418155,
-    790672778942823293,
-    16505464105756800547,
-    2092171438149740401,
-    17505030673829275878,
-    1291290830058928444,
-    14856191690683232796,
-    8916773426496500052,
-    10152003807578858265,
-    13104441193763861714,
-    649395,
-];
-const POW5_13: [u64; 149] = [
-    15308384451594534913,
-    17913664074042735335,
-    6115977719198531863,
-    5794980608663993169,
-    16544350702855106930,
-    9253787637781258566,
-    4977988951675168190,
-    9087837664087448770,
-    2098480401110016986,
-    15474332540882100712,
-    14042133997396540944,
-    1090855284423485362,
-    12639956485351058381,
-    1454115676006639319,
-    3180465001342538023,
-    14649076551958697729,
-    9801292446545910916,
-    13552201410826594004,
-    6101141927469189381,
-    1881431857880609316,
-    4907847477899433595,
-    8714572486973123228,
-    3514969632331374520,
-    11667642286891470094,
-    2391499697425323350,
-    17486585679659076043,
-    18267223761882105642,
-    2886610765822313148,
-    9302834862968900288,
-    15246507846733637044,
-    15924227519624562840,
-    9743741243284697760,
-    3159780987244964246,
-    7304816812369628428,
-    17584602612559717809,
-    4146812420657846766,
-    14525415362681041515,
-    8477630142371600195,
-    4380695748062263745,
-    12119915994367943173,
-    16970630866565485122,
-    4332724980155264503,
-    8079943140620527639,
-    1687908087554405626,
-    17051081099834002166,
-    12638146269730763230,
-    11883749876933445771,
-    4662462156371383785,
-    4796962238316531176,
-    3325504751659868927,
-    6469595803187862550,
-    5852556621152583005,
-    9229334792448387881,
-    17979733373938620709,
-    13951623534175792756,
-    17075879371091039277,
-    14212246479457938037,
-    4008999959804158260,
-    2414266395366403722,
-    3252733766253918247,
-    6382678985007829216,
-    2245927470982310841,
-    13790724502051307301,
-    13116936866733148041,
-    9718402891306794538,
-    13516274400356104875,
-    17859223875778049403,
-    4396895129099725471,
-    3563053650368467915,
-    12176845952536972668,
-    3492050964335269015,
-    2740656767075170753,
-    4409704077614761919,
-    10237775279597492710,
-    3314206875098230827,
-    16437361028114095448,
-    12361736225407656572,
-    16792510651790145480,
-    11449053143229929935,
-    18336641737580333136,
-    6558939822118891088,
-    4606255756908155300,
-    2360792578991605004,
-    160428430149144538,
-    11644861220729221511,
-    10785178451159739786,
-    14923560618031934681,
-    1902620814992781610,
-    14064076995338910412,
-    11547019064112212657,
-    16847481479966225734,
-    8331994491163145469,
-    11739712981738851885,
-    8008309968651120619,
-    10266969595459035264,
-    15175153381217702033,
-    12208659352573720245,
-    7714061140750342961,
-    2892831567213510541,
-    15453714249045017319,
-    71020323573871677,
-    15431137995750602633,
-    5659146884637671933,
-    5998809010488554503,
-    16552192379299157850,
-    1192197967194298797,
-    16157555793424861524,
-    10929371590994640255,
-    3194469143425738352,
-    6651586784672005225,
-    11062427140788057791,
-    6834443579468668318,
-    16421563197797455922,
-    6251046422506172884,
-    13952303462156793860,
-    16632486601871393224,
-    11313454360291325172,
-    5587835232504462834,
-    3105197524618514637,
-    18268568531031972989,
-    2397205535804309313,
-    59413027864729597,
-    11869878125348715710,
-    12592801707270523266,
-    8070632061321113656,
-    18403647807860650811,
-    267109013517069093,
-    6537214311028855260,
-    5220826919973709902,
-    3448740582779163661,
-    16822239213112884941,
-    5975299384311048185,
-    10294433804430712138,
-    4739856055412448774,
-    12057273038326387897,
-    13119002941950056609,
-    3354445304051737058,
-    13592813067499314594,
-    3890182464434078629,
-    17820384357466425060,
-    9785228118969879380,
-    1778431746734556271,
-    10075313876350055029,
-    13994048489400919028,
-    17948287074199726448,
-    2815088342305858722,
-    2676626035777198370,
-    1174257960026283968,
-    421714788677,
-];
-const POW5_14: [u64; 298] = [
-    11471884475673051137,
-    8902860357476377573,
-    13350296775839230505,
-    10609191786344608888,
-    7261211985859587338,
-    11439672689354862964,
-    16789708072300570627,
-    4607056528866348430,
-    3202978990421512997,
-    2024899620433984146,
-    17666950207239811774,
-    4233228489390288200,
-    9137580478688460738,
-    4060411066587388546,
-    11119949806060600124,
-    867715462473090103,
-    14382394941384869610,
-    4856042377419278489,
-    8265605599571137921,
-    538981667666252469,
-    4270263388700786523,
-    3281140600308898503,
-    4121392524544394174,
-    2077884106245940229,
-    9773041957329767574,
-    7550623316597646685,
-    8611033926449791714,
-    18137922955420802793,
-    2796546741236224013,
-    15477096484628446761,
-    9517540128113714010,
-    9471917970500821378,
-    15938570248662483124,
-    5228016831978462619,
-    15720991252586974501,
-    7662829825220776698,
-    17328310068068434348,
-    3371736428170309730,
-    3803724952191098855,
-    13115926536504376719,
-    16752571196153442257,
-    16540185467776259880,
-    3432518182450051120,
-    5880364967211798870,
-    12355748840305392783,
-    14196090758536469575,
-    7370123524686686319,
-    6819740424617592686,
-    13037938013537368753,
-    15029273671291927100,
-    3671312928327205696,
-    7473228676544792780,
-    17234079691312938123,
-    14164740848093544419,
-    13169904779481875902,
-    7179036968465894054,
-    8244653688947194445,
-    17179797746073799490,
-    5591970751047577674,
-    17530550506268329742,
-    5965746721852312330,
-    1604149463243472865,
-    7734199791463116918,
-    11305790396015856714,
-    4441196105025505137,
-    13046431581185664762,
-    124776524294606713,
-    1134521334706523966,
-    11671728093344476434,
-    14103440020972933148,
-    3966727403013869059,
-    9828094508409132821,
-    4355682486381147287,
-    10261407143988481234,
-    3800455155249557199,
-    12700901937937547500,
-    18184475466894579360,
-    13267691151779895412,
-    4714157123477697445,
-    10770360171308585263,
-    9083344917597998040,
-    12078649873810212155,
-    18218989082046199377,
-    4454285072780637351,
-    5287307245618354742,
-    16042289702059031730,
-    4131926574212754010,
-    217692071448455473,
-    3624845916216282093,
-    2901203491797614218,
-    6679177724033967080,
-    44561358851332790,
-    9094639944041587162,
-    13690915012276084311,
-    1408896670826320686,
-    5359130319612337580,
-    6148412925099835601,
-    5211368532286409612,
-    11386360825549027374,
-    16895182466965795071,
-    3392940493846427241,
-    438089879085393580,
-    4783928372776399972,
-    6278117363595909959,
-    12569481049412674733,
-    15648622492570893902,
-    1966316336235305115,
-    1603775390515993547,
-    13576113010204316709,
-    10821754650102840474,
-    18198222517222903152,
-    6966163076615302988,
-    1373932372410129684,
-    3285839581819684990,
-    30177575069719475,
-    16447047871247307061,
-    11618654126674833808,
-    990072222556306872,
-    1260682336135768017,
-    13862055046689532489,
-    15668483092844698432,
-    1879572630092764264,
-    13912027797058626108,
-    6231679788219816920,
-    13857858054844167403,
-    18101470072534728857,
-    4144579812461609229,
-    7048589655616599284,
-    9946956499532694630,
-    9771303850109874038,
-    6477823708780339765,
-    17526247621747041971,
-    13525995675852669549,
-    3928768291901239810,
-    8094153383078124544,
-    11214278667728965552,
-    11251547162596832610,
-    5964946855123292381,
-    3622548288590237903,
-    13469765967150053587,
-    17798986288523466082,
-    14684592818807932259,
-    16724077276802963921,
-    7119877993753121290,
-    1864571304902781632,
-    12871984921385213812,
-    9065447042604670298,
-    3987130777300360550,
-    6890545752116901685,
-    17275341711601865750,
-    6296474927799264658,
-    1257436973037243463,
-    13854281781965301421,
-    1657132483318662716,
-    17309399540017292849,
-    12808111630089217242,
-    1098489625264462071,
-    14010458905686364135,
-    16134414519481621220,
-    14288255900328821475,
-    3469093466388187882,
-    15982710881468295872,
-    4056765540058056052,
-    15945176389096104089,
-    8625339365793505375,
-    12316179968863788913,
-    15334123773538054321,
-    9536238824220581765,
-    16080825720106203271,
-    6235695225418121745,
-    12035192956458019349,
-    3235835166714703698,
-    5348960676912581218,
-    15315062772709464647,
-    17335089708021308662,
-    16855855317958414409,
-    2369751139431140406,
-    3693542588628609043,
-    7350405893393987577,
-    17402072586341663801,
-    7007897690013647122,
-    15671767872059304758,
-    9259490518292347915,
-    14836045474406130394,
-    4654005815464502513,
-    6487825998330548401,
-    7013356660323385022,
-    7136200343936679946,
-    15341236858676437716,
-    3657357368867197449,
-    12621075530054608378,
-    5603868621997066972,
-    7683447656788439942,
-    450883379216880060,
-    14291494350184945047,
-    5466258454997635048,
-    14206933098432772126,
-    4775870327277641692,
-    1864430798867181939,
-    13748978265070608793,
-    12250822864261576589,
-    12561896977498605296,
-    16060949594257359328,
-    17775189113543311529,
-    11835965177892927035,
-    4218664174878121437,
-    3499000902478111683,
-    15169853304359126294,
-    7076121963053575143,
-    832652347668916805,
-    1292148207755194737,
-    7556838978364207852,
-    5904021986723518500,
-    4610244652288570024,
-    4526508363195533871,
-    746120481022614726,
-    737965197247830486,
-    4006266184415762653,
-    9272188239892688050,
-    15346235246415709678,
-    11850675997347533184,
-    11181059668610842701,
-    6687857983250662774,
-    2908718488661492818,
-    4828337780126983225,
-    18071738646453002184,
-    12790187227727197880,
-    17602483480871623153,
-    12523532189621855977,
-    10598805712727696716,
-    2179787555896149376,
-    2242193929457337594,
-    14908923241136742532,
-    8369182018012550027,
-    13385381554043022324,
-    3332327430110633913,
-    16138090784046208492,
-    16172324607469047339,
-    8279089815915615244,
-    12872906602736235247,
-    10894545290539475621,
-    15428756545851905023,
-    4155747980686992922,
-    4074479178894544043,
-    66083965608603584,
-    13873786284662268377,
-    8861183628277687555,
-    12119497911296021430,
-    2154012318305274287,
-    15490706314503067312,
-    13643145488710608367,
-    672340241093017103,
-    6039493278284091973,
-    9679797700977436461,
-    18070795828318171174,
-    2188146431134935377,
-    5247392385741514952,
-    1852539214842869734,
-    12235621681634112739,
-    8812930319623534062,
-    5585597406294108629,
-    11312989214475901864,
-    1547377291787797995,
-    8641748937186208205,
-    12518148659168623694,
-    6611379197521520985,
-    18096591571068008576,
-    15087021227100112139,
-    13058454842015958418,
-    1473584652966833794,
-    4387660670140018168,
-    8452836916843525402,
-    14376083294443363955,
-    13998026203969090659,
-    611968444648172645,
-    990232438801273845,
-    18001186324715561929,
-    13470591857250177501,
-    14881554140239420091,
-    16696367836720124495,
-    6328076032778459673,
-    17027497695968504616,
-    10192245646262428833,
-    8282482589527318647,
-    4319014353374321425,
-    14134087271041670980,
-    5060230880114618599,
-    13179509240430058600,
-    3903514232614801894,
-    17774749744702165255,
-    15448635507030969726,
-    15983775238358480209,
-    14542832143965487887,
-    9385618098039514666,
-    14431419612662304843,
-    730863073501675978,
-    16750118380379734815,
-    9640,
-];
-
-pub(crate) const POW5: [&[u64]; 14] = [
-    &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10,
-    &POW5_11, &POW5_12, &POW5_13, &POW5_14,
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/math.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/math.rs
deleted file mode 100644
index 2e900f1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/math.rs
+++ /dev/null
@@ -1,884 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Building-blocks for arbitrary-precision math.
-//!
-//! These algorithms assume little-endian order for the large integer
-//! buffers, so for a `vec![0, 1, 2, 3]`, `3` is the most significant limb,
-//! and `0` is the least significant limb.
-
-use super::large_powers;
-use super::num::*;
-use super::small_powers::*;
-use alloc::vec::Vec;
-use core::{cmp, iter, mem};
-
-// ALIASES
-// -------
-
-//  Type for a single limb of the big integer.
-//
-//  A limb is analogous to a digit in base10, except, it stores 32-bit
-//  or 64-bit numbers instead.
-//
-//  This should be all-known 64-bit platforms supported by Rust.
-//      https://forge.rust-lang.org/platform-support.html
-//
-//  Platforms where native 128-bit multiplication is explicitly supported:
-//      - x86_64 (Supported via `MUL`).
-//      - mips64 (Supported via `DMULTU`, which `HI` and `LO` can be read-from).
-//
-//  Platforms where native 64-bit multiplication is supported and
-//  you can extract hi-lo for 64-bit multiplications.
-//      aarch64 (Requires `UMULH` and `MUL` to capture high and low bits).
-//      powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits).
-//
-//  Platforms where native 128-bit multiplication is not supported,
-//  requiring software emulation.
-//      sparc64 (`UMUL` only supported double-word arguments).
-
-// 32-BIT LIMB
-#[cfg(fast_arithmetic = "32")]
-pub type Limb = u32;
-
-#[cfg(fast_arithmetic = "32")]
-pub const POW5_LIMB: &[Limb] = &POW5_32;
-
-#[cfg(fast_arithmetic = "32")]
-pub const POW10_LIMB: &[Limb] = &POW10_32;
-
-#[cfg(fast_arithmetic = "32")]
-type Wide = u64;
-
-// 64-BIT LIMB
-#[cfg(fast_arithmetic = "64")]
-pub type Limb = u64;
-
-#[cfg(fast_arithmetic = "64")]
-pub const POW5_LIMB: &[Limb] = &POW5_64;
-
-#[cfg(fast_arithmetic = "64")]
-pub const POW10_LIMB: &[Limb] = &POW10_64;
-
-#[cfg(fast_arithmetic = "64")]
-type Wide = u128;
-
-/// Cast to limb type.
-#[inline]
-pub(crate) fn as_limb<T: Integer>(t: T) -> Limb {
-    Limb::as_cast(t)
-}
-
-/// Cast to wide type.
-#[inline]
-fn as_wide<T: Integer>(t: T) -> Wide {
-    Wide::as_cast(t)
-}
-
-// SPLIT
-// -----
-
-/// Split u64 into limbs, in little-endian order.
-#[inline]
-#[cfg(fast_arithmetic = "32")]
-fn split_u64(x: u64) -> [Limb; 2] {
-    [as_limb(x), as_limb(x >> 32)]
-}
-
-/// Split u64 into limbs, in little-endian order.
-#[inline]
-#[cfg(fast_arithmetic = "64")]
-fn split_u64(x: u64) -> [Limb; 1] {
-    [as_limb(x)]
-}
-
-// HI64
-// ----
-
-// NONZERO
-
-/// Check if any of the remaining bits are non-zero.
-#[inline]
-pub fn nonzero<T: Integer>(x: &[T], rindex: usize) -> bool {
-    let len = x.len();
-    let slc = &x[..len - rindex];
-    slc.iter().rev().any(|&x| x != T::ZERO)
-}
-
-/// Shift 64-bit integer to high 64-bits.
-#[inline]
-fn u64_to_hi64_1(r0: u64) -> (u64, bool) {
-    debug_assert!(r0 != 0);
-    let ls = r0.leading_zeros();
-    (r0 << ls, false)
-}
-
-/// Shift 2 64-bit integers to high 64-bits.
-#[inline]
-fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
-    debug_assert!(r0 != 0);
-    let ls = r0.leading_zeros();
-    let rs = 64 - ls;
-    let v = match ls {
-        0 => r0,
-        _ => (r0 << ls) | (r1 >> rs),
-    };
-    let n = r1 << ls != 0;
-    (v, n)
-}
-
-/// Trait to export the high 64-bits from a little-endian slice.
-trait Hi64<T>: AsRef<[T]> {
-    /// Get the hi64 bits from a 1-limb slice.
-    fn hi64_1(&self) -> (u64, bool);
-
-    /// Get the hi64 bits from a 2-limb slice.
-    fn hi64_2(&self) -> (u64, bool);
-
-    /// Get the hi64 bits from a 3-limb slice.
-    fn hi64_3(&self) -> (u64, bool);
-
-    /// High-level exporter to extract the high 64 bits from a little-endian slice.
-    #[inline]
-    fn hi64(&self) -> (u64, bool) {
-        match self.as_ref().len() {
-            0 => (0, false),
-            1 => self.hi64_1(),
-            2 => self.hi64_2(),
-            _ => self.hi64_3(),
-        }
-    }
-}
-
-impl Hi64<u32> for [u32] {
-    #[inline]
-    fn hi64_1(&self) -> (u64, bool) {
-        debug_assert!(self.len() == 1);
-        let r0 = self[0] as u64;
-        u64_to_hi64_1(r0)
-    }
-
-    #[inline]
-    fn hi64_2(&self) -> (u64, bool) {
-        debug_assert!(self.len() == 2);
-        let r0 = (self[1] as u64) << 32;
-        let r1 = self[0] as u64;
-        u64_to_hi64_1(r0 | r1)
-    }
-
-    #[inline]
-    fn hi64_3(&self) -> (u64, bool) {
-        debug_assert!(self.len() >= 3);
-        let r0 = self[self.len() - 1] as u64;
-        let r1 = (self[self.len() - 2] as u64) << 32;
-        let r2 = self[self.len() - 3] as u64;
-        let (v, n) = u64_to_hi64_2(r0, r1 | r2);
-        (v, n || nonzero(self, 3))
-    }
-}
-
-impl Hi64<u64> for [u64] {
-    #[inline]
-    fn hi64_1(&self) -> (u64, bool) {
-        debug_assert!(self.len() == 1);
-        let r0 = self[0];
-        u64_to_hi64_1(r0)
-    }
-
-    #[inline]
-    fn hi64_2(&self) -> (u64, bool) {
-        debug_assert!(self.len() >= 2);
-        let r0 = self[self.len() - 1];
-        let r1 = self[self.len() - 2];
-        let (v, n) = u64_to_hi64_2(r0, r1);
-        (v, n || nonzero(self, 2))
-    }
-
-    #[inline]
-    fn hi64_3(&self) -> (u64, bool) {
-        self.hi64_2()
-    }
-}
-
-// SCALAR
-// ------
-
-// Scalar-to-scalar operations, for building-blocks for arbitrary-precision
-// operations.
-
-mod scalar {
-    use super::*;
-
-    // ADDITION
-
-    /// Add two small integers and return the resulting value and if overflow happens.
-    #[inline]
-    pub fn add(x: Limb, y: Limb) -> (Limb, bool) {
-        x.overflowing_add(y)
-    }
-
-    /// AddAssign two small integers and return if overflow happens.
-    #[inline]
-    pub fn iadd(x: &mut Limb, y: Limb) -> bool {
-        let t = add(*x, y);
-        *x = t.0;
-        t.1
-    }
-
-    // SUBTRACTION
-
-    /// Subtract two small integers and return the resulting value and if overflow happens.
-    #[inline]
-    pub fn sub(x: Limb, y: Limb) -> (Limb, bool) {
-        x.overflowing_sub(y)
-    }
-
-    /// SubAssign two small integers and return if overflow happens.
-    #[inline]
-    pub fn isub(x: &mut Limb, y: Limb) -> bool {
-        let t = sub(*x, y);
-        *x = t.0;
-        t.1
-    }
-
-    // MULTIPLICATION
-
-    /// Multiply two small integers (with carry) (and return the overflow contribution).
-    ///
-    /// Returns the (low, high) components.
-    #[inline]
-    pub fn mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
-        // Cannot overflow, as long as wide is 2x as wide. This is because
-        // the following is always true:
-        // `Wide::max_value() - (Narrow::max_value() * Narrow::max_value()) >= Narrow::max_value()`
-        let z: Wide = as_wide(x) * as_wide(y) + as_wide(carry);
-        let bits = mem::size_of::<Limb>() * 8;
-        (as_limb(z), as_limb(z >> bits))
-    }
-
-    /// Multiply two small integers (with carry) (and return if overflow happens).
-    #[inline]
-    pub fn imul(x: &mut Limb, y: Limb, carry: Limb) -> Limb {
-        let t = mul(*x, y, carry);
-        *x = t.0;
-        t.1
-    }
-} // scalar
-
-// SMALL
-// -----
-
-// Large-to-small operations, to modify a big integer from a native scalar.
-
-mod small {
-    use super::*;
-
-    // ADDITION
-
-    /// Implied AddAssign implementation for adding a small integer to bigint.
-    ///
-    /// Allows us to choose a start-index in x to store, to allow incrementing
-    /// from a non-zero start.
-    #[inline]
-    pub fn iadd_impl(x: &mut Vec<Limb>, y: Limb, xstart: usize) {
-        if x.len() <= xstart {
-            x.push(y);
-        } else {
-            // Initial add
-            let mut carry = scalar::iadd(&mut x[xstart], y);
-
-            // Increment until overflow stops occurring.
-            let mut size = xstart + 1;
-            while carry && size < x.len() {
-                carry = scalar::iadd(&mut x[size], 1);
-                size += 1;
-            }
-
-            // If we overflowed the buffer entirely, need to add 1 to the end
-            // of the buffer.
-            if carry {
-                x.push(1);
-            }
-        }
-    }
-
-    /// AddAssign small integer to bigint.
-    #[inline]
-    pub fn iadd(x: &mut Vec<Limb>, y: Limb) {
-        iadd_impl(x, y, 0);
-    }
-
-    // SUBTRACTION
-
-    /// SubAssign small integer to bigint.
-    /// Does not do overflowing subtraction.
-    #[inline]
-    pub fn isub_impl(x: &mut Vec<Limb>, y: Limb, xstart: usize) {
-        debug_assert!(x.len() > xstart && (x[xstart] >= y || x.len() > xstart + 1));
-
-        // Initial subtraction
-        let mut carry = scalar::isub(&mut x[xstart], y);
-
-        // Increment until overflow stops occurring.
-        let mut size = xstart + 1;
-        while carry && size < x.len() {
-            carry = scalar::isub(&mut x[size], 1);
-            size += 1;
-        }
-        normalize(x);
-    }
-
-    // MULTIPLICATION
-
-    /// MulAssign small integer to bigint.
-    #[inline]
-    pub fn imul(x: &mut Vec<Limb>, y: Limb) {
-        // Multiply iteratively over all elements, adding the carry each time.
-        let mut carry: Limb = 0;
-        for xi in &mut *x {
-            carry = scalar::imul(xi, y, carry);
-        }
-
-        // Overflow of value, add to end.
-        if carry != 0 {
-            x.push(carry);
-        }
-    }
-
-    /// Mul small integer to bigint.
-    #[inline]
-    pub fn mul(x: &[Limb], y: Limb) -> Vec<Limb> {
-        let mut z = Vec::<Limb>::default();
-        z.extend_from_slice(x);
-        imul(&mut z, y);
-        z
-    }
-
-    /// MulAssign by a power.
-    ///
-    /// Theoretically...
-    ///
-    /// Use an exponentiation by squaring method, since it reduces the time
-    /// complexity of the multiplication to ~`O(log(n))` for the squaring,
-    /// and `O(n*m)` for the result. Since `m` is typically a lower-order
-    /// factor, this significantly reduces the number of multiplications
-    /// we need to do. Iteratively multiplying by small powers follows
-    /// the nth triangular number series, which scales as `O(p^2)`, but
-    /// where `p` is `n+m`. In short, it scales very poorly.
-    ///
-    /// Practically....
-    ///
-    /// Exponentiation by Squaring:
-    ///     running 2 tests
-    ///     test bigcomp_f32_lexical ... bench:       1,018 ns/iter (+/- 78)
-    ///     test bigcomp_f64_lexical ... bench:       3,639 ns/iter (+/- 1,007)
-    ///
-    /// Exponentiation by Iterative Small Powers:
-    ///     running 2 tests
-    ///     test bigcomp_f32_lexical ... bench:         518 ns/iter (+/- 31)
-    ///     test bigcomp_f64_lexical ... bench:         583 ns/iter (+/- 47)
-    ///
-    /// Exponentiation by Iterative Large Powers (of 2):
-    ///     running 2 tests
-    ///     test bigcomp_f32_lexical ... bench:         671 ns/iter (+/- 31)
-    ///     test bigcomp_f64_lexical ... bench:       1,394 ns/iter (+/- 47)
-    ///
-    /// Even using worst-case scenarios, exponentiation by squaring is
-    /// significantly slower for our workloads. Just multiply by small powers,
-    /// in simple cases, and use precalculated large powers in other cases.
-    pub fn imul_pow5(x: &mut Vec<Limb>, n: u32) {
-        use super::large::KARATSUBA_CUTOFF;
-
-        let small_powers = POW5_LIMB;
-        let large_powers = large_powers::POW5;
-
-        if n == 0 {
-            // No exponent, just return.
-            // The 0-index of the large powers is `2^0`, which is 1, so we want
-            // to make sure we don't take that path with a literal 0.
-            return;
-        }
-
-        // We want to use the asymptotically faster algorithm if we're going
-        // to be using Karabatsu multiplication sometime during the result,
-        // otherwise, just use exponentiation by squaring.
-        let bit_length = 32 - n.leading_zeros() as usize;
-        debug_assert!(bit_length != 0 && bit_length <= large_powers.len());
-        if x.len() + large_powers[bit_length - 1].len() < 2 * KARATSUBA_CUTOFF {
-            // We can use iterative small powers to make this faster for the
-            // easy cases.
-
-            // Multiply by the largest small power until n < step.
-            let step = small_powers.len() - 1;
-            let power = small_powers[step];
-            let mut n = n as usize;
-            while n >= step {
-                imul(x, power);
-                n -= step;
-            }
-
-            // Multiply by the remainder.
-            imul(x, small_powers[n]);
-        } else {
-            // In theory, this code should be asymptotically a lot faster,
-            // in practice, our small::imul seems to be the limiting step,
-            // and large imul is slow as well.
-
-            // Multiply by higher order powers.
-            let mut idx: usize = 0;
-            let mut bit: usize = 1;
-            let mut n = n as usize;
-            while n != 0 {
-                if n & bit != 0 {
-                    debug_assert!(idx < large_powers.len());
-                    large::imul(x, large_powers[idx]);
-                    n ^= bit;
-                }
-                idx += 1;
-                bit <<= 1;
-            }
-        }
-    }
-
-    // BIT LENGTH
-
-    /// Get number of leading zero bits in the storage.
-    #[inline]
-    pub fn leading_zeros(x: &[Limb]) -> usize {
-        x.last().map_or(0, |x| x.leading_zeros() as usize)
-    }
-
-    /// Calculate the bit-length of the big-integer.
-    #[inline]
-    pub fn bit_length(x: &[Limb]) -> usize {
-        let bits = mem::size_of::<Limb>() * 8;
-        // Avoid overflowing, calculate via total number of bits
-        // minus leading zero bits.
-        let nlz = leading_zeros(x);
-        bits.checked_mul(x.len())
-            .map_or_else(usize::max_value, |v| v - nlz)
-    }
-
-    // SHL
-
-    /// Shift-left bits inside a buffer.
-    ///
-    /// Assumes `n < Limb::BITS`, IE, internally shifting bits.
-    #[inline]
-    pub fn ishl_bits(x: &mut Vec<Limb>, n: usize) {
-        // Need to shift by the number of `bits % Limb::BITS)`.
-        let bits = mem::size_of::<Limb>() * 8;
-        debug_assert!(n < bits);
-        if n == 0 {
-            return;
-        }
-
-        // Internally, for each item, we shift left by n, and add the previous
-        // right shifted limb-bits.
-        // For example, we transform (for u8) shifted left 2, to:
-        //      b10100100 b01000010
-        //      b10 b10010001 b00001000
-        let rshift = bits - n;
-        let lshift = n;
-        let mut prev: Limb = 0;
-        for xi in &mut *x {
-            let tmp = *xi;
-            *xi <<= lshift;
-            *xi |= prev >> rshift;
-            prev = tmp;
-        }
-
-        // Always push the carry, even if it creates a non-normal result.
-        let carry = prev >> rshift;
-        if carry != 0 {
-            x.push(carry);
-        }
-    }
-
-    /// Shift-left `n` digits inside a buffer.
-    ///
-    /// Assumes `n` is not 0.
-    #[inline]
-    pub fn ishl_limbs(x: &mut Vec<Limb>, n: usize) {
-        debug_assert!(n != 0);
-        if !x.is_empty() {
-            x.reserve(n);
-            x.splice(..0, iter::repeat(0).take(n));
-        }
-    }
-
-    /// Shift-left buffer by n bits.
-    #[inline]
-    pub fn ishl(x: &mut Vec<Limb>, n: usize) {
-        let bits = mem::size_of::<Limb>() * 8;
-        // Need to pad with zeros for the number of `bits / Limb::BITS`,
-        // and shift-left with carry for `bits % Limb::BITS`.
-        let rem = n % bits;
-        let div = n / bits;
-        ishl_bits(x, rem);
-        if div != 0 {
-            ishl_limbs(x, div);
-        }
-    }
-
-    // NORMALIZE
-
-    /// Normalize the container by popping any leading zeros.
-    #[inline]
-    pub fn normalize(x: &mut Vec<Limb>) {
-        // Remove leading zero if we cause underflow. Since we're dividing
-        // by a small power, we have at max 1 int removed.
-        while x.last() == Some(&0) {
-            x.pop();
-        }
-    }
-} // small
-
-// LARGE
-// -----
-
-// Large-to-large operations, to modify a big integer from a native scalar.
-
-mod large {
-    use super::*;
-
-    // RELATIVE OPERATORS
-
-    /// Compare `x` to `y`, in little-endian order.
-    #[inline]
-    pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering {
-        if x.len() > y.len() {
-            cmp::Ordering::Greater
-        } else if x.len() < y.len() {
-            cmp::Ordering::Less
-        } else {
-            let iter = x.iter().rev().zip(y.iter().rev());
-            for (&xi, &yi) in iter {
-                if xi > yi {
-                    return cmp::Ordering::Greater;
-                } else if xi < yi {
-                    return cmp::Ordering::Less;
-                }
-            }
-            // Equal case.
-            cmp::Ordering::Equal
-        }
-    }
-
-    /// Check if x is less than y.
-    #[inline]
-    pub fn less(x: &[Limb], y: &[Limb]) -> bool {
-        compare(x, y) == cmp::Ordering::Less
-    }
-
-    /// Check if x is greater than or equal to y.
-    #[inline]
-    pub fn greater_equal(x: &[Limb], y: &[Limb]) -> bool {
-        !less(x, y)
-    }
-
-    // ADDITION
-
-    /// Implied AddAssign implementation for bigints.
-    ///
-    /// Allows us to choose a start-index in x to store, so we can avoid
-    /// padding the buffer with zeros when not needed, optimized for vectors.
-    pub fn iadd_impl(x: &mut Vec<Limb>, y: &[Limb], xstart: usize) {
-        // The effective x buffer is from `xstart..x.len()`, so we need to treat
-        // that as the current range. If the effective y buffer is longer, need
-        // to resize to that, + the start index.
-        if y.len() > x.len() - xstart {
-            x.resize(y.len() + xstart, 0);
-        }
-
-        // Iteratively add elements from y to x.
-        let mut carry = false;
-        for (xi, yi) in x[xstart..].iter_mut().zip(y.iter()) {
-            // Only one op of the two can overflow, since we added at max
-            // Limb::max_value() + Limb::max_value(). Add the previous carry,
-            // and store the current carry for the next.
-            let mut tmp = scalar::iadd(xi, *yi);
-            if carry {
-                tmp |= scalar::iadd(xi, 1);
-            }
-            carry = tmp;
-        }
-
-        // Overflow from the previous bit.
-        if carry {
-            small::iadd_impl(x, 1, y.len() + xstart);
-        }
-    }
-
-    /// AddAssign bigint to bigint.
-    #[inline]
-    pub fn iadd(x: &mut Vec<Limb>, y: &[Limb]) {
-        iadd_impl(x, y, 0);
-    }
-
-    /// Add bigint to bigint.
-    #[inline]
-    pub fn add(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
-        let mut z = Vec::<Limb>::default();
-        z.extend_from_slice(x);
-        iadd(&mut z, y);
-        z
-    }
-
-    // SUBTRACTION
-
-    /// SubAssign bigint to bigint.
-    pub fn isub(x: &mut Vec<Limb>, y: &[Limb]) {
-        // Basic underflow checks.
-        debug_assert!(greater_equal(x, y));
-
-        // Iteratively add elements from y to x.
-        let mut carry = false;
-        for (xi, yi) in x.iter_mut().zip(y.iter()) {
-            // Only one op of the two can overflow, since we added at max
-            // Limb::max_value() + Limb::max_value(). Add the previous carry,
-            // and store the current carry for the next.
-            let mut tmp = scalar::isub(xi, *yi);
-            if carry {
-                tmp |= scalar::isub(xi, 1);
-            }
-            carry = tmp;
-        }
-
-        if carry {
-            small::isub_impl(x, 1, y.len());
-        } else {
-            small::normalize(x);
-        }
-    }
-
-    // MULTIPLICATION
-
-    /// Number of digits to bottom-out to asymptotically slow algorithms.
-    ///
-    /// Karatsuba tends to out-perform long-multiplication at ~320-640 bits,
-    /// so we go halfway, while Newton division tends to out-perform
-    /// Algorithm D at ~1024 bits. We can toggle this for optimal performance.
-    pub const KARATSUBA_CUTOFF: usize = 32;
-
-    /// Grade-school multiplication algorithm.
-    ///
-    /// Slow, naive algorithm, using limb-bit bases and just shifting left for
-    /// each iteration. This could be optimized with numerous other algorithms,
-    /// but it's extremely simple, and works in O(n*m) time, which is fine
-    /// by me. Each iteration, of which there are `m` iterations, requires
-    /// `n` multiplications, and `n` additions, or grade-school multiplication.
-    fn long_mul(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
-        // Using the immutable value, multiply by all the scalars in y, using
-        // the algorithm defined above. Use a single buffer to avoid
-        // frequent reallocations. Handle the first case to avoid a redundant
-        // addition, since we know y.len() >= 1.
-        let mut z: Vec<Limb> = small::mul(x, y[0]);
-        z.resize(x.len() + y.len(), 0);
-
-        // Handle the iterative cases.
-        for (i, &yi) in y[1..].iter().enumerate() {
-            let zi: Vec<Limb> = small::mul(x, yi);
-            iadd_impl(&mut z, &zi, i + 1);
-        }
-
-        small::normalize(&mut z);
-
-        z
-    }
-
-    /// Split two buffers into halfway, into (lo, hi).
-    #[inline]
-    pub fn karatsuba_split(z: &[Limb], m: usize) -> (&[Limb], &[Limb]) {
-        (&z[..m], &z[m..])
-    }
-
-    /// Karatsuba multiplication algorithm with roughly equal input sizes.
-    ///
-    /// Assumes `y.len() >= x.len()`.
-    fn karatsuba_mul(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
-        if y.len() <= KARATSUBA_CUTOFF {
-            // Bottom-out to long division for small cases.
-            long_mul(x, y)
-        } else if x.len() < y.len() / 2 {
-            karatsuba_uneven_mul(x, y)
-        } else {
-            // Do our 3 multiplications.
-            let m = y.len() / 2;
-            let (xl, xh) = karatsuba_split(x, m);
-            let (yl, yh) = karatsuba_split(y, m);
-            let sumx = add(xl, xh);
-            let sumy = add(yl, yh);
-            let z0 = karatsuba_mul(xl, yl);
-            let mut z1 = karatsuba_mul(&sumx, &sumy);
-            let z2 = karatsuba_mul(xh, yh);
-            // Properly scale z1, which is `z1 - z2 - zo`.
-            isub(&mut z1, &z2);
-            isub(&mut z1, &z0);
-
-            // Create our result, which is equal to, in little-endian order:
-            // [z0, z1 - z2 - z0, z2]
-            //  z1 must be shifted m digits (2^(32m)) over.
-            //  z2 must be shifted 2*m digits (2^(64m)) over.
-            let len = z0.len().max(m + z1.len()).max(2 * m + z2.len());
-            let mut result = z0;
-            result.reserve_exact(len - result.len());
-            iadd_impl(&mut result, &z1, m);
-            iadd_impl(&mut result, &z2, 2 * m);
-
-            result
-        }
-    }
-
-    /// Karatsuba multiplication algorithm where y is substantially larger than x.
-    ///
-    /// Assumes `y.len() >= x.len()`.
-    fn karatsuba_uneven_mul(x: &[Limb], mut y: &[Limb]) -> Vec<Limb> {
-        let mut result = Vec::<Limb>::default();
-        result.resize(x.len() + y.len(), 0);
-
-        // This effectively is like grade-school multiplication between
-        // two numbers, except we're using splits on `y`, and the intermediate
-        // step is a Karatsuba multiplication.
-        let mut start = 0;
-        while !y.is_empty() {
-            let m = x.len().min(y.len());
-            let (yl, yh) = karatsuba_split(y, m);
-            let prod = karatsuba_mul(x, yl);
-            iadd_impl(&mut result, &prod, start);
-            y = yh;
-            start += m;
-        }
-        small::normalize(&mut result);
-
-        result
-    }
-
-    /// Forwarder to the proper Karatsuba algorithm.
-    #[inline]
-    fn karatsuba_mul_fwd(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
-        if x.len() < y.len() {
-            karatsuba_mul(x, y)
-        } else {
-            karatsuba_mul(y, x)
-        }
-    }
-
-    /// MulAssign bigint to bigint.
-    #[inline]
-    pub fn imul(x: &mut Vec<Limb>, y: &[Limb]) {
-        if y.len() == 1 {
-            small::imul(x, y[0]);
-        } else {
-            // We're not really in a condition where using Karatsuba
-            // multiplication makes sense, so we're just going to use long
-            // division. ~20% speedup compared to:
-            //      *x = karatsuba_mul_fwd(x, y);
-            *x = karatsuba_mul_fwd(x, y);
-        }
-    }
-} // large
-
-// TRAITS
-// ------
-
-/// Traits for shared operations for big integers.
-///
-/// None of these are implemented using normal traits, since these
-/// are very expensive operations, and we want to deliberately
-/// and explicitly use these functions.
-pub(crate) trait Math: Clone + Sized + Default {
-    // DATA
-
-    /// Get access to the underlying data
-    fn data(&self) -> &Vec<Limb>;
-
-    /// Get access to the underlying data
-    fn data_mut(&mut self) -> &mut Vec<Limb>;
-
-    // RELATIVE OPERATIONS
-
-    /// Compare self to y.
-    #[inline]
-    fn compare(&self, y: &Self) -> cmp::Ordering {
-        large::compare(self.data(), y.data())
-    }
-
-    // PROPERTIES
-
-    /// Get the high 64-bits from the bigint and if there are remaining bits.
-    #[inline]
-    fn hi64(&self) -> (u64, bool) {
-        self.data().as_slice().hi64()
-    }
-
-    /// Calculate the bit-length of the big-integer.
-    /// Returns usize::max_value() if the value overflows,
-    /// IE, if `self.data().len() > usize::max_value() / 8`.
-    #[inline]
-    fn bit_length(&self) -> usize {
-        small::bit_length(self.data())
-    }
-
-    // INTEGER CONVERSIONS
-
-    /// Create new big integer from u64.
-    #[inline]
-    fn from_u64(x: u64) -> Self {
-        let mut v = Self::default();
-        let slc = split_u64(x);
-        v.data_mut().extend_from_slice(&slc);
-        v.normalize();
-        v
-    }
-
-    // NORMALIZE
-
-    /// Normalize the integer, so any leading zero values are removed.
-    #[inline]
-    fn normalize(&mut self) {
-        small::normalize(self.data_mut());
-    }
-
-    // ADDITION
-
-    /// AddAssign small integer.
-    #[inline]
-    fn iadd_small(&mut self, y: Limb) {
-        small::iadd(self.data_mut(), y);
-    }
-
-    // MULTIPLICATION
-
-    /// MulAssign small integer.
-    #[inline]
-    fn imul_small(&mut self, y: Limb) {
-        small::imul(self.data_mut(), y);
-    }
-
-    /// Multiply by a power of 2.
-    #[inline]
-    fn imul_pow2(&mut self, n: u32) {
-        self.ishl(n as usize);
-    }
-
-    /// Multiply by a power of 5.
-    #[inline]
-    fn imul_pow5(&mut self, n: u32) {
-        small::imul_pow5(self.data_mut(), n);
-    }
-
-    /// MulAssign by a power of 10.
-    #[inline]
-    fn imul_pow10(&mut self, n: u32) {
-        self.imul_pow5(n);
-        self.imul_pow2(n);
-    }
-
-    // SHIFTS
-
-    /// Shift-left the entire buffer n bits.
-    #[inline]
-    fn ishl(&mut self, n: usize) {
-        small::ishl(self.data_mut(), n);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/mod.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/mod.rs
deleted file mode 100644
index aeed406..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/mod.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-// The code in this module is derived from the `lexical` crate by @Alexhuszagh
-// which the author condensed into this minimal subset for use in serde_json.
-// For the serde_json use case we care more about reliably round tripping all
-// possible floating point values than about parsing any arbitrarily long string
-// of digits with perfect accuracy, as the latter would take a high cost in
-// compile time and performance.
-//
-// Dual licensed as MIT and Apache 2.0 just like the rest of serde_json, but
-// copyright Alexander Huszagh.
-
-//! Fast, minimal float-parsing algorithm.
-
-// MODULES
-pub(crate) mod algorithm;
-mod bhcomp;
-mod bignum;
-mod cached;
-mod cached_float80;
-mod digit;
-mod errors;
-pub(crate) mod exponent;
-pub(crate) mod float;
-mod large_powers;
-pub(crate) mod math;
-pub(crate) mod num;
-pub(crate) mod parse;
-pub(crate) mod rounding;
-mod shift;
-mod small_powers;
-
-#[cfg(fast_arithmetic = "32")]
-mod large_powers32;
-
-#[cfg(fast_arithmetic = "64")]
-mod large_powers64;
-
-// API
-pub use self::parse::{parse_concise_float, parse_truncated_float};
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/num.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/num.rs
deleted file mode 100644
index 3f39140..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/num.rs
+++ /dev/null
@@ -1,421 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Utilities for Rust numbers.
-
-use core::ops;
-
-/// Precalculated values of radix**i for i in range [0, arr.len()-1].
-/// Each value can be **exactly** represented as that type.
-const F32_POW10: [f32; 11] = [
-    1.0,
-    10.0,
-    100.0,
-    1000.0,
-    10000.0,
-    100000.0,
-    1000000.0,
-    10000000.0,
-    100000000.0,
-    1000000000.0,
-    10000000000.0,
-];
-
-/// Precalculated values of radix**i for i in range [0, arr.len()-1].
-/// Each value can be **exactly** represented as that type.
-const F64_POW10: [f64; 23] = [
-    1.0,
-    10.0,
-    100.0,
-    1000.0,
-    10000.0,
-    100000.0,
-    1000000.0,
-    10000000.0,
-    100000000.0,
-    1000000000.0,
-    10000000000.0,
-    100000000000.0,
-    1000000000000.0,
-    10000000000000.0,
-    100000000000000.0,
-    1000000000000000.0,
-    10000000000000000.0,
-    100000000000000000.0,
-    1000000000000000000.0,
-    10000000000000000000.0,
-    100000000000000000000.0,
-    1000000000000000000000.0,
-    10000000000000000000000.0,
-];
-
-/// Type that can be converted to primitive with `as`.
-pub trait AsPrimitive: Sized + Copy + PartialOrd {
-    fn as_u32(self) -> u32;
-    fn as_u64(self) -> u64;
-    fn as_u128(self) -> u128;
-    fn as_usize(self) -> usize;
-    fn as_f32(self) -> f32;
-    fn as_f64(self) -> f64;
-}
-
-macro_rules! as_primitive_impl {
-    ($($ty:ident)*) => {
-        $(
-            impl AsPrimitive for $ty {
-                #[inline]
-                fn as_u32(self) -> u32 {
-                    self as u32
-                }
-
-                #[inline]
-                fn as_u64(self) -> u64 {
-                    self as u64
-                }
-
-                #[inline]
-                fn as_u128(self) -> u128 {
-                    self as u128
-                }
-
-                #[inline]
-                fn as_usize(self) -> usize {
-                    self as usize
-                }
-
-                #[inline]
-                fn as_f32(self) -> f32 {
-                    self as f32
-                }
-
-                #[inline]
-                fn as_f64(self) -> f64 {
-                    self as f64
-                }
-            }
-        )*
-    };
-}
-
-as_primitive_impl! { u32 u64 u128 usize f32 f64 }
-
-/// An interface for casting between machine scalars.
-pub trait AsCast: AsPrimitive {
-    /// Creates a number from another value that can be converted into
-    /// a primitive via the `AsPrimitive` trait.
-    fn as_cast<N: AsPrimitive>(n: N) -> Self;
-}
-
-macro_rules! as_cast_impl {
-    ($ty:ident, $method:ident) => {
-        impl AsCast for $ty {
-            #[inline]
-            fn as_cast<N: AsPrimitive>(n: N) -> Self {
-                n.$method()
-            }
-        }
-    };
-}
-
-as_cast_impl!(u32, as_u32);
-as_cast_impl!(u64, as_u64);
-as_cast_impl!(u128, as_u128);
-as_cast_impl!(usize, as_usize);
-as_cast_impl!(f32, as_f32);
-as_cast_impl!(f64, as_f64);
-
-/// Numerical type trait.
-pub trait Number: AsCast + ops::Add<Output = Self> {}
-
-macro_rules! number_impl {
-    ($($ty:ident)*) => {
-        $(
-            impl Number for $ty {}
-        )*
-    };
-}
-
-number_impl! { u32 u64 u128 usize f32 f64 }
-
-/// Defines a trait that supports integral operations.
-pub trait Integer: Number + ops::BitAnd<Output = Self> + ops::Shr<i32, Output = Self> {
-    const ZERO: Self;
-}
-
-macro_rules! integer_impl {
-    ($($ty:tt)*) => {
-        $(
-            impl Integer for $ty {
-                const ZERO: Self = 0;
-            }
-        )*
-    };
-}
-
-integer_impl! { u32 u64 u128 usize }
-
-/// Type trait for the mantissa type.
-pub trait Mantissa: Integer {
-    /// Mask to extract the high bits from the integer.
-    const HIMASK: Self;
-    /// Mask to extract the low bits from the integer.
-    const LOMASK: Self;
-    /// Full size of the integer, in bits.
-    const FULL: i32;
-    /// Half size of the integer, in bits.
-    const HALF: i32 = Self::FULL / 2;
-}
-
-impl Mantissa for u64 {
-    const HIMASK: u64 = 0xFFFFFFFF00000000;
-    const LOMASK: u64 = 0x00000000FFFFFFFF;
-    const FULL: i32 = 64;
-}
-
-/// Get exact exponent limit for radix.
-pub trait Float: Number {
-    /// Unsigned type of the same size.
-    type Unsigned: Integer;
-
-    /// Literal zero.
-    const ZERO: Self;
-    /// Maximum number of digits that can contribute in the mantissa.
-    ///
-    /// We can exactly represent a float in radix `b` from radix 2 if
-    /// `b` is divisible by 2. This function calculates the exact number of
-    /// digits required to exactly represent that float.
-    ///
-    /// According to the "Handbook of Floating Point Arithmetic",
-    /// for IEEE754, with emin being the min exponent, p2 being the
-    /// precision, and b being the radix, the number of digits follows as:
-    ///
-    /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋`
-    ///
-    /// For f32, this follows as:
-    ///     emin = -126
-    ///     p2 = 24
-    ///
-    /// For f64, this follows as:
-    ///     emin = -1022
-    ///     p2 = 53
-    ///
-    /// In Python:
-    ///     `-emin + p2 + math.floor((emin+1)*math.log(2, b) - math.log(1-2**(-p2), b))`
-    ///
-    /// This was used to calculate the maximum number of digits for [2, 36].
-    const MAX_DIGITS: usize;
-
-    // MASKS
-
-    /// Bitmask for the exponent, including the hidden bit.
-    const EXPONENT_MASK: Self::Unsigned;
-    /// Bitmask for the hidden bit in exponent, which is an implicit 1 in the fraction.
-    const HIDDEN_BIT_MASK: Self::Unsigned;
-    /// Bitmask for the mantissa (fraction), excluding the hidden bit.
-    const MANTISSA_MASK: Self::Unsigned;
-
-    // PROPERTIES
-
-    /// Positive infinity as bits.
-    const INFINITY_BITS: Self::Unsigned;
-    /// Size of the significand (mantissa) without hidden bit.
-    const MANTISSA_SIZE: i32;
-    /// Bias of the exponent
-    const EXPONENT_BIAS: i32;
-    /// Exponent portion of a denormal float.
-    const DENORMAL_EXPONENT: i32;
-    /// Maximum exponent value in float.
-    const MAX_EXPONENT: i32;
-
-    // ROUNDING
-
-    /// Default number of bits to shift (or 64 - mantissa size - 1).
-    const DEFAULT_SHIFT: i32;
-    /// Mask to determine if a full-carry occurred (1 in bit above hidden bit).
-    const CARRY_MASK: u64;
-
-    /// Get min and max exponent limits (exact) from radix.
-    fn exponent_limit() -> (i32, i32);
-
-    /// Get the number of digits that can be shifted from exponent to mantissa.
-    fn mantissa_limit() -> i32;
-
-    // Re-exported methods from std.
-    fn pow10(self, n: i32) -> Self;
-    fn from_bits(u: Self::Unsigned) -> Self;
-    fn to_bits(self) -> Self::Unsigned;
-    fn is_sign_positive(self) -> bool;
-
-    /// Returns true if the float is a denormal.
-    #[inline]
-    fn is_denormal(self) -> bool {
-        self.to_bits() & Self::EXPONENT_MASK == Self::Unsigned::ZERO
-    }
-
-    /// Returns true if the float is a NaN or Infinite.
-    #[inline]
-    fn is_special(self) -> bool {
-        self.to_bits() & Self::EXPONENT_MASK == Self::EXPONENT_MASK
-    }
-
-    /// Returns true if the float is infinite.
-    #[inline]
-    fn is_inf(self) -> bool {
-        self.is_special() && (self.to_bits() & Self::MANTISSA_MASK) == Self::Unsigned::ZERO
-    }
-
-    /// Get exponent component from the float.
-    #[inline]
-    fn exponent(self) -> i32 {
-        if self.is_denormal() {
-            return Self::DENORMAL_EXPONENT;
-        }
-
-        let bits = self.to_bits();
-        let biased_e = ((bits & Self::EXPONENT_MASK) >> Self::MANTISSA_SIZE).as_u32();
-        biased_e as i32 - Self::EXPONENT_BIAS
-    }
-
-    /// Get mantissa (significand) component from float.
-    #[inline]
-    fn mantissa(self) -> Self::Unsigned {
-        let bits = self.to_bits();
-        let s = bits & Self::MANTISSA_MASK;
-        if !self.is_denormal() {
-            s + Self::HIDDEN_BIT_MASK
-        } else {
-            s
-        }
-    }
-
-    /// Get next greater float for a positive float.
-    /// Value must be >= 0.0 and < INFINITY.
-    #[inline]
-    fn next_positive(self) -> Self {
-        debug_assert!(self.is_sign_positive() && !self.is_inf());
-        Self::from_bits(self.to_bits() + Self::Unsigned::as_cast(1u32))
-    }
-
-    /// Round a positive number to even.
-    #[inline]
-    fn round_positive_even(self) -> Self {
-        if self.mantissa() & Self::Unsigned::as_cast(1u32) == Self::Unsigned::as_cast(1u32) {
-            self.next_positive()
-        } else {
-            self
-        }
-    }
-}
-
-impl Float for f32 {
-    type Unsigned = u32;
-
-    const ZERO: f32 = 0.0;
-    const MAX_DIGITS: usize = 114;
-    const EXPONENT_MASK: u32 = 0x7F800000;
-    const HIDDEN_BIT_MASK: u32 = 0x00800000;
-    const MANTISSA_MASK: u32 = 0x007FFFFF;
-    const INFINITY_BITS: u32 = 0x7F800000;
-    const MANTISSA_SIZE: i32 = 23;
-    const EXPONENT_BIAS: i32 = 127 + Self::MANTISSA_SIZE;
-    const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS;
-    const MAX_EXPONENT: i32 = 0xFF - Self::EXPONENT_BIAS;
-    const DEFAULT_SHIFT: i32 = u64::FULL - f32::MANTISSA_SIZE - 1;
-    const CARRY_MASK: u64 = 0x1000000;
-
-    #[inline]
-    fn exponent_limit() -> (i32, i32) {
-        (-10, 10)
-    }
-
-    #[inline]
-    fn mantissa_limit() -> i32 {
-        7
-    }
-
-    #[inline]
-    fn pow10(self, n: i32) -> f32 {
-        // Check the exponent is within bounds in debug builds.
-        debug_assert!({
-            let (min, max) = Self::exponent_limit();
-            n >= min && n <= max
-        });
-
-        if n > 0 {
-            self * F32_POW10[n as usize]
-        } else {
-            self / F32_POW10[-n as usize]
-        }
-    }
-
-    #[inline]
-    fn from_bits(u: u32) -> f32 {
-        f32::from_bits(u)
-    }
-
-    #[inline]
-    fn to_bits(self) -> u32 {
-        f32::to_bits(self)
-    }
-
-    #[inline]
-    fn is_sign_positive(self) -> bool {
-        f32::is_sign_positive(self)
-    }
-}
-
-impl Float for f64 {
-    type Unsigned = u64;
-
-    const ZERO: f64 = 0.0;
-    const MAX_DIGITS: usize = 769;
-    const EXPONENT_MASK: u64 = 0x7FF0000000000000;
-    const HIDDEN_BIT_MASK: u64 = 0x0010000000000000;
-    const MANTISSA_MASK: u64 = 0x000FFFFFFFFFFFFF;
-    const INFINITY_BITS: u64 = 0x7FF0000000000000;
-    const MANTISSA_SIZE: i32 = 52;
-    const EXPONENT_BIAS: i32 = 1023 + Self::MANTISSA_SIZE;
-    const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS;
-    const MAX_EXPONENT: i32 = 0x7FF - Self::EXPONENT_BIAS;
-    const DEFAULT_SHIFT: i32 = u64::FULL - f64::MANTISSA_SIZE - 1;
-    const CARRY_MASK: u64 = 0x20000000000000;
-
-    #[inline]
-    fn exponent_limit() -> (i32, i32) {
-        (-22, 22)
-    }
-
-    #[inline]
-    fn mantissa_limit() -> i32 {
-        15
-    }
-
-    #[inline]
-    fn pow10(self, n: i32) -> f64 {
-        // Check the exponent is within bounds in debug builds.
-        debug_assert!({
-            let (min, max) = Self::exponent_limit();
-            n >= min && n <= max
-        });
-
-        if n > 0 {
-            self * F64_POW10[n as usize]
-        } else {
-            self / F64_POW10[-n as usize]
-        }
-    }
-
-    #[inline]
-    fn from_bits(u: u64) -> f64 {
-        f64::from_bits(u)
-    }
-
-    #[inline]
-    fn to_bits(self) -> u64 {
-        f64::to_bits(self)
-    }
-
-    #[inline]
-    fn is_sign_positive(self) -> bool {
-        f64::is_sign_positive(self)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/parse.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/parse.rs
deleted file mode 100644
index e3d7f1e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/parse.rs
+++ /dev/null
@@ -1,83 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use super::algorithm::*;
-use super::bhcomp::*;
-use super::digit::*;
-use super::exponent::*;
-use super::num::*;
-
-// PARSERS
-// -------
-
-/// Parse float for which the entire integer and fraction parts fit into a 64
-/// bit mantissa.
-pub fn parse_concise_float<F>(mantissa: u64, mant_exp: i32) -> F
-where
-    F: Float,
-{
-    if let Some(float) = fast_path(mantissa, mant_exp) {
-        return float;
-    }
-
-    // Moderate path (use an extended 80-bit representation).
-    let truncated = false;
-    let (fp, valid) = moderate_path::<F>(mantissa, mant_exp, truncated);
-    if valid {
-        return fp.into_float::<F>();
-    }
-
-    let b = fp.into_downward_float::<F>();
-    if b.is_special() {
-        // We have a non-finite number, we get to leave early.
-        return b;
-    }
-
-    // Slow path, fast path didn't work.
-    let mut buffer = itoa::Buffer::new();
-    let integer = buffer.format(mantissa).as_bytes();
-    let fraction = &[];
-    bhcomp(b, integer, fraction, mant_exp)
-}
-
-/// Parse float from extracted float components.
-///
-/// * `integer`     - Slice containing the integer digits.
-/// * `fraction`    - Slice containing the fraction digits.
-/// * `exponent`    - Parsed, 32-bit exponent.
-///
-/// Precondition: The integer must not have leading zeros.
-pub fn parse_truncated_float<F>(integer: &[u8], mut fraction: &[u8], exponent: i32) -> F
-where
-    F: Float,
-{
-    // Trim trailing zeroes from the fraction part.
-    while fraction.last() == Some(&b'0') {
-        fraction = &fraction[..fraction.len() - 1];
-    }
-
-    // Calculate the number of truncated digits.
-    let mut truncated = 0;
-    let mut mantissa: u64 = 0;
-    let mut iter = integer.iter().chain(fraction);
-    for &c in &mut iter {
-        mantissa = match add_digit(mantissa, to_digit(c).unwrap()) {
-            Some(v) => v,
-            None => {
-                truncated = 1 + iter.count();
-                break;
-            }
-        };
-    }
-
-    let mant_exp = mantissa_exponent(exponent, fraction.len(), truncated);
-    let is_truncated = true;
-
-    fallback_path(
-        integer,
-        fraction,
-        mantissa,
-        exponent,
-        mant_exp,
-        is_truncated,
-    )
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/rounding.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/rounding.rs
deleted file mode 100644
index 6344875..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/rounding.rs
+++ /dev/null
@@ -1,231 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Defines rounding schemes for floating-point numbers.
-
-use super::float::ExtendedFloat;
-use super::num::*;
-use super::shift::*;
-use core::mem;
-
-// MASKS
-
-/// Calculate a scalar factor of 2 above the halfway point.
-#[inline]
-pub(crate) fn nth_bit(n: u64) -> u64 {
-    let bits: u64 = mem::size_of::<u64>() as u64 * 8;
-    debug_assert!(n < bits, "nth_bit() overflow in shl.");
-
-    1 << n
-}
-
-/// Generate a bitwise mask for the lower `n` bits.
-#[inline]
-pub(crate) fn lower_n_mask(n: u64) -> u64 {
-    let bits: u64 = mem::size_of::<u64>() as u64 * 8;
-    debug_assert!(n <= bits, "lower_n_mask() overflow in shl.");
-
-    if n == bits {
-        u64::MAX
-    } else {
-        (1 << n) - 1
-    }
-}
-
-/// Calculate the halfway point for the lower `n` bits.
-#[inline]
-pub(crate) fn lower_n_halfway(n: u64) -> u64 {
-    let bits: u64 = mem::size_of::<u64>() as u64 * 8;
-    debug_assert!(n <= bits, "lower_n_halfway() overflow in shl.");
-
-    if n == 0 {
-        0
-    } else {
-        nth_bit(n - 1)
-    }
-}
-
-/// Calculate a bitwise mask with `n` 1 bits starting at the `bit` position.
-#[inline]
-pub(crate) fn internal_n_mask(bit: u64, n: u64) -> u64 {
-    let bits: u64 = mem::size_of::<u64>() as u64 * 8;
-    debug_assert!(bit <= bits, "internal_n_halfway() overflow in shl.");
-    debug_assert!(n <= bits, "internal_n_halfway() overflow in shl.");
-    debug_assert!(bit >= n, "internal_n_halfway() overflow in sub.");
-
-    lower_n_mask(bit) ^ lower_n_mask(bit - n)
-}
-
-// NEAREST ROUNDING
-
-// Shift right N-bytes and round to the nearest.
-//
-// Return if we are above halfway and if we are halfway.
-#[inline]
-pub(crate) fn round_nearest(fp: &mut ExtendedFloat, shift: i32) -> (bool, bool) {
-    // Extract the truncated bits using mask.
-    // Calculate if the value of the truncated bits are either above
-    // the mid-way point, or equal to it.
-    //
-    // For example, for 4 truncated bytes, the mask would be b1111
-    // and the midway point would be b1000.
-    let mask: u64 = lower_n_mask(shift as u64);
-    let halfway: u64 = lower_n_halfway(shift as u64);
-
-    let truncated_bits = fp.mant & mask;
-    let is_above = truncated_bits > halfway;
-    let is_halfway = truncated_bits == halfway;
-
-    // Bit shift so the leading bit is in the hidden bit.
-    overflowing_shr(fp, shift);
-
-    (is_above, is_halfway)
-}
-
-// Tie rounded floating point to event.
-#[inline]
-pub(crate) fn tie_even(fp: &mut ExtendedFloat, is_above: bool, is_halfway: bool) {
-    // Extract the last bit after shifting (and determine if it is odd).
-    let is_odd = fp.mant & 1 == 1;
-
-    // Calculate if we need to roundup.
-    // We need to roundup if we are above halfway, or if we are odd
-    // and at half-way (need to tie-to-even).
-    if is_above || (is_odd && is_halfway) {
-        fp.mant += 1;
-    }
-}
-
-// Shift right N-bytes and round nearest, tie-to-even.
-//
-// Floating-point arithmetic uses round to nearest, ties to even,
-// which rounds to the nearest value, if the value is halfway in between,
-// round to an even value.
-#[inline]
-pub(crate) fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32) {
-    let (is_above, is_halfway) = round_nearest(fp, shift);
-    tie_even(fp, is_above, is_halfway);
-}
-
-// DIRECTED ROUNDING
-
-// Shift right N-bytes and round towards a direction.
-//
-// Return if we have any truncated bytes.
-#[inline]
-fn round_toward(fp: &mut ExtendedFloat, shift: i32) -> bool {
-    let mask: u64 = lower_n_mask(shift as u64);
-    let truncated_bits = fp.mant & mask;
-
-    // Bit shift so the leading bit is in the hidden bit.
-    overflowing_shr(fp, shift);
-
-    truncated_bits != 0
-}
-
-// Round down.
-#[inline]
-fn downard(_: &mut ExtendedFloat, _: bool) {}
-
-// Shift right N-bytes and round toward zero.
-//
-// Floating-point arithmetic defines round toward zero, which rounds
-// towards positive zero.
-#[inline]
-pub(crate) fn round_downward(fp: &mut ExtendedFloat, shift: i32) {
-    // Bit shift so the leading bit is in the hidden bit.
-    // No rounding schemes, so we just ignore everything else.
-    let is_truncated = round_toward(fp, shift);
-    downard(fp, is_truncated);
-}
-
-// ROUND TO FLOAT
-
-// Shift the ExtendedFloat fraction to the fraction bits in a native float.
-//
-// Floating-point arithmetic uses round to nearest, ties to even,
-// which rounds to the nearest value, if the value is halfway in between,
-// round to an even value.
-#[inline]
-pub(crate) fn round_to_float<F, Algorithm>(fp: &mut ExtendedFloat, algorithm: Algorithm)
-where
-    F: Float,
-    Algorithm: FnOnce(&mut ExtendedFloat, i32),
-{
-    // Calculate the difference to allow a single calculation
-    // rather than a loop, to minimize the number of ops required.
-    // This does underflow detection.
-    let final_exp = fp.exp + F::DEFAULT_SHIFT;
-    if final_exp < F::DENORMAL_EXPONENT {
-        // We would end up with a denormal exponent, try to round to more
-        // digits. Only shift right if we can avoid zeroing out the value,
-        // which requires the exponent diff to be < M::BITS. The value
-        // is already normalized, so we shouldn't have any issue zeroing
-        // out the value.
-        let diff = F::DENORMAL_EXPONENT - fp.exp;
-        if diff <= u64::FULL {
-            // We can avoid underflow, can get a valid representation.
-            algorithm(fp, diff);
-        } else {
-            // Certain underflow, assign literal 0s.
-            fp.mant = 0;
-            fp.exp = 0;
-        }
-    } else {
-        algorithm(fp, F::DEFAULT_SHIFT);
-    }
-
-    if fp.mant & F::CARRY_MASK == F::CARRY_MASK {
-        // Roundup carried over to 1 past the hidden bit.
-        shr(fp, 1);
-    }
-}
-
-// AVOID OVERFLOW/UNDERFLOW
-
-// Avoid overflow for large values, shift left as needed.
-//
-// Shift until a 1-bit is in the hidden bit, if the mantissa is not 0.
-#[inline]
-pub(crate) fn avoid_overflow<F>(fp: &mut ExtendedFloat)
-where
-    F: Float,
-{
-    // Calculate the difference to allow a single calculation
-    // rather than a loop, minimizing the number of ops required.
-    if fp.exp >= F::MAX_EXPONENT {
-        let diff = fp.exp - F::MAX_EXPONENT;
-        if diff <= F::MANTISSA_SIZE {
-            // Our overflow mask needs to start at the hidden bit, or at
-            // `F::MANTISSA_SIZE+1`, and needs to have `diff+1` bits set,
-            // to see if our value overflows.
-            let bit = (F::MANTISSA_SIZE + 1) as u64;
-            let n = (diff + 1) as u64;
-            let mask = internal_n_mask(bit, n);
-            if (fp.mant & mask) == 0 {
-                // If we have no 1-bit in the hidden-bit position,
-                // which is index 0, we need to shift 1.
-                let shift = diff + 1;
-                shl(fp, shift);
-            }
-        }
-    }
-}
-
-// ROUND TO NATIVE
-
-// Round an extended-precision float to a native float representation.
-#[inline]
-pub(crate) fn round_to_native<F, Algorithm>(fp: &mut ExtendedFloat, algorithm: Algorithm)
-where
-    F: Float,
-    Algorithm: FnOnce(&mut ExtendedFloat, i32),
-{
-    // Shift all the way left, to ensure a consistent representation.
-    // The following right-shifts do not work for a non-normalized number.
-    fp.normalize();
-
-    // Round so the fraction is in a native mantissa representation,
-    // and avoid overflow/underflow.
-    round_to_float::<F, _>(fp, algorithm);
-    avoid_overflow::<F>(fp);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/shift.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/shift.rs
deleted file mode 100644
index a0bae01..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/shift.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Bit-shift helpers.
-
-use super::float::ExtendedFloat;
-use core::mem;
-
-// Shift extended-precision float right `shift` bytes.
-#[inline]
-pub(crate) fn shr(fp: &mut ExtendedFloat, shift: i32) {
-    let bits: u64 = mem::size_of::<u64>() as u64 * 8;
-    debug_assert!((shift as u64) < bits, "shr() overflow in shift right.");
-
-    fp.mant >>= shift;
-    fp.exp += shift;
-}
-
-// Shift extended-precision float right `shift` bytes.
-//
-// Accepts when the shift is the same as the type size, and
-// sets the value to 0.
-#[inline]
-pub(crate) fn overflowing_shr(fp: &mut ExtendedFloat, shift: i32) {
-    let bits: u64 = mem::size_of::<u64>() as u64 * 8;
-    debug_assert!(
-        (shift as u64) <= bits,
-        "overflowing_shr() overflow in shift right."
-    );
-
-    fp.mant = if shift as u64 == bits {
-        0
-    } else {
-        fp.mant >> shift
-    };
-    fp.exp += shift;
-}
-
-// Shift extended-precision float left `shift` bytes.
-#[inline]
-pub(crate) fn shl(fp: &mut ExtendedFloat, shift: i32) {
-    let bits: u64 = mem::size_of::<u64>() as u64 * 8;
-    debug_assert!((shift as u64) < bits, "shl() overflow in shift left.");
-
-    fp.mant <<= shift;
-    fp.exp -= shift;
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/small_powers.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/small_powers.rs
deleted file mode 100644
index 29b83a15..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/small_powers.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Pre-computed small powers.
-
-// 32 BIT
-#[cfg(fast_arithmetic = "32")]
-pub(crate) const POW5_32: [u32; 14] = [
-    1, 5, 25, 125, 625, 3125, 15625, 78125, 390625, 1953125, 9765625, 48828125, 244140625,
-    1220703125,
-];
-
-#[cfg(fast_arithmetic = "32")]
-pub(crate) const POW10_32: [u32; 10] = [
-    1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
-];
-
-// 64 BIT
-#[cfg(fast_arithmetic = "64")]
-pub(crate) const POW5_64: [u64; 28] = [
-    1,
-    5,
-    25,
-    125,
-    625,
-    3125,
-    15625,
-    78125,
-    390625,
-    1953125,
-    9765625,
-    48828125,
-    244140625,
-    1220703125,
-    6103515625,
-    30517578125,
-    152587890625,
-    762939453125,
-    3814697265625,
-    19073486328125,
-    95367431640625,
-    476837158203125,
-    2384185791015625,
-    11920928955078125,
-    59604644775390625,
-    298023223876953125,
-    1490116119384765625,
-    7450580596923828125,
-];
-pub(crate) const POW10_64: [u64; 20] = [
-    1,
-    10,
-    100,
-    1000,
-    10000,
-    100000,
-    1000000,
-    10000000,
-    100000000,
-    1000000000,
-    10000000000,
-    100000000000,
-    1000000000000,
-    10000000000000,
-    100000000000000,
-    1000000000000000,
-    10000000000000000,
-    100000000000000000,
-    1000000000000000000,
-    10000000000000000000,
-];
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lib.rs
deleted file mode 100644
index a9f82f2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lib.rs
+++ /dev/null
@@ -1,437 +0,0 @@
-//! # Serde JSON
-//!
-//! JSON is a ubiquitous open-standard format that uses human-readable text to
-//! transmit data objects consisting of key-value pairs.
-//!
-//! ```json
-//! {
-//!     "name": "John Doe",
-//!     "age": 43,
-//!     "address": {
-//!         "street": "10 Downing Street",
-//!         "city": "London"
-//!     },
-//!     "phones": [
-//!         "+44 1234567",
-//!         "+44 2345678"
-//!     ]
-//! }
-//! ```
-//!
-//! There are three common ways that you might find yourself needing to work
-//! with JSON data in Rust.
-//!
-//!  - **As text data.** An unprocessed string of JSON data that you receive on
-//!    an HTTP endpoint, read from a file, or prepare to send to a remote
-//!    server.
-//!  - **As an untyped or loosely typed representation.** Maybe you want to
-//!    check that some JSON data is valid before passing it on, but without
-//!    knowing the structure of what it contains. Or you want to do very basic
-//!    manipulations like insert a key in a particular spot.
-//!  - **As a strongly typed Rust data structure.** When you expect all or most
-//!    of your data to conform to a particular structure and want to get real
-//!    work done without JSON's loosey-goosey nature tripping you up.
-//!
-//! Serde JSON provides efficient, flexible, safe ways of converting data
-//! between each of these representations.
-//!
-//! # Operating on untyped JSON values
-//!
-//! Any valid JSON data can be manipulated in the following recursive enum
-//! representation. This data structure is [`serde_json::Value`][value].
-//!
-//! ```
-//! # use serde_json::{Number, Map};
-//! #
-//! # #[allow(dead_code)]
-//! enum Value {
-//!     Null,
-//!     Bool(bool),
-//!     Number(Number),
-//!     String(String),
-//!     Array(Vec<Value>),
-//!     Object(Map<String, Value>),
-//! }
-//! ```
-//!
-//! A string of JSON data can be parsed into a `serde_json::Value` by the
-//! [`serde_json::from_str`][from_str] function. There is also [`from_slice`]
-//! for parsing from a byte slice `&[u8]` and [`from_reader`] for parsing from
-//! any `io::Read` like a File or a TCP stream.
-//!
-//! ```
-//! use serde_json::{Result, Value};
-//!
-//! fn untyped_example() -> Result<()> {
-//!     // Some JSON input data as a &str. Maybe this comes from the user.
-//!     let data = r#"
-//!         {
-//!             "name": "John Doe",
-//!             "age": 43,
-//!             "phones": [
-//!                 "+44 1234567",
-//!                 "+44 2345678"
-//!             ]
-//!         }"#;
-//!
-//!     // Parse the string of data into serde_json::Value.
-//!     let v: Value = serde_json::from_str(data)?;
-//!
-//!     // Access parts of the data by indexing with square brackets.
-//!     println!("Please call {} at the number {}", v["name"], v["phones"][0]);
-//!
-//!     Ok(())
-//! }
-//! #
-//! # fn main() {
-//! #     untyped_example().unwrap();
-//! # }
-//! ```
-//!
-//! The result of square bracket indexing like `v["name"]` is a borrow of the
-//! data at that index, so the type is `&Value`. A JSON map can be indexed with
-//! string keys, while a JSON array can be indexed with integer keys. If the
-//! type of the data is not right for the type with which it is being indexed,
-//! or if a map does not contain the key being indexed, or if the index into a
-//! vector is out of bounds, the returned element is `Value::Null`.
-//!
-//! When a `Value` is printed, it is printed as a JSON string. So in the code
-//! above, the output looks like `Please call "John Doe" at the number "+44
-//! 1234567"`. The quotation marks appear because `v["name"]` is a `&Value`
-//! containing a JSON string and its JSON representation is `"John Doe"`.
-//! Printing as a plain string without quotation marks involves converting from
-//! a JSON string to a Rust string with [`as_str()`] or avoiding the use of
-//! `Value` as described in the following section.
-//!
-//! [`as_str()`]: crate::Value::as_str
-//!
-//! The `Value` representation is sufficient for very basic tasks but can be
-//! tedious to work with for anything more significant. Error handling is
-//! verbose to implement correctly, for example imagine trying to detect the
-//! presence of unrecognized fields in the input data. The compiler is powerless
-//! to help you when you make a mistake, for example imagine typoing `v["name"]`
-//! as `v["nmae"]` in one of the dozens of places it is used in your code.
-//!
-//! # Parsing JSON as strongly typed data structures
-//!
-//! Serde provides a powerful way of mapping JSON data into Rust data structures
-//! largely automatically.
-//!
-//! ```
-//! use serde::{Deserialize, Serialize};
-//! use serde_json::Result;
-//!
-//! #[derive(Serialize, Deserialize)]
-//! struct Person {
-//!     name: String,
-//!     age: u8,
-//!     phones: Vec<String>,
-//! }
-//!
-//! fn typed_example() -> Result<()> {
-//!     // Some JSON input data as a &str. Maybe this comes from the user.
-//!     let data = r#"
-//!         {
-//!             "name": "John Doe",
-//!             "age": 43,
-//!             "phones": [
-//!                 "+44 1234567",
-//!                 "+44 2345678"
-//!             ]
-//!         }"#;
-//!
-//!     // Parse the string of data into a Person object. This is exactly the
-//!     // same function as the one that produced serde_json::Value above, but
-//!     // now we are asking it for a Person as output.
-//!     let p: Person = serde_json::from_str(data)?;
-//!
-//!     // Do things just like with any other Rust data structure.
-//!     println!("Please call {} at the number {}", p.name, p.phones[0]);
-//!
-//!     Ok(())
-//! }
-//! #
-//! # fn main() {
-//! #     typed_example().unwrap();
-//! # }
-//! ```
-//!
-//! This is the same `serde_json::from_str` function as before, but this time we
-//! assign the return value to a variable of type `Person` so Serde will
-//! automatically interpret the input data as a `Person` and produce informative
-//! error messages if the layout does not conform to what a `Person` is expected
-//! to look like.
-//!
-//! Any type that implements Serde's `Deserialize` trait can be deserialized
-//! this way. This includes built-in Rust standard library types like `Vec<T>`
-//! and `HashMap<K, V>`, as well as any structs or enums annotated with
-//! `#[derive(Deserialize)]`.
-//!
-//! Once we have `p` of type `Person`, our IDE and the Rust compiler can help us
-//! use it correctly like they do for any other Rust code. The IDE can
-//! autocomplete field names to prevent typos, which was impossible in the
-//! `serde_json::Value` representation. And the Rust compiler can check that
-//! when we write `p.phones[0]`, then `p.phones` is guaranteed to be a
-//! `Vec<String>` so indexing into it makes sense and produces a `String`.
-//!
-//! # Constructing JSON values
-//!
-//! Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value`
-//! objects with very natural JSON syntax.
-//!
-//! ```
-//! use serde_json::json;
-//!
-//! fn main() {
-//!     // The type of `john` is `serde_json::Value`
-//!     let john = json!({
-//!         "name": "John Doe",
-//!         "age": 43,
-//!         "phones": [
-//!             "+44 1234567",
-//!             "+44 2345678"
-//!         ]
-//!     });
-//!
-//!     println!("first phone number: {}", john["phones"][0]);
-//!
-//!     // Convert to a string of JSON and print it out
-//!     println!("{}", john.to_string());
-//! }
-//! ```
-//!
-//! The `Value::to_string()` function converts a `serde_json::Value` into a
-//! `String` of JSON text.
-//!
-//! One neat thing about the `json!` macro is that variables and expressions can
-//! be interpolated directly into the JSON value as you are building it. Serde
-//! will check at compile time that the value you are interpolating is able to
-//! be represented as JSON.
-//!
-//! ```
-//! # use serde_json::json;
-//! #
-//! # fn random_phone() -> u16 { 0 }
-//! #
-//! let full_name = "John Doe";
-//! let age_last_year = 42;
-//!
-//! // The type of `john` is `serde_json::Value`
-//! let john = json!({
-//!     "name": full_name,
-//!     "age": age_last_year + 1,
-//!     "phones": [
-//!         format!("+44 {}", random_phone())
-//!     ]
-//! });
-//! ```
-//!
-//! This is amazingly convenient, but we have the problem we had before with
-//! `Value`: the IDE and Rust compiler cannot help us if we get it wrong. Serde
-//! JSON provides a better way of serializing strongly-typed data structures
-//! into JSON text.
-//!
-//! # Creating JSON by serializing data structures
-//!
-//! A data structure can be converted to a JSON string by
-//! [`serde_json::to_string`][to_string]. There is also
-//! [`serde_json::to_vec`][to_vec] which serializes to a `Vec<u8>` and
-//! [`serde_json::to_writer`][to_writer] which serializes to any `io::Write`
-//! such as a File or a TCP stream.
-//!
-//! ```
-//! use serde::{Deserialize, Serialize};
-//! use serde_json::Result;
-//!
-//! #[derive(Serialize, Deserialize)]
-//! struct Address {
-//!     street: String,
-//!     city: String,
-//! }
-//!
-//! fn print_an_address() -> Result<()> {
-//!     // Some data structure.
-//!     let address = Address {
-//!         street: "10 Downing Street".to_owned(),
-//!         city: "London".to_owned(),
-//!     };
-//!
-//!     // Serialize it to a JSON string.
-//!     let j = serde_json::to_string(&address)?;
-//!
-//!     // Print, write to a file, or send to an HTTP server.
-//!     println!("{}", j);
-//!
-//!     Ok(())
-//! }
-//! #
-//! # fn main() {
-//! #     print_an_address().unwrap();
-//! # }
-//! ```
-//!
-//! Any type that implements Serde's `Serialize` trait can be serialized this
-//! way. This includes built-in Rust standard library types like `Vec<T>` and
-//! `HashMap<K, V>`, as well as any structs or enums annotated with
-//! `#[derive(Serialize)]`.
-//!
-//! # No-std support
-//!
-//! As long as there is a memory allocator, it is possible to use serde_json
-//! without the rest of the Rust standard library. Disable the default "std"
-//! feature and enable the "alloc" feature:
-//!
-//! ```toml
-//! [dependencies]
-//! serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
-//! ```
-//!
-//! For JSON support in Serde without a memory allocator, please see the
-//! [`serde-json-core`] crate.
-//!
-//! [value]: crate::value::Value
-//! [from_str]: crate::de::from_str
-//! [from_slice]: crate::de::from_slice
-//! [from_reader]: crate::de::from_reader
-//! [to_string]: crate::ser::to_string
-//! [to_vec]: crate::ser::to_vec
-//! [to_writer]: crate::ser::to_writer
-//! [macro]: crate::json
-//! [`serde-json-core`]: https://github.com/rust-embedded-community/serde-json-core
-
-#![doc(html_root_url = "https://docs.rs/serde_json/1.0.140")]
-// Ignored clippy lints
-#![allow(
-    clippy::collapsible_else_if,
-    clippy::comparison_chain,
-    clippy::deprecated_cfg_attr,
-    clippy::doc_markdown,
-    clippy::elidable_lifetime_names,
-    clippy::excessive_precision,
-    clippy::explicit_auto_deref,
-    clippy::float_cmp,
-    clippy::manual_range_contains,
-    clippy::match_like_matches_macro,
-    clippy::match_single_binding,
-    clippy::needless_doctest_main,
-    clippy::needless_late_init,
-    clippy::needless_lifetimes,
-    clippy::return_self_not_must_use,
-    clippy::transmute_ptr_to_ptr,
-    clippy::unbuffered_bytes,
-    clippy::unconditional_recursion, // https://github.com/rust-lang/rust-clippy/issues/12133
-    clippy::unnecessary_wraps
-)]
-// Ignored clippy_pedantic lints
-#![allow(
-    // Deserializer::from_str, into_iter
-    clippy::should_implement_trait,
-    // integer and float ser/de requires these sorts of casts
-    clippy::cast_possible_truncation,
-    clippy::cast_possible_wrap,
-    clippy::cast_precision_loss,
-    clippy::cast_sign_loss,
-    // correctly used
-    clippy::enum_glob_use,
-    clippy::if_not_else,
-    clippy::integer_division,
-    clippy::let_underscore_untyped,
-    clippy::map_err_ignore,
-    clippy::match_same_arms,
-    clippy::similar_names,
-    clippy::unused_self,
-    clippy::wildcard_imports,
-    // things are often more readable this way
-    clippy::cast_lossless,
-    clippy::items_after_statements,
-    clippy::module_name_repetitions,
-    clippy::redundant_else,
-    clippy::shadow_unrelated,
-    clippy::single_match_else,
-    clippy::too_many_lines,
-    clippy::unreadable_literal,
-    clippy::unseparated_literal_suffix,
-    clippy::use_self,
-    clippy::zero_prefixed_literal,
-    // we support older compilers
-    clippy::checked_conversions,
-    clippy::mem_replace_with_default,
-    // noisy
-    clippy::missing_errors_doc,
-    clippy::must_use_candidate,
-)]
-// Restrictions
-#![deny(clippy::question_mark_used)]
-#![allow(non_upper_case_globals)]
-#![deny(missing_docs)]
-#![no_std]
-#![cfg_attr(docsrs, feature(doc_cfg))]
-
-#[cfg(not(any(feature = "std", feature = "alloc")))]
-compile_error! {
-    "serde_json requires that either `std` (default) or `alloc` feature is enabled"
-}
-
-extern crate alloc;
-
-#[cfg(feature = "std")]
-extern crate std;
-
-// Not public API. Used from macro-generated code.
-#[doc(hidden)]
-pub mod __private {
-    #[doc(hidden)]
-    pub use alloc::vec;
-}
-
-#[cfg(feature = "std")]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-#[doc(inline)]
-pub use crate::de::from_reader;
-#[doc(inline)]
-pub use crate::de::{from_slice, from_str, Deserializer, StreamDeserializer};
-#[doc(inline)]
-pub use crate::error::{Error, Result};
-#[doc(inline)]
-pub use crate::ser::{to_string, to_string_pretty, to_vec, to_vec_pretty};
-#[cfg(feature = "std")]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-#[doc(inline)]
-pub use crate::ser::{to_writer, to_writer_pretty, Serializer};
-#[doc(inline)]
-pub use crate::value::{from_value, to_value, Map, Number, Value};
-
-// We only use our own error type; no need for From conversions provided by the
-// standard library's try! macro. This reduces lines of LLVM IR by 4%.
-macro_rules! tri {
-    ($e:expr $(,)?) => {
-        match $e {
-            core::result::Result::Ok(val) => val,
-            core::result::Result::Err(err) => return core::result::Result::Err(err),
-        }
-    };
-}
-
-#[macro_use]
-mod macros;
-
-pub mod de;
-pub mod error;
-pub mod map;
-#[cfg(feature = "std")]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-pub mod ser;
-#[cfg(not(feature = "std"))]
-mod ser;
-pub mod value;
-
-mod io;
-#[cfg(feature = "std")]
-mod iter;
-#[cfg(feature = "float_roundtrip")]
-mod lexical;
-mod number;
-mod read;
-
-#[cfg(feature = "raw_value")]
-mod raw;
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/macros.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/macros.rs
deleted file mode 100644
index c47bdf9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/macros.rs
+++ /dev/null
@@ -1,303 +0,0 @@
-/// Construct a `serde_json::Value` from a JSON literal.
-///
-/// ```
-/// # use serde_json::json;
-/// #
-/// let value = json!({
-///     "code": 200,
-///     "success": true,
-///     "payload": {
-///         "features": [
-///             "serde",
-///             "json"
-///         ],
-///         "homepage": null
-///     }
-/// });
-/// ```
-///
-/// Variables or expressions can be interpolated into the JSON literal. Any type
-/// interpolated into an array element or object value must implement Serde's
-/// `Serialize` trait, while any type interpolated into a object key must
-/// implement `Into<String>`. If the `Serialize` implementation of the
-/// interpolated type decides to fail, or if the interpolated type contains a
-/// map with non-string keys, the `json!` macro will panic.
-///
-/// ```
-/// # use serde_json::json;
-/// #
-/// let code = 200;
-/// let features = vec!["serde", "json"];
-///
-/// let value = json!({
-///     "code": code,
-///     "success": code == 200,
-///     "payload": {
-///         features[0]: features[1]
-///     }
-/// });
-/// ```
-///
-/// Trailing commas are allowed inside both arrays and objects.
-///
-/// ```
-/// # use serde_json::json;
-/// #
-/// let value = json!([
-///     "notice",
-///     "the",
-///     "trailing",
-///     "comma -->",
-/// ]);
-/// ```
-#[macro_export]
-macro_rules! json {
-    // Hide distracting implementation details from the generated rustdoc.
-    ($($json:tt)+) => {
-        $crate::json_internal!($($json)+)
-    };
-}
-
-// Rocket relies on this because they export their own `json!` with a different
-// doc comment than ours, and various Rust bugs prevent them from calling our
-// `json!` from their `json!` so they call `json_internal!` directly. Check with
-// @SergioBenitez before making breaking changes to this macro.
-//
-// Changes are fine as long as `json_internal!` does not call any new helper
-// macros and can still be invoked as `json_internal!($($json)+)`.
-#[macro_export]
-#[doc(hidden)]
-macro_rules! json_internal {
-    //////////////////////////////////////////////////////////////////////////
-    // TT muncher for parsing the inside of an array [...]. Produces a vec![...]
-    // of the elements.
-    //
-    // Must be invoked as: json_internal!(@array [] $($tt)*)
-    //////////////////////////////////////////////////////////////////////////
-
-    // Done with trailing comma.
-    (@array [$($elems:expr,)*]) => {
-        $crate::__private::vec![$($elems,)*]
-    };
-
-    // Done without trailing comma.
-    (@array [$($elems:expr),*]) => {
-        $crate::__private::vec![$($elems),*]
-    };
-
-    // Next element is `null`.
-    (@array [$($elems:expr,)*] null $($rest:tt)*) => {
-        $crate::json_internal!(@array [$($elems,)* $crate::json_internal!(null)] $($rest)*)
-    };
-
-    // Next element is `true`.
-    (@array [$($elems:expr,)*] true $($rest:tt)*) => {
-        $crate::json_internal!(@array [$($elems,)* $crate::json_internal!(true)] $($rest)*)
-    };
-
-    // Next element is `false`.
-    (@array [$($elems:expr,)*] false $($rest:tt)*) => {
-        $crate::json_internal!(@array [$($elems,)* $crate::json_internal!(false)] $($rest)*)
-    };
-
-    // Next element is an array.
-    (@array [$($elems:expr,)*] [$($array:tt)*] $($rest:tt)*) => {
-        $crate::json_internal!(@array [$($elems,)* $crate::json_internal!([$($array)*])] $($rest)*)
-    };
-
-    // Next element is a map.
-    (@array [$($elems:expr,)*] {$($map:tt)*} $($rest:tt)*) => {
-        $crate::json_internal!(@array [$($elems,)* $crate::json_internal!({$($map)*})] $($rest)*)
-    };
-
-    // Next element is an expression followed by comma.
-    (@array [$($elems:expr,)*] $next:expr, $($rest:tt)*) => {
-        $crate::json_internal!(@array [$($elems,)* $crate::json_internal!($next),] $($rest)*)
-    };
-
-    // Last element is an expression with no trailing comma.
-    (@array [$($elems:expr,)*] $last:expr) => {
-        $crate::json_internal!(@array [$($elems,)* $crate::json_internal!($last)])
-    };
-
-    // Comma after the most recent element.
-    (@array [$($elems:expr),*] , $($rest:tt)*) => {
-        $crate::json_internal!(@array [$($elems,)*] $($rest)*)
-    };
-
-    // Unexpected token after most recent element.
-    (@array [$($elems:expr),*] $unexpected:tt $($rest:tt)*) => {
-        $crate::json_unexpected!($unexpected)
-    };
-
-    //////////////////////////////////////////////////////////////////////////
-    // TT muncher for parsing the inside of an object {...}. Each entry is
-    // inserted into the given map variable.
-    //
-    // Must be invoked as: json_internal!(@object $map () ($($tt)*) ($($tt)*))
-    //
-    // We require two copies of the input tokens so that we can match on one
-    // copy and trigger errors on the other copy.
-    //////////////////////////////////////////////////////////////////////////
-
-    // Done.
-    (@object $object:ident () () ()) => {};
-
-    // Insert the current entry followed by trailing comma.
-    (@object $object:ident [$($key:tt)+] ($value:expr) , $($rest:tt)*) => {
-        let _ = $object.insert(($($key)+).into(), $value);
-        $crate::json_internal!(@object $object () ($($rest)*) ($($rest)*));
-    };
-
-    // Current entry followed by unexpected token.
-    (@object $object:ident [$($key:tt)+] ($value:expr) $unexpected:tt $($rest:tt)*) => {
-        $crate::json_unexpected!($unexpected);
-    };
-
-    // Insert the last entry without trailing comma.
-    (@object $object:ident [$($key:tt)+] ($value:expr)) => {
-        let _ = $object.insert(($($key)+).into(), $value);
-    };
-
-    // Next value is `null`.
-    (@object $object:ident ($($key:tt)+) (: null $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object [$($key)+] ($crate::json_internal!(null)) $($rest)*);
-    };
-
-    // Next value is `true`.
-    (@object $object:ident ($($key:tt)+) (: true $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object [$($key)+] ($crate::json_internal!(true)) $($rest)*);
-    };
-
-    // Next value is `false`.
-    (@object $object:ident ($($key:tt)+) (: false $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object [$($key)+] ($crate::json_internal!(false)) $($rest)*);
-    };
-
-    // Next value is an array.
-    (@object $object:ident ($($key:tt)+) (: [$($array:tt)*] $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object [$($key)+] ($crate::json_internal!([$($array)*])) $($rest)*);
-    };
-
-    // Next value is a map.
-    (@object $object:ident ($($key:tt)+) (: {$($map:tt)*} $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object [$($key)+] ($crate::json_internal!({$($map)*})) $($rest)*);
-    };
-
-    // Next value is an expression followed by comma.
-    (@object $object:ident ($($key:tt)+) (: $value:expr , $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object [$($key)+] ($crate::json_internal!($value)) , $($rest)*);
-    };
-
-    // Last value is an expression with no trailing comma.
-    (@object $object:ident ($($key:tt)+) (: $value:expr) $copy:tt) => {
-        $crate::json_internal!(@object $object [$($key)+] ($crate::json_internal!($value)));
-    };
-
-    // Missing value for last entry. Trigger a reasonable error message.
-    (@object $object:ident ($($key:tt)+) (:) $copy:tt) => {
-        // "unexpected end of macro invocation"
-        $crate::json_internal!();
-    };
-
-    // Missing colon and value for last entry. Trigger a reasonable error
-    // message.
-    (@object $object:ident ($($key:tt)+) () $copy:tt) => {
-        // "unexpected end of macro invocation"
-        $crate::json_internal!();
-    };
-
-    // Misplaced colon. Trigger a reasonable error message.
-    (@object $object:ident () (: $($rest:tt)*) ($colon:tt $($copy:tt)*)) => {
-        // Takes no arguments so "no rules expected the token `:`".
-        $crate::json_unexpected!($colon);
-    };
-
-    // Found a comma inside a key. Trigger a reasonable error message.
-    (@object $object:ident ($($key:tt)*) (, $($rest:tt)*) ($comma:tt $($copy:tt)*)) => {
-        // Takes no arguments so "no rules expected the token `,`".
-        $crate::json_unexpected!($comma);
-    };
-
-    // Key is fully parenthesized. This avoids clippy double_parens false
-    // positives because the parenthesization may be necessary here.
-    (@object $object:ident () (($key:expr) : $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object ($key) (: $($rest)*) (: $($rest)*));
-    };
-
-    // Refuse to absorb colon token into key expression.
-    (@object $object:ident ($($key:tt)*) (: $($unexpected:tt)+) $copy:tt) => {
-        $crate::json_expect_expr_comma!($($unexpected)+);
-    };
-
-    // Munch a token into the current key.
-    (@object $object:ident ($($key:tt)*) ($tt:tt $($rest:tt)*) $copy:tt) => {
-        $crate::json_internal!(@object $object ($($key)* $tt) ($($rest)*) ($($rest)*));
-    };
-
-    //////////////////////////////////////////////////////////////////////////
-    // The main implementation.
-    //
-    // Must be invoked as: json_internal!($($json)+)
-    //////////////////////////////////////////////////////////////////////////
-
-    (null) => {
-        $crate::Value::Null
-    };
-
-    (true) => {
-        $crate::Value::Bool(true)
-    };
-
-    (false) => {
-        $crate::Value::Bool(false)
-    };
-
-    ([]) => {
-        $crate::Value::Array($crate::__private::vec![])
-    };
-
-    ([ $($tt:tt)+ ]) => {
-        $crate::Value::Array($crate::json_internal!(@array [] $($tt)+))
-    };
-
-    ({}) => {
-        $crate::Value::Object($crate::Map::new())
-    };
-
-    ({ $($tt:tt)+ }) => {
-        $crate::Value::Object({
-            let mut object = $crate::Map::new();
-            $crate::json_internal!(@object object () ($($tt)+) ($($tt)+));
-            object
-        })
-    };
-
-    // Any Serialize type: numbers, strings, struct literals, variables etc.
-    // Must be below every other rule.
-    ($other:expr) => {
-        $crate::to_value(&$other).unwrap()
-    };
-}
-
-// Used by old versions of Rocket.
-// Unused since https://github.com/rwf2/Rocket/commit/c74bcfd40a47b35330db6cafb88e4f3da83e0d17
-#[macro_export]
-#[doc(hidden)]
-macro_rules! json_internal_vec {
-    ($($content:tt)*) => {
-        vec![$($content)*]
-    };
-}
-
-#[macro_export]
-#[doc(hidden)]
-macro_rules! json_unexpected {
-    () => {};
-}
-
-#[macro_export]
-#[doc(hidden)]
-macro_rules! json_expect_expr_comma {
-    ($e:expr , $($tt:tt)*) => {};
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/map.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/map.rs
deleted file mode 100644
index be60f22..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/map.rs
+++ /dev/null
@@ -1,1174 +0,0 @@
-//! A map of String to serde_json::Value.
-//!
-//! By default the map is backed by a [`BTreeMap`]. Enable the `preserve_order`
-//! feature of serde_json to use [`IndexMap`] instead.
-//!
-//! [`BTreeMap`]: std::collections::BTreeMap
-//! [`IndexMap`]: indexmap::IndexMap
-
-use crate::error::Error;
-use crate::value::Value;
-use alloc::string::String;
-#[cfg(feature = "preserve_order")]
-use alloc::vec::Vec;
-use core::borrow::Borrow;
-use core::fmt::{self, Debug};
-use core::hash::{Hash, Hasher};
-use core::iter::FusedIterator;
-#[cfg(feature = "preserve_order")]
-use core::mem;
-use core::ops;
-use serde::de;
-
-#[cfg(not(feature = "preserve_order"))]
-use alloc::collections::{btree_map, BTreeMap};
-#[cfg(feature = "preserve_order")]
-use indexmap::IndexMap;
-
-/// Represents a JSON key/value type.
-pub struct Map<K, V> {
-    map: MapImpl<K, V>,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type MapImpl<K, V> = BTreeMap<K, V>;
-#[cfg(feature = "preserve_order")]
-type MapImpl<K, V> = IndexMap<K, V>;
-
-impl Map<String, Value> {
-    /// Makes a new empty Map.
-    #[inline]
-    pub fn new() -> Self {
-        Map {
-            map: MapImpl::new(),
-        }
-    }
-
-    /// Makes a new empty Map with the given initial capacity.
-    #[inline]
-    pub fn with_capacity(capacity: usize) -> Self {
-        Map {
-            #[cfg(not(feature = "preserve_order"))]
-            map: {
-                // does not support with_capacity
-                let _ = capacity;
-                BTreeMap::new()
-            },
-            #[cfg(feature = "preserve_order")]
-            map: IndexMap::with_capacity(capacity),
-        }
-    }
-
-    /// Clears the map, removing all values.
-    #[inline]
-    pub fn clear(&mut self) {
-        self.map.clear();
-    }
-
-    /// Returns a reference to the value corresponding to the key.
-    ///
-    /// The key may be any borrowed form of the map's key type, but the ordering
-    /// on the borrowed form *must* match the ordering on the key type.
-    #[inline]
-    pub fn get<Q>(&self, key: &Q) -> Option<&Value>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.get(key)
-    }
-
-    /// Returns true if the map contains a value for the specified key.
-    ///
-    /// The key may be any borrowed form of the map's key type, but the ordering
-    /// on the borrowed form *must* match the ordering on the key type.
-    #[inline]
-    pub fn contains_key<Q>(&self, key: &Q) -> bool
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.contains_key(key)
-    }
-
-    /// Returns a mutable reference to the value corresponding to the key.
-    ///
-    /// The key may be any borrowed form of the map's key type, but the ordering
-    /// on the borrowed form *must* match the ordering on the key type.
-    #[inline]
-    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut Value>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.get_mut(key)
-    }
-
-    /// Returns the key-value pair matching the given key.
-    ///
-    /// The key may be any borrowed form of the map's key type, but the ordering
-    /// on the borrowed form *must* match the ordering on the key type.
-    #[inline]
-    pub fn get_key_value<Q>(&self, key: &Q) -> Option<(&String, &Value)>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.get_key_value(key)
-    }
-
-    /// Inserts a key-value pair into the map.
-    ///
-    /// If the map did not have this key present, `None` is returned.
-    ///
-    /// If the map did have this key present, the value is updated, and the old
-    /// value is returned.
-    #[inline]
-    pub fn insert(&mut self, k: String, v: Value) -> Option<Value> {
-        self.map.insert(k, v)
-    }
-
-    /// Insert a key-value pair in the map at the given index.
-    ///
-    /// If the map did not have this key present, `None` is returned.
-    ///
-    /// If the map did have this key present, the key is moved to the new
-    /// position, the value is updated, and the old value is returned.
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn shift_insert(&mut self, index: usize, k: String, v: Value) -> Option<Value> {
-        self.map.shift_insert(index, k, v)
-    }
-
-    /// Removes a key from the map, returning the value at the key if the key
-    /// was previously in the map.
-    ///
-    /// The key may be any borrowed form of the map's key type, but the ordering
-    /// on the borrowed form *must* match the ordering on the key type.
-    ///
-    /// If serde_json's "preserve_order" is enabled, `.remove(key)` is
-    /// equivalent to [`.swap_remove(key)`][Self::swap_remove], replacing this
-    /// entry's position with the last element. If you need to preserve the
-    /// relative order of the keys in the map, use
-    /// [`.shift_remove(key)`][Self::shift_remove] instead.
-    #[inline]
-    pub fn remove<Q>(&mut self, key: &Q) -> Option<Value>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        #[cfg(feature = "preserve_order")]
-        return self.swap_remove(key);
-        #[cfg(not(feature = "preserve_order"))]
-        return self.map.remove(key);
-    }
-
-    /// Removes a key from the map, returning the stored key and value if the
-    /// key was previously in the map.
-    ///
-    /// The key may be any borrowed form of the map's key type, but the ordering
-    /// on the borrowed form *must* match the ordering on the key type.
-    ///
-    /// If serde_json's "preserve_order" is enabled, `.remove_entry(key)` is
-    /// equivalent to [`.swap_remove_entry(key)`][Self::swap_remove_entry],
-    /// replacing this entry's position with the last element. If you need to
-    /// preserve the relative order of the keys in the map, use
-    /// [`.shift_remove_entry(key)`][Self::shift_remove_entry] instead.
-    #[inline]
-    pub fn remove_entry<Q>(&mut self, key: &Q) -> Option<(String, Value)>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        #[cfg(feature = "preserve_order")]
-        return self.swap_remove_entry(key);
-        #[cfg(not(feature = "preserve_order"))]
-        return self.map.remove_entry(key);
-    }
-
-    /// Removes and returns the value corresponding to the key from the map.
-    ///
-    /// Like [`Vec::swap_remove`], the entry is removed by swapping it with the
-    /// last element of the map and popping it off. This perturbs the position
-    /// of what used to be the last element!
-    ///
-    /// [`Vec::swap_remove`]: std::vec::Vec::swap_remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn swap_remove<Q>(&mut self, key: &Q) -> Option<Value>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.swap_remove(key)
-    }
-
-    /// Remove and return the key-value pair.
-    ///
-    /// Like [`Vec::swap_remove`], the entry is removed by swapping it with the
-    /// last element of the map and popping it off. This perturbs the position
-    /// of what used to be the last element!
-    ///
-    /// [`Vec::swap_remove`]: std::vec::Vec::swap_remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn swap_remove_entry<Q>(&mut self, key: &Q) -> Option<(String, Value)>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.swap_remove_entry(key)
-    }
-
-    /// Removes and returns the value corresponding to the key from the map.
-    ///
-    /// Like [`Vec::remove`], the entry is removed by shifting all of the
-    /// elements that follow it, preserving their relative order. This perturbs
-    /// the index of all of those elements!
-    ///
-    /// [`Vec::remove`]: std::vec::Vec::remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn shift_remove<Q>(&mut self, key: &Q) -> Option<Value>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.shift_remove(key)
-    }
-
-    /// Remove and return the key-value pair.
-    ///
-    /// Like [`Vec::remove`], the entry is removed by shifting all of the
-    /// elements that follow it, preserving their relative order. This perturbs
-    /// the index of all of those elements!
-    ///
-    /// [`Vec::remove`]: std::vec::Vec::remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn shift_remove_entry<Q>(&mut self, key: &Q) -> Option<(String, Value)>
-    where
-        String: Borrow<Q>,
-        Q: ?Sized + Ord + Eq + Hash,
-    {
-        self.map.shift_remove_entry(key)
-    }
-
-    /// Moves all elements from other into self, leaving other empty.
-    #[inline]
-    pub fn append(&mut self, other: &mut Self) {
-        #[cfg(feature = "preserve_order")]
-        self.map
-            .extend(mem::replace(&mut other.map, MapImpl::default()));
-        #[cfg(not(feature = "preserve_order"))]
-        self.map.append(&mut other.map);
-    }
-
-    /// Gets the given key's corresponding entry in the map for in-place
-    /// manipulation.
-    pub fn entry<S>(&mut self, key: S) -> Entry
-    where
-        S: Into<String>,
-    {
-        #[cfg(not(feature = "preserve_order"))]
-        use alloc::collections::btree_map::Entry as EntryImpl;
-        #[cfg(feature = "preserve_order")]
-        use indexmap::map::Entry as EntryImpl;
-
-        match self.map.entry(key.into()) {
-            EntryImpl::Vacant(vacant) => Entry::Vacant(VacantEntry { vacant }),
-            EntryImpl::Occupied(occupied) => Entry::Occupied(OccupiedEntry { occupied }),
-        }
-    }
-
-    /// Returns the number of elements in the map.
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.map.len()
-    }
-
-    /// Returns true if the map contains no elements.
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.map.is_empty()
-    }
-
-    /// Gets an iterator over the entries of the map.
-    #[inline]
-    pub fn iter(&self) -> Iter {
-        Iter {
-            iter: self.map.iter(),
-        }
-    }
-
-    /// Gets a mutable iterator over the entries of the map.
-    #[inline]
-    pub fn iter_mut(&mut self) -> IterMut {
-        IterMut {
-            iter: self.map.iter_mut(),
-        }
-    }
-
-    /// Gets an iterator over the keys of the map.
-    #[inline]
-    pub fn keys(&self) -> Keys {
-        Keys {
-            iter: self.map.keys(),
-        }
-    }
-
-    /// Gets an iterator over the values of the map.
-    #[inline]
-    pub fn values(&self) -> Values {
-        Values {
-            iter: self.map.values(),
-        }
-    }
-
-    /// Gets an iterator over mutable values of the map.
-    #[inline]
-    pub fn values_mut(&mut self) -> ValuesMut {
-        ValuesMut {
-            iter: self.map.values_mut(),
-        }
-    }
-
-    /// Gets an iterator over the values of the map.
-    #[inline]
-    pub fn into_values(self) -> IntoValues {
-        IntoValues {
-            iter: self.map.into_values(),
-        }
-    }
-
-    /// Retains only the elements specified by the predicate.
-    ///
-    /// In other words, remove all pairs `(k, v)` such that `f(&k, &mut v)`
-    /// returns `false`.
-    #[inline]
-    pub fn retain<F>(&mut self, f: F)
-    where
-        F: FnMut(&String, &mut Value) -> bool,
-    {
-        self.map.retain(f);
-    }
-
-    /// Sorts this map's entries in-place using `str`'s usual ordering.
-    ///
-    /// If serde_json's "preserve_order" feature is not enabled, this method
-    /// does no work because all JSON maps are always kept in a sorted state.
-    ///
-    /// If serde_json's "preserve_order" feature is enabled, this method
-    /// destroys the original source order or insertion order of this map in
-    /// favor of an alphanumerical order that matches how a BTreeMap with the
-    /// same contents would be ordered. This takes **O(n log n + c)** time where
-    /// _n_ is the length of the map and _c_ is the capacity.
-    ///
-    /// Other maps nested within the values of this map are not sorted. If you
-    /// need the entire data structure to be sorted at all levels, you must also
-    /// call
-    /// <code>map.[values_mut]\().for_each([Value::sort_all_objects])</code>.
-    ///
-    /// [values_mut]: Map::values_mut
-    #[inline]
-    pub fn sort_keys(&mut self) {
-        #[cfg(feature = "preserve_order")]
-        self.map.sort_unstable_keys();
-    }
-}
-
-#[allow(clippy::derivable_impls)] // clippy bug: https://github.com/rust-lang/rust-clippy/issues/7655
-impl Default for Map<String, Value> {
-    #[inline]
-    fn default() -> Self {
-        Map {
-            map: MapImpl::new(),
-        }
-    }
-}
-
-impl Clone for Map<String, Value> {
-    #[inline]
-    fn clone(&self) -> Self {
-        Map {
-            map: self.map.clone(),
-        }
-    }
-
-    #[inline]
-    fn clone_from(&mut self, source: &Self) {
-        self.map.clone_from(&source.map);
-    }
-}
-
-impl PartialEq for Map<String, Value> {
-    #[inline]
-    fn eq(&self, other: &Self) -> bool {
-        self.map.eq(&other.map)
-    }
-}
-
-impl Eq for Map<String, Value> {}
-
-impl Hash for Map<String, Value> {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        #[cfg(not(feature = "preserve_order"))]
-        {
-            self.map.hash(state);
-        }
-
-        #[cfg(feature = "preserve_order")]
-        {
-            let mut kv = Vec::from_iter(&self.map);
-            kv.sort_unstable_by(|a, b| a.0.cmp(b.0));
-            kv.hash(state);
-        }
-    }
-}
-
-/// Access an element of this map. Panics if the given key is not present in the
-/// map.
-///
-/// ```
-/// # use serde_json::Value;
-/// #
-/// # let val = &Value::String("".to_owned());
-/// # let _ =
-/// match val {
-///     Value::String(s) => Some(s.as_str()),
-///     Value::Array(arr) => arr[0].as_str(),
-///     Value::Object(map) => map["type"].as_str(),
-///     _ => None,
-/// }
-/// # ;
-/// ```
-impl<Q> ops::Index<&Q> for Map<String, Value>
-where
-    String: Borrow<Q>,
-    Q: ?Sized + Ord + Eq + Hash,
-{
-    type Output = Value;
-
-    fn index(&self, index: &Q) -> &Value {
-        self.map.index(index)
-    }
-}
-
-/// Mutably access an element of this map. Panics if the given key is not
-/// present in the map.
-///
-/// ```
-/// # use serde_json::json;
-/// #
-/// # let mut map = serde_json::Map::new();
-/// # map.insert("key".to_owned(), serde_json::Value::Null);
-/// #
-/// map["key"] = json!("value");
-/// ```
-impl<Q> ops::IndexMut<&Q> for Map<String, Value>
-where
-    String: Borrow<Q>,
-    Q: ?Sized + Ord + Eq + Hash,
-{
-    fn index_mut(&mut self, index: &Q) -> &mut Value {
-        self.map.get_mut(index).expect("no entry found for key")
-    }
-}
-
-impl Debug for Map<String, Value> {
-    #[inline]
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-        self.map.fmt(formatter)
-    }
-}
-
-#[cfg(any(feature = "std", feature = "alloc"))]
-impl serde::ser::Serialize for Map<String, Value> {
-    #[inline]
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::ser::Serializer,
-    {
-        use serde::ser::SerializeMap;
-        let mut map = tri!(serializer.serialize_map(Some(self.len())));
-        for (k, v) in self {
-            tri!(map.serialize_entry(k, v));
-        }
-        map.end()
-    }
-}
-
-impl<'de> de::Deserialize<'de> for Map<String, Value> {
-    #[inline]
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: de::Deserializer<'de>,
-    {
-        struct Visitor;
-
-        impl<'de> de::Visitor<'de> for Visitor {
-            type Value = Map<String, Value>;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("a map")
-            }
-
-            #[inline]
-            fn visit_unit<E>(self) -> Result<Self::Value, E>
-            where
-                E: de::Error,
-            {
-                Ok(Map::new())
-            }
-
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            #[inline]
-            fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
-            where
-                V: de::MapAccess<'de>,
-            {
-                let mut values = Map::new();
-
-                while let Some((key, value)) = tri!(visitor.next_entry()) {
-                    values.insert(key, value);
-                }
-
-                Ok(values)
-            }
-        }
-
-        deserializer.deserialize_map(Visitor)
-    }
-}
-
-impl FromIterator<(String, Value)> for Map<String, Value> {
-    fn from_iter<T>(iter: T) -> Self
-    where
-        T: IntoIterator<Item = (String, Value)>,
-    {
-        Map {
-            map: FromIterator::from_iter(iter),
-        }
-    }
-}
-
-impl Extend<(String, Value)> for Map<String, Value> {
-    fn extend<T>(&mut self, iter: T)
-    where
-        T: IntoIterator<Item = (String, Value)>,
-    {
-        self.map.extend(iter);
-    }
-}
-
-macro_rules! delegate_iterator {
-    (($name:ident $($generics:tt)*) => $item:ty) => {
-        impl $($generics)* Iterator for $name $($generics)* {
-            type Item = $item;
-            #[inline]
-            fn next(&mut self) -> Option<Self::Item> {
-                self.iter.next()
-            }
-            #[inline]
-            fn size_hint(&self) -> (usize, Option<usize>) {
-                self.iter.size_hint()
-            }
-        }
-
-        impl $($generics)* DoubleEndedIterator for $name $($generics)* {
-            #[inline]
-            fn next_back(&mut self) -> Option<Self::Item> {
-                self.iter.next_back()
-            }
-        }
-
-        impl $($generics)* ExactSizeIterator for $name $($generics)* {
-            #[inline]
-            fn len(&self) -> usize {
-                self.iter.len()
-            }
-        }
-
-        impl $($generics)* FusedIterator for $name $($generics)* {}
-    }
-}
-
-impl<'de> de::IntoDeserializer<'de, Error> for Map<String, Value> {
-    type Deserializer = Self;
-
-    fn into_deserializer(self) -> Self::Deserializer {
-        self
-    }
-}
-
-impl<'de> de::IntoDeserializer<'de, Error> for &'de Map<String, Value> {
-    type Deserializer = Self;
-
-    fn into_deserializer(self) -> Self::Deserializer {
-        self
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// A view into a single entry in a map, which may either be vacant or occupied.
-/// This enum is constructed from the [`entry`] method on [`Map`].
-///
-/// [`entry`]: Map::entry
-pub enum Entry<'a> {
-    /// A vacant Entry.
-    Vacant(VacantEntry<'a>),
-    /// An occupied Entry.
-    Occupied(OccupiedEntry<'a>),
-}
-
-/// A vacant Entry. It is part of the [`Entry`] enum.
-pub struct VacantEntry<'a> {
-    vacant: VacantEntryImpl<'a>,
-}
-
-/// An occupied Entry. It is part of the [`Entry`] enum.
-pub struct OccupiedEntry<'a> {
-    occupied: OccupiedEntryImpl<'a>,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type VacantEntryImpl<'a> = btree_map::VacantEntry<'a, String, Value>;
-#[cfg(feature = "preserve_order")]
-type VacantEntryImpl<'a> = indexmap::map::VacantEntry<'a, String, Value>;
-
-#[cfg(not(feature = "preserve_order"))]
-type OccupiedEntryImpl<'a> = btree_map::OccupiedEntry<'a, String, Value>;
-#[cfg(feature = "preserve_order")]
-type OccupiedEntryImpl<'a> = indexmap::map::OccupiedEntry<'a, String, Value>;
-
-impl<'a> Entry<'a> {
-    /// Returns a reference to this entry's key.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// let mut map = serde_json::Map::new();
-    /// assert_eq!(map.entry("serde").key(), &"serde");
-    /// ```
-    pub fn key(&self) -> &String {
-        match self {
-            Entry::Vacant(e) => e.key(),
-            Entry::Occupied(e) => e.key(),
-        }
-    }
-
-    /// Ensures a value is in the entry by inserting the default if empty, and
-    /// returns a mutable reference to the value in the entry.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut map = serde_json::Map::new();
-    /// map.entry("serde").or_insert(json!(12));
-    ///
-    /// assert_eq!(map["serde"], 12);
-    /// ```
-    pub fn or_insert(self, default: Value) -> &'a mut Value {
-        match self {
-            Entry::Vacant(entry) => entry.insert(default),
-            Entry::Occupied(entry) => entry.into_mut(),
-        }
-    }
-
-    /// Ensures a value is in the entry by inserting the result of the default
-    /// function if empty, and returns a mutable reference to the value in the
-    /// entry.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut map = serde_json::Map::new();
-    /// map.entry("serde").or_insert_with(|| json!("hoho"));
-    ///
-    /// assert_eq!(map["serde"], "hoho".to_owned());
-    /// ```
-    pub fn or_insert_with<F>(self, default: F) -> &'a mut Value
-    where
-        F: FnOnce() -> Value,
-    {
-        match self {
-            Entry::Vacant(entry) => entry.insert(default()),
-            Entry::Occupied(entry) => entry.into_mut(),
-        }
-    }
-
-    /// Provides in-place mutable access to an occupied entry before any
-    /// potential inserts into the map.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut map = serde_json::Map::new();
-    /// map.entry("serde")
-    ///     .and_modify(|e| *e = json!("rust"))
-    ///     .or_insert(json!("cpp"));
-    ///
-    /// assert_eq!(map["serde"], "cpp");
-    ///
-    /// map.entry("serde")
-    ///     .and_modify(|e| *e = json!("rust"))
-    ///     .or_insert(json!("cpp"));
-    ///
-    /// assert_eq!(map["serde"], "rust");
-    /// ```
-    pub fn and_modify<F>(self, f: F) -> Self
-    where
-        F: FnOnce(&mut Value),
-    {
-        match self {
-            Entry::Occupied(mut entry) => {
-                f(entry.get_mut());
-                Entry::Occupied(entry)
-            }
-            Entry::Vacant(entry) => Entry::Vacant(entry),
-        }
-    }
-}
-
-impl<'a> VacantEntry<'a> {
-    /// Gets a reference to the key that would be used when inserting a value
-    /// through the VacantEntry.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Vacant(vacant) => {
-    ///         assert_eq!(vacant.key(), &"serde");
-    ///     }
-    ///     Entry::Occupied(_) => unimplemented!(),
-    /// }
-    /// ```
-    #[inline]
-    pub fn key(&self) -> &String {
-        self.vacant.key()
-    }
-
-    /// Sets the value of the entry with the VacantEntry's key, and returns a
-    /// mutable reference to it.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Vacant(vacant) => {
-    ///         vacant.insert(json!("hoho"));
-    ///     }
-    ///     Entry::Occupied(_) => unimplemented!(),
-    /// }
-    /// ```
-    #[inline]
-    pub fn insert(self, value: Value) -> &'a mut Value {
-        self.vacant.insert(value)
-    }
-}
-
-impl<'a> OccupiedEntry<'a> {
-    /// Gets a reference to the key in the entry.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    /// map.insert("serde".to_owned(), json!(12));
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Occupied(occupied) => {
-    ///         assert_eq!(occupied.key(), &"serde");
-    ///     }
-    ///     Entry::Vacant(_) => unimplemented!(),
-    /// }
-    /// ```
-    #[inline]
-    pub fn key(&self) -> &String {
-        self.occupied.key()
-    }
-
-    /// Gets a reference to the value in the entry.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    /// map.insert("serde".to_owned(), json!(12));
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Occupied(occupied) => {
-    ///         assert_eq!(occupied.get(), 12);
-    ///     }
-    ///     Entry::Vacant(_) => unimplemented!(),
-    /// }
-    /// ```
-    #[inline]
-    pub fn get(&self) -> &Value {
-        self.occupied.get()
-    }
-
-    /// Gets a mutable reference to the value in the entry.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    /// map.insert("serde".to_owned(), json!([1, 2, 3]));
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Occupied(mut occupied) => {
-    ///         occupied.get_mut().as_array_mut().unwrap().push(json!(4));
-    ///     }
-    ///     Entry::Vacant(_) => unimplemented!(),
-    /// }
-    ///
-    /// assert_eq!(map["serde"].as_array().unwrap().len(), 4);
-    /// ```
-    #[inline]
-    pub fn get_mut(&mut self) -> &mut Value {
-        self.occupied.get_mut()
-    }
-
-    /// Converts the entry into a mutable reference to its value.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    /// map.insert("serde".to_owned(), json!([1, 2, 3]));
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Occupied(mut occupied) => {
-    ///         occupied.into_mut().as_array_mut().unwrap().push(json!(4));
-    ///     }
-    ///     Entry::Vacant(_) => unimplemented!(),
-    /// }
-    ///
-    /// assert_eq!(map["serde"].as_array().unwrap().len(), 4);
-    /// ```
-    #[inline]
-    pub fn into_mut(self) -> &'a mut Value {
-        self.occupied.into_mut()
-    }
-
-    /// Sets the value of the entry with the `OccupiedEntry`'s key, and returns
-    /// the entry's old value.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    /// map.insert("serde".to_owned(), json!(12));
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Occupied(mut occupied) => {
-    ///         assert_eq!(occupied.insert(json!(13)), 12);
-    ///         assert_eq!(occupied.get(), 13);
-    ///     }
-    ///     Entry::Vacant(_) => unimplemented!(),
-    /// }
-    /// ```
-    #[inline]
-    pub fn insert(&mut self, value: Value) -> Value {
-        self.occupied.insert(value)
-    }
-
-    /// Takes the value of the entry out of the map, and returns it.
-    ///
-    /// If serde_json's "preserve_order" is enabled, `.remove()` is
-    /// equivalent to [`.swap_remove()`][Self::swap_remove], replacing this
-    /// entry's position with the last element. If you need to preserve the
-    /// relative order of the keys in the map, use
-    /// [`.shift_remove()`][Self::shift_remove] instead.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    /// map.insert("serde".to_owned(), json!(12));
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Occupied(occupied) => {
-    ///         assert_eq!(occupied.remove(), 12);
-    ///     }
-    ///     Entry::Vacant(_) => unimplemented!(),
-    /// }
-    /// ```
-    #[inline]
-    pub fn remove(self) -> Value {
-        #[cfg(feature = "preserve_order")]
-        return self.swap_remove();
-        #[cfg(not(feature = "preserve_order"))]
-        return self.occupied.remove();
-    }
-
-    /// Takes the value of the entry out of the map, and returns it.
-    ///
-    /// Like [`Vec::swap_remove`], the entry is removed by swapping it with the
-    /// last element of the map and popping it off. This perturbs the position
-    /// of what used to be the last element!
-    ///
-    /// [`Vec::swap_remove`]: std::vec::Vec::swap_remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn swap_remove(self) -> Value {
-        self.occupied.swap_remove()
-    }
-
-    /// Takes the value of the entry out of the map, and returns it.
-    ///
-    /// Like [`Vec::remove`], the entry is removed by shifting all of the
-    /// elements that follow it, preserving their relative order. This perturbs
-    /// the index of all of those elements!
-    ///
-    /// [`Vec::remove`]: std::vec::Vec::remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn shift_remove(self) -> Value {
-        self.occupied.shift_remove()
-    }
-
-    /// Removes the entry from the map, returning the stored key and value.
-    ///
-    /// If serde_json's "preserve_order" is enabled, `.remove_entry()` is
-    /// equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry],
-    /// replacing this entry's position with the last element. If you need to
-    /// preserve the relative order of the keys in the map, use
-    /// [`.shift_remove_entry()`][Self::shift_remove_entry] instead.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// use serde_json::map::Entry;
-    ///
-    /// let mut map = serde_json::Map::new();
-    /// map.insert("serde".to_owned(), json!(12));
-    ///
-    /// match map.entry("serde") {
-    ///     Entry::Occupied(occupied) => {
-    ///         let (key, value) = occupied.remove_entry();
-    ///         assert_eq!(key, "serde");
-    ///         assert_eq!(value, 12);
-    ///     }
-    ///     Entry::Vacant(_) => unimplemented!(),
-    /// }
-    /// ```
-    #[inline]
-    pub fn remove_entry(self) -> (String, Value) {
-        #[cfg(feature = "preserve_order")]
-        return self.swap_remove_entry();
-        #[cfg(not(feature = "preserve_order"))]
-        return self.occupied.remove_entry();
-    }
-
-    /// Removes the entry from the map, returning the stored key and value.
-    ///
-    /// Like [`Vec::swap_remove`], the entry is removed by swapping it with the
-    /// last element of the map and popping it off. This perturbs the position
-    /// of what used to be the last element!
-    ///
-    /// [`Vec::swap_remove`]: std::vec::Vec::swap_remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn swap_remove_entry(self) -> (String, Value) {
-        self.occupied.swap_remove_entry()
-    }
-
-    /// Removes the entry from the map, returning the stored key and value.
-    ///
-    /// Like [`Vec::remove`], the entry is removed by shifting all of the
-    /// elements that follow it, preserving their relative order. This perturbs
-    /// the index of all of those elements!
-    ///
-    /// [`Vec::remove`]: std::vec::Vec::remove
-    #[cfg(feature = "preserve_order")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "preserve_order")))]
-    #[inline]
-    pub fn shift_remove_entry(self) -> (String, Value) {
-        self.occupied.shift_remove_entry()
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-impl<'a> IntoIterator for &'a Map<String, Value> {
-    type Item = (&'a String, &'a Value);
-    type IntoIter = Iter<'a>;
-    #[inline]
-    fn into_iter(self) -> Self::IntoIter {
-        Iter {
-            iter: self.map.iter(),
-        }
-    }
-}
-
-/// An iterator over a serde_json::Map's entries.
-pub struct Iter<'a> {
-    iter: IterImpl<'a>,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type IterImpl<'a> = btree_map::Iter<'a, String, Value>;
-#[cfg(feature = "preserve_order")]
-type IterImpl<'a> = indexmap::map::Iter<'a, String, Value>;
-
-delegate_iterator!((Iter<'a>) => (&'a String, &'a Value));
-
-//////////////////////////////////////////////////////////////////////////////
-
-impl<'a> IntoIterator for &'a mut Map<String, Value> {
-    type Item = (&'a String, &'a mut Value);
-    type IntoIter = IterMut<'a>;
-    #[inline]
-    fn into_iter(self) -> Self::IntoIter {
-        IterMut {
-            iter: self.map.iter_mut(),
-        }
-    }
-}
-
-/// A mutable iterator over a serde_json::Map's entries.
-pub struct IterMut<'a> {
-    iter: IterMutImpl<'a>,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type IterMutImpl<'a> = btree_map::IterMut<'a, String, Value>;
-#[cfg(feature = "preserve_order")]
-type IterMutImpl<'a> = indexmap::map::IterMut<'a, String, Value>;
-
-delegate_iterator!((IterMut<'a>) => (&'a String, &'a mut Value));
-
-//////////////////////////////////////////////////////////////////////////////
-
-impl IntoIterator for Map<String, Value> {
-    type Item = (String, Value);
-    type IntoIter = IntoIter;
-    #[inline]
-    fn into_iter(self) -> Self::IntoIter {
-        IntoIter {
-            iter: self.map.into_iter(),
-        }
-    }
-}
-
-/// An owning iterator over a serde_json::Map's entries.
-pub struct IntoIter {
-    iter: IntoIterImpl,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type IntoIterImpl = btree_map::IntoIter<String, Value>;
-#[cfg(feature = "preserve_order")]
-type IntoIterImpl = indexmap::map::IntoIter<String, Value>;
-
-delegate_iterator!((IntoIter) => (String, Value));
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// An iterator over a serde_json::Map's keys.
-pub struct Keys<'a> {
-    iter: KeysImpl<'a>,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type KeysImpl<'a> = btree_map::Keys<'a, String, Value>;
-#[cfg(feature = "preserve_order")]
-type KeysImpl<'a> = indexmap::map::Keys<'a, String, Value>;
-
-delegate_iterator!((Keys<'a>) => &'a String);
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// An iterator over a serde_json::Map's values.
-pub struct Values<'a> {
-    iter: ValuesImpl<'a>,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type ValuesImpl<'a> = btree_map::Values<'a, String, Value>;
-#[cfg(feature = "preserve_order")]
-type ValuesImpl<'a> = indexmap::map::Values<'a, String, Value>;
-
-delegate_iterator!((Values<'a>) => &'a Value);
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// A mutable iterator over a serde_json::Map's values.
-pub struct ValuesMut<'a> {
-    iter: ValuesMutImpl<'a>,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type ValuesMutImpl<'a> = btree_map::ValuesMut<'a, String, Value>;
-#[cfg(feature = "preserve_order")]
-type ValuesMutImpl<'a> = indexmap::map::ValuesMut<'a, String, Value>;
-
-delegate_iterator!((ValuesMut<'a>) => &'a mut Value);
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// An owning iterator over a serde_json::Map's values.
-pub struct IntoValues {
-    iter: IntoValuesImpl,
-}
-
-#[cfg(not(feature = "preserve_order"))]
-type IntoValuesImpl = btree_map::IntoValues<String, Value>;
-#[cfg(feature = "preserve_order")]
-type IntoValuesImpl = indexmap::map::IntoValues<String, Value>;
-
-delegate_iterator!((IntoValues) => Value);
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/number.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/number.rs
deleted file mode 100644
index 8cb0c3b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/number.rs
+++ /dev/null
@@ -1,811 +0,0 @@
-use crate::de::ParserNumber;
-use crate::error::Error;
-#[cfg(feature = "arbitrary_precision")]
-use crate::error::ErrorCode;
-#[cfg(feature = "arbitrary_precision")]
-use alloc::borrow::ToOwned;
-#[cfg(feature = "arbitrary_precision")]
-use alloc::string::{String, ToString};
-use core::fmt::{self, Debug, Display};
-#[cfg(not(feature = "arbitrary_precision"))]
-use core::hash::{Hash, Hasher};
-use serde::de::{self, Unexpected, Visitor};
-#[cfg(feature = "arbitrary_precision")]
-use serde::de::{IntoDeserializer, MapAccess};
-use serde::{forward_to_deserialize_any, Deserialize, Deserializer, Serialize, Serializer};
-
-#[cfg(feature = "arbitrary_precision")]
-pub(crate) const TOKEN: &str = "$serde_json::private::Number";
-
-/// Represents a JSON number, whether integer or floating point.
-#[derive(Clone, PartialEq, Eq, Hash)]
-pub struct Number {
-    n: N,
-}
-
-#[cfg(not(feature = "arbitrary_precision"))]
-#[derive(Copy, Clone)]
-enum N {
-    PosInt(u64),
-    /// Always less than zero.
-    NegInt(i64),
-    /// Always finite.
-    Float(f64),
-}
-
-#[cfg(not(feature = "arbitrary_precision"))]
-impl PartialEq for N {
-    fn eq(&self, other: &Self) -> bool {
-        match (self, other) {
-            (N::PosInt(a), N::PosInt(b)) => a == b,
-            (N::NegInt(a), N::NegInt(b)) => a == b,
-            (N::Float(a), N::Float(b)) => a == b,
-            _ => false,
-        }
-    }
-}
-
-// Implementing Eq is fine since any float values are always finite.
-#[cfg(not(feature = "arbitrary_precision"))]
-impl Eq for N {}
-
-#[cfg(not(feature = "arbitrary_precision"))]
-impl Hash for N {
-    fn hash<H: Hasher>(&self, h: &mut H) {
-        match *self {
-            N::PosInt(i) => i.hash(h),
-            N::NegInt(i) => i.hash(h),
-            N::Float(f) => {
-                if f == 0.0f64 {
-                    // There are 2 zero representations, +0 and -0, which
-                    // compare equal but have different bits. We use the +0 hash
-                    // for both so that hash(+0) == hash(-0).
-                    0.0f64.to_bits().hash(h);
-                } else {
-                    f.to_bits().hash(h);
-                }
-            }
-        }
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-type N = String;
-
-impl Number {
-    /// Returns true if the `Number` is an integer between `i64::MIN` and
-    /// `i64::MAX`.
-    ///
-    /// For any Number on which `is_i64` returns true, `as_i64` is guaranteed to
-    /// return the integer value.
-    pub fn is_i64(&self) -> bool {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(v) => v <= i64::MAX as u64,
-            N::NegInt(_) => true,
-            N::Float(_) => false,
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.as_i64().is_some()
-    }
-
-    /// Returns true if the `Number` is an integer between zero and `u64::MAX`.
-    ///
-    /// For any Number on which `is_u64` returns true, `as_u64` is guaranteed to
-    /// return the integer value.
-    pub fn is_u64(&self) -> bool {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(_) => true,
-            N::NegInt(_) | N::Float(_) => false,
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.as_u64().is_some()
-    }
-
-    /// Returns true if the `Number` can be represented by f64.
-    ///
-    /// For any Number on which `is_f64` returns true, `as_f64` is guaranteed to
-    /// return the floating point value.
-    ///
-    /// Currently this function returns true if and only if both `is_i64` and
-    /// `is_u64` return false but this is not a guarantee in the future.
-    pub fn is_f64(&self) -> bool {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::Float(_) => true,
-            N::PosInt(_) | N::NegInt(_) => false,
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        {
-            for c in self.n.chars() {
-                if c == '.' || c == 'e' || c == 'E' {
-                    return self.n.parse::<f64>().ok().map_or(false, f64::is_finite);
-                }
-            }
-            false
-        }
-    }
-
-    /// If the `Number` is an integer, represent it as i64 if possible. Returns
-    /// None otherwise.
-    pub fn as_i64(&self) -> Option<i64> {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(n) => {
-                if n <= i64::MAX as u64 {
-                    Some(n as i64)
-                } else {
-                    None
-                }
-            }
-            N::NegInt(n) => Some(n),
-            N::Float(_) => None,
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.n.parse().ok()
-    }
-
-    /// If the `Number` is an integer, represent it as u64 if possible. Returns
-    /// None otherwise.
-    pub fn as_u64(&self) -> Option<u64> {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(n) => Some(n),
-            N::NegInt(_) | N::Float(_) => None,
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.n.parse().ok()
-    }
-
-    /// Represents the number as f64 if possible. Returns None otherwise.
-    pub fn as_f64(&self) -> Option<f64> {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(n) => Some(n as f64),
-            N::NegInt(n) => Some(n as f64),
-            N::Float(n) => Some(n),
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.n.parse::<f64>().ok().filter(|float| float.is_finite())
-    }
-
-    /// Converts a finite `f64` to a `Number`. Infinite or NaN values are not JSON
-    /// numbers.
-    ///
-    /// ```
-    /// # use serde_json::Number;
-    /// #
-    /// assert!(Number::from_f64(256.0).is_some());
-    ///
-    /// assert!(Number::from_f64(f64::NAN).is_none());
-    /// ```
-    pub fn from_f64(f: f64) -> Option<Number> {
-        if f.is_finite() {
-            let n = {
-                #[cfg(not(feature = "arbitrary_precision"))]
-                {
-                    N::Float(f)
-                }
-                #[cfg(feature = "arbitrary_precision")]
-                {
-                    ryu::Buffer::new().format_finite(f).to_owned()
-                }
-            };
-            Some(Number { n })
-        } else {
-            None
-        }
-    }
-
-    /// If the `Number` is an integer, represent it as i128 if possible. Returns
-    /// None otherwise.
-    pub fn as_i128(&self) -> Option<i128> {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(n) => Some(n as i128),
-            N::NegInt(n) => Some(n as i128),
-            N::Float(_) => None,
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.n.parse().ok()
-    }
-
-    /// If the `Number` is an integer, represent it as u128 if possible. Returns
-    /// None otherwise.
-    pub fn as_u128(&self) -> Option<u128> {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(n) => Some(n as u128),
-            N::NegInt(_) | N::Float(_) => None,
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.n.parse().ok()
-    }
-
-    /// Converts an `i128` to a `Number`. Numbers smaller than i64::MIN or
-    /// larger than u64::MAX can only be represented in `Number` if serde_json's
-    /// "arbitrary_precision" feature is enabled.
-    ///
-    /// ```
-    /// # use serde_json::Number;
-    /// #
-    /// assert!(Number::from_i128(256).is_some());
-    /// ```
-    pub fn from_i128(i: i128) -> Option<Number> {
-        let n = {
-            #[cfg(not(feature = "arbitrary_precision"))]
-            {
-                if let Ok(u) = u64::try_from(i) {
-                    N::PosInt(u)
-                } else if let Ok(i) = i64::try_from(i) {
-                    N::NegInt(i)
-                } else {
-                    return None;
-                }
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            {
-                i.to_string()
-            }
-        };
-        Some(Number { n })
-    }
-
-    /// Converts a `u128` to a `Number`. Numbers greater than u64::MAX can only
-    /// be represented in `Number` if serde_json's "arbitrary_precision" feature
-    /// is enabled.
-    ///
-    /// ```
-    /// # use serde_json::Number;
-    /// #
-    /// assert!(Number::from_u128(256).is_some());
-    /// ```
-    pub fn from_u128(i: u128) -> Option<Number> {
-        let n = {
-            #[cfg(not(feature = "arbitrary_precision"))]
-            {
-                if let Ok(u) = u64::try_from(i) {
-                    N::PosInt(u)
-                } else {
-                    return None;
-                }
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            {
-                i.to_string()
-            }
-        };
-        Some(Number { n })
-    }
-
-    /// Returns the exact original JSON representation that this Number was
-    /// parsed from.
-    ///
-    /// For numbers constructed not via parsing, such as by `From<i32>`, returns
-    /// the JSON representation that serde\_json would serialize for this
-    /// number.
-    ///
-    /// ```
-    /// # use serde_json::Number;
-    /// for value in [
-    ///     "7",
-    ///     "12.34",
-    ///     "34e-56789",
-    ///     "0.0123456789000000012345678900000001234567890000123456789",
-    ///     "343412345678910111213141516171819202122232425262728293034",
-    ///     "-343412345678910111213141516171819202122232425262728293031",
-    /// ] {
-    ///     let number: Number = serde_json::from_str(value).unwrap();
-    ///     assert_eq!(number.as_str(), value);
-    /// }
-    /// ```
-    #[cfg(feature = "arbitrary_precision")]
-    #[cfg_attr(docsrs, doc(cfg(feature = "arbitrary_precision")))]
-    pub fn as_str(&self) -> &str {
-        &self.n
-    }
-
-    pub(crate) fn as_f32(&self) -> Option<f32> {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        match self.n {
-            N::PosInt(n) => Some(n as f32),
-            N::NegInt(n) => Some(n as f32),
-            N::Float(n) => Some(n as f32),
-        }
-        #[cfg(feature = "arbitrary_precision")]
-        self.n.parse::<f32>().ok().filter(|float| float.is_finite())
-    }
-
-    pub(crate) fn from_f32(f: f32) -> Option<Number> {
-        if f.is_finite() {
-            let n = {
-                #[cfg(not(feature = "arbitrary_precision"))]
-                {
-                    N::Float(f as f64)
-                }
-                #[cfg(feature = "arbitrary_precision")]
-                {
-                    ryu::Buffer::new().format_finite(f).to_owned()
-                }
-            };
-            Some(Number { n })
-        } else {
-            None
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    /// Not public API. Only tests use this.
-    #[doc(hidden)]
-    #[inline]
-    pub fn from_string_unchecked(n: String) -> Self {
-        Number { n }
-    }
-}
-
-impl Display for Number {
-    #[cfg(not(feature = "arbitrary_precision"))]
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        match self.n {
-            N::PosInt(u) => formatter.write_str(itoa::Buffer::new().format(u)),
-            N::NegInt(i) => formatter.write_str(itoa::Buffer::new().format(i)),
-            N::Float(f) => formatter.write_str(ryu::Buffer::new().format_finite(f)),
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        Display::fmt(&self.n, formatter)
-    }
-}
-
-impl Debug for Number {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        write!(formatter, "Number({})", self)
-    }
-}
-
-impl Serialize for Number {
-    #[cfg(not(feature = "arbitrary_precision"))]
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        match self.n {
-            N::PosInt(u) => serializer.serialize_u64(u),
-            N::NegInt(i) => serializer.serialize_i64(i),
-            N::Float(f) => serializer.serialize_f64(f),
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        use serde::ser::SerializeStruct;
-
-        let mut s = tri!(serializer.serialize_struct(TOKEN, 1));
-        tri!(s.serialize_field(TOKEN, &self.n));
-        s.end()
-    }
-}
-
-impl<'de> Deserialize<'de> for Number {
-    #[inline]
-    fn deserialize<D>(deserializer: D) -> Result<Number, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct NumberVisitor;
-
-        impl<'de> Visitor<'de> for NumberVisitor {
-            type Value = Number;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("a JSON number")
-            }
-
-            fn visit_i64<E>(self, value: i64) -> Result<Number, E> {
-                Ok(value.into())
-            }
-
-            fn visit_i128<E>(self, value: i128) -> Result<Number, E>
-            where
-                E: de::Error,
-            {
-                Number::from_i128(value)
-                    .ok_or_else(|| de::Error::custom("JSON number out of range"))
-            }
-
-            fn visit_u64<E>(self, value: u64) -> Result<Number, E> {
-                Ok(value.into())
-            }
-
-            fn visit_u128<E>(self, value: u128) -> Result<Number, E>
-            where
-                E: de::Error,
-            {
-                Number::from_u128(value)
-                    .ok_or_else(|| de::Error::custom("JSON number out of range"))
-            }
-
-            fn visit_f64<E>(self, value: f64) -> Result<Number, E>
-            where
-                E: de::Error,
-            {
-                Number::from_f64(value).ok_or_else(|| de::Error::custom("not a JSON number"))
-            }
-
-            #[cfg(feature = "arbitrary_precision")]
-            fn visit_map<V>(self, mut visitor: V) -> Result<Number, V::Error>
-            where
-                V: de::MapAccess<'de>,
-            {
-                let value = tri!(visitor.next_key::<NumberKey>());
-                if value.is_none() {
-                    return Err(de::Error::invalid_type(Unexpected::Map, &self));
-                }
-                let v: NumberFromString = tri!(visitor.next_value());
-                Ok(v.value)
-            }
-        }
-
-        deserializer.deserialize_any(NumberVisitor)
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-struct NumberKey;
-
-#[cfg(feature = "arbitrary_precision")]
-impl<'de> de::Deserialize<'de> for NumberKey {
-    fn deserialize<D>(deserializer: D) -> Result<NumberKey, D::Error>
-    where
-        D: de::Deserializer<'de>,
-    {
-        struct FieldVisitor;
-
-        impl<'de> de::Visitor<'de> for FieldVisitor {
-            type Value = ();
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("a valid number field")
-            }
-
-            fn visit_str<E>(self, s: &str) -> Result<(), E>
-            where
-                E: de::Error,
-            {
-                if s == TOKEN {
-                    Ok(())
-                } else {
-                    Err(de::Error::custom("expected field with custom name"))
-                }
-            }
-        }
-
-        tri!(deserializer.deserialize_identifier(FieldVisitor));
-        Ok(NumberKey)
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-pub struct NumberFromString {
-    pub value: Number,
-}
-
-#[cfg(feature = "arbitrary_precision")]
-impl<'de> de::Deserialize<'de> for NumberFromString {
-    fn deserialize<D>(deserializer: D) -> Result<NumberFromString, D::Error>
-    where
-        D: de::Deserializer<'de>,
-    {
-        struct Visitor;
-
-        impl<'de> de::Visitor<'de> for Visitor {
-            type Value = NumberFromString;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("string containing a number")
-            }
-
-            fn visit_str<E>(self, s: &str) -> Result<NumberFromString, E>
-            where
-                E: de::Error,
-            {
-                let n = tri!(s.parse().map_err(de::Error::custom));
-                Ok(NumberFromString { value: n })
-            }
-        }
-
-        deserializer.deserialize_str(Visitor)
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-fn invalid_number() -> Error {
-    Error::syntax(ErrorCode::InvalidNumber, 0, 0)
-}
-
-macro_rules! deserialize_any {
-    (@expand [$($num_string:tt)*]) => {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: Visitor<'de>,
-        {
-            match self.n {
-                N::PosInt(u) => visitor.visit_u64(u),
-                N::NegInt(i) => visitor.visit_i64(i),
-                N::Float(f) => visitor.visit_f64(f),
-            }
-        }
-
-        #[cfg(feature = "arbitrary_precision")]
-        fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-            where V: Visitor<'de>
-        {
-            if let Some(u) = self.as_u64() {
-                return visitor.visit_u64(u);
-            } else if let Some(i) = self.as_i64() {
-                return visitor.visit_i64(i);
-            } else if let Some(u) = self.as_u128() {
-                return visitor.visit_u128(u);
-            } else if let Some(i) = self.as_i128() {
-                return visitor.visit_i128(i);
-            } else if let Some(f) = self.as_f64() {
-                if ryu::Buffer::new().format_finite(f) == self.n || f.to_string() == self.n {
-                    return visitor.visit_f64(f);
-                }
-            }
-
-            visitor.visit_map(NumberDeserializer {
-                number: Some(self.$($num_string)*),
-            })
-        }
-    };
-
-    (owned) => {
-        deserialize_any!(@expand [n]);
-    };
-
-    (ref) => {
-        deserialize_any!(@expand [n.clone()]);
-    };
-}
-
-macro_rules! deserialize_number {
-    ($deserialize:ident => $visit:ident) => {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        fn $deserialize<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: Visitor<'de>,
-        {
-            self.deserialize_any(visitor)
-        }
-
-        #[cfg(feature = "arbitrary_precision")]
-        fn $deserialize<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: de::Visitor<'de>,
-        {
-            visitor.$visit(tri!(self.n.parse().map_err(|_| invalid_number())))
-        }
-    };
-}
-
-impl<'de> Deserializer<'de> for Number {
-    type Error = Error;
-
-    deserialize_any!(owned);
-
-    deserialize_number!(deserialize_i8 => visit_i8);
-    deserialize_number!(deserialize_i16 => visit_i16);
-    deserialize_number!(deserialize_i32 => visit_i32);
-    deserialize_number!(deserialize_i64 => visit_i64);
-    deserialize_number!(deserialize_i128 => visit_i128);
-    deserialize_number!(deserialize_u8 => visit_u8);
-    deserialize_number!(deserialize_u16 => visit_u16);
-    deserialize_number!(deserialize_u32 => visit_u32);
-    deserialize_number!(deserialize_u64 => visit_u64);
-    deserialize_number!(deserialize_u128 => visit_u128);
-    deserialize_number!(deserialize_f32 => visit_f32);
-    deserialize_number!(deserialize_f64 => visit_f64);
-
-    forward_to_deserialize_any! {
-        bool char str string bytes byte_buf option unit unit_struct
-        newtype_struct seq tuple tuple_struct map struct enum identifier
-        ignored_any
-    }
-}
-
-impl<'de> Deserializer<'de> for &Number {
-    type Error = Error;
-
-    deserialize_any!(ref);
-
-    deserialize_number!(deserialize_i8 => visit_i8);
-    deserialize_number!(deserialize_i16 => visit_i16);
-    deserialize_number!(deserialize_i32 => visit_i32);
-    deserialize_number!(deserialize_i64 => visit_i64);
-    deserialize_number!(deserialize_i128 => visit_i128);
-    deserialize_number!(deserialize_u8 => visit_u8);
-    deserialize_number!(deserialize_u16 => visit_u16);
-    deserialize_number!(deserialize_u32 => visit_u32);
-    deserialize_number!(deserialize_u64 => visit_u64);
-    deserialize_number!(deserialize_u128 => visit_u128);
-    deserialize_number!(deserialize_f32 => visit_f32);
-    deserialize_number!(deserialize_f64 => visit_f64);
-
-    forward_to_deserialize_any! {
-        bool char str string bytes byte_buf option unit unit_struct
-        newtype_struct seq tuple tuple_struct map struct enum identifier
-        ignored_any
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-pub(crate) struct NumberDeserializer {
-    pub number: Option<String>,
-}
-
-#[cfg(feature = "arbitrary_precision")]
-impl<'de> MapAccess<'de> for NumberDeserializer {
-    type Error = Error;
-
-    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
-    where
-        K: de::DeserializeSeed<'de>,
-    {
-        if self.number.is_none() {
-            return Ok(None);
-        }
-        seed.deserialize(NumberFieldDeserializer).map(Some)
-    }
-
-    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
-    where
-        V: de::DeserializeSeed<'de>,
-    {
-        seed.deserialize(self.number.take().unwrap().into_deserializer())
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-struct NumberFieldDeserializer;
-
-#[cfg(feature = "arbitrary_precision")]
-impl<'de> Deserializer<'de> for NumberFieldDeserializer {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: de::Visitor<'de>,
-    {
-        visitor.visit_borrowed_str(TOKEN)
-    }
-
-    forward_to_deserialize_any! {
-        bool u8 u16 u32 u64 u128 i8 i16 i32 i64 i128 f32 f64 char str string seq
-        bytes byte_buf map struct option unit newtype_struct ignored_any
-        unit_struct tuple_struct tuple enum identifier
-    }
-}
-
-impl From<ParserNumber> for Number {
-    fn from(value: ParserNumber) -> Self {
-        let n = match value {
-            ParserNumber::F64(f) => {
-                #[cfg(not(feature = "arbitrary_precision"))]
-                {
-                    N::Float(f)
-                }
-                #[cfg(feature = "arbitrary_precision")]
-                {
-                    ryu::Buffer::new().format_finite(f).to_owned()
-                }
-            }
-            ParserNumber::U64(u) => {
-                #[cfg(not(feature = "arbitrary_precision"))]
-                {
-                    N::PosInt(u)
-                }
-                #[cfg(feature = "arbitrary_precision")]
-                {
-                    itoa::Buffer::new().format(u).to_owned()
-                }
-            }
-            ParserNumber::I64(i) => {
-                #[cfg(not(feature = "arbitrary_precision"))]
-                {
-                    N::NegInt(i)
-                }
-                #[cfg(feature = "arbitrary_precision")]
-                {
-                    itoa::Buffer::new().format(i).to_owned()
-                }
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            ParserNumber::String(s) => s,
-        };
-        Number { n }
-    }
-}
-
-macro_rules! impl_from_unsigned {
-    (
-        $($ty:ty),*
-    ) => {
-        $(
-            impl From<$ty> for Number {
-                fn from(u: $ty) -> Self {
-                    let n = {
-                        #[cfg(not(feature = "arbitrary_precision"))]
-                        { N::PosInt(u as u64) }
-                        #[cfg(feature = "arbitrary_precision")]
-                        {
-                            itoa::Buffer::new().format(u).to_owned()
-                        }
-                    };
-                    Number { n }
-                }
-            }
-        )*
-    };
-}
-
-macro_rules! impl_from_signed {
-    (
-        $($ty:ty),*
-    ) => {
-        $(
-            impl From<$ty> for Number {
-                fn from(i: $ty) -> Self {
-                    let n = {
-                        #[cfg(not(feature = "arbitrary_precision"))]
-                        {
-                            if i < 0 {
-                                N::NegInt(i as i64)
-                            } else {
-                                N::PosInt(i as u64)
-                            }
-                        }
-                        #[cfg(feature = "arbitrary_precision")]
-                        {
-                            itoa::Buffer::new().format(i).to_owned()
-                        }
-                    };
-                    Number { n }
-                }
-            }
-        )*
-    };
-}
-
-impl_from_unsigned!(u8, u16, u32, u64, usize);
-impl_from_signed!(i8, i16, i32, i64, isize);
-
-#[cfg(feature = "arbitrary_precision")]
-impl_from_unsigned!(u128);
-#[cfg(feature = "arbitrary_precision")]
-impl_from_signed!(i128);
-
-impl Number {
-    #[cfg(not(feature = "arbitrary_precision"))]
-    #[cold]
-    pub(crate) fn unexpected(&self) -> Unexpected {
-        match self.n {
-            N::PosInt(u) => Unexpected::Unsigned(u),
-            N::NegInt(i) => Unexpected::Signed(i),
-            N::Float(f) => Unexpected::Float(f),
-        }
-    }
-
-    #[cfg(feature = "arbitrary_precision")]
-    #[cold]
-    pub(crate) fn unexpected(&self) -> Unexpected {
-        Unexpected::Other("number")
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/raw.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/raw.rs
deleted file mode 100644
index be4dad4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/raw.rs
+++ /dev/null
@@ -1,784 +0,0 @@
-use crate::error::Error;
-use alloc::borrow::ToOwned;
-use alloc::boxed::Box;
-use alloc::string::String;
-use core::fmt::{self, Debug, Display};
-use core::mem;
-use serde::de::value::BorrowedStrDeserializer;
-use serde::de::{
-    self, Deserialize, DeserializeSeed, Deserializer, IntoDeserializer, MapAccess, Unexpected,
-    Visitor,
-};
-use serde::forward_to_deserialize_any;
-use serde::ser::{Serialize, SerializeStruct, Serializer};
-
-/// Reference to a range of bytes encompassing a single valid JSON value in the
-/// input data.
-///
-/// A `RawValue` can be used to defer parsing parts of a payload until later,
-/// or to avoid parsing it at all in the case that part of the payload just
-/// needs to be transferred verbatim into a different output object.
-///
-/// When serializing, a value of this type will retain its original formatting
-/// and will not be minified or pretty-printed.
-///
-/// # Note
-///
-/// `RawValue` is only available if serde\_json is built with the `"raw_value"`
-/// feature.
-///
-/// ```toml
-/// [dependencies]
-/// serde_json = { version = "1.0", features = ["raw_value"] }
-/// ```
-///
-/// # Example
-///
-/// ```
-/// use serde::{Deserialize, Serialize};
-/// use serde_json::{Result, value::RawValue};
-///
-/// #[derive(Deserialize)]
-/// struct Input<'a> {
-///     code: u32,
-///     #[serde(borrow)]
-///     payload: &'a RawValue,
-/// }
-///
-/// #[derive(Serialize)]
-/// struct Output<'a> {
-///     info: (u32, &'a RawValue),
-/// }
-///
-/// // Efficiently rearrange JSON input containing separate "code" and "payload"
-/// // keys into a single "info" key holding an array of code and payload.
-/// //
-/// // This could be done equivalently using serde_json::Value as the type for
-/// // payload, but &RawValue will perform better because it does not require
-/// // memory allocation. The correct range of bytes is borrowed from the input
-/// // data and pasted verbatim into the output.
-/// fn rearrange(input: &str) -> Result<String> {
-///     let input: Input = serde_json::from_str(input)?;
-///
-///     let output = Output {
-///         info: (input.code, input.payload),
-///     };
-///
-///     serde_json::to_string(&output)
-/// }
-///
-/// fn main() -> Result<()> {
-///     let out = rearrange(r#" {"code": 200, "payload": {}} "#)?;
-///
-///     assert_eq!(out, r#"{"info":[200,{}]}"#);
-///
-///     Ok(())
-/// }
-/// ```
-///
-/// # Ownership
-///
-/// The typical usage of `RawValue` will be in the borrowed form:
-///
-/// ```
-/// # use serde::Deserialize;
-/// # use serde_json::value::RawValue;
-/// #
-/// #[derive(Deserialize)]
-/// struct SomeStruct<'a> {
-///     #[serde(borrow)]
-///     raw_value: &'a RawValue,
-/// }
-/// ```
-///
-/// The borrowed form is suitable when deserializing through
-/// [`serde_json::from_str`] and [`serde_json::from_slice`] which support
-/// borrowing from the input data without memory allocation.
-///
-/// When deserializing through [`serde_json::from_reader`] you will need to use
-/// the boxed form of `RawValue` instead. This is almost as efficient but
-/// involves buffering the raw value from the I/O stream into memory.
-///
-/// [`serde_json::from_str`]: crate::from_str
-/// [`serde_json::from_slice`]: crate::from_slice
-/// [`serde_json::from_reader`]: crate::from_reader
-///
-/// ```
-/// # use serde::Deserialize;
-/// # use serde_json::value::RawValue;
-/// #
-/// #[derive(Deserialize)]
-/// struct SomeStruct {
-///     raw_value: Box<RawValue>,
-/// }
-/// ```
-#[cfg_attr(docsrs, doc(cfg(feature = "raw_value")))]
-#[repr(transparent)]
-pub struct RawValue {
-    json: str,
-}
-
-impl RawValue {
-    const fn from_borrowed(json: &str) -> &Self {
-        unsafe { mem::transmute::<&str, &RawValue>(json) }
-    }
-
-    fn from_owned(json: Box<str>) -> Box<Self> {
-        unsafe { mem::transmute::<Box<str>, Box<RawValue>>(json) }
-    }
-
-    fn into_owned(raw_value: Box<Self>) -> Box<str> {
-        unsafe { mem::transmute::<Box<RawValue>, Box<str>>(raw_value) }
-    }
-}
-
-impl Clone for Box<RawValue> {
-    fn clone(&self) -> Self {
-        (**self).to_owned()
-    }
-}
-
-impl ToOwned for RawValue {
-    type Owned = Box<RawValue>;
-
-    fn to_owned(&self) -> Self::Owned {
-        RawValue::from_owned(self.json.to_owned().into_boxed_str())
-    }
-}
-
-impl Default for Box<RawValue> {
-    fn default() -> Self {
-        RawValue::NULL.to_owned()
-    }
-}
-
-impl Debug for RawValue {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter
-            .debug_tuple("RawValue")
-            .field(&format_args!("{}", &self.json))
-            .finish()
-    }
-}
-
-impl Display for RawValue {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.write_str(&self.json)
-    }
-}
-
-impl RawValue {
-    /// A constant RawValue with the JSON value `null`.
-    pub const NULL: &'static RawValue = RawValue::from_borrowed("null");
-    /// A constant RawValue with the JSON value `true`.
-    pub const TRUE: &'static RawValue = RawValue::from_borrowed("true");
-    /// A constant RawValue with the JSON value `false`.
-    pub const FALSE: &'static RawValue = RawValue::from_borrowed("false");
-
-    /// Convert an owned `String` of JSON data to an owned `RawValue`.
-    ///
-    /// This function is equivalent to `serde_json::from_str::<Box<RawValue>>`
-    /// except that we avoid an allocation and memcpy if both of the following
-    /// are true:
-    ///
-    /// - the input has no leading or trailing whitespace, and
-    /// - the input has capacity equal to its length.
-    pub fn from_string(json: String) -> Result<Box<Self>, Error> {
-        let borrowed = tri!(crate::from_str::<&Self>(&json));
-        if borrowed.json.len() < json.len() {
-            return Ok(borrowed.to_owned());
-        }
-        Ok(Self::from_owned(json.into_boxed_str()))
-    }
-
-    /// Access the JSON text underlying a raw value.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use serde::Deserialize;
-    /// use serde_json::{Result, value::RawValue};
-    ///
-    /// #[derive(Deserialize)]
-    /// struct Response<'a> {
-    ///     code: u32,
-    ///     #[serde(borrow)]
-    ///     payload: &'a RawValue,
-    /// }
-    ///
-    /// fn process(input: &str) -> Result<()> {
-    ///     let response: Response = serde_json::from_str(input)?;
-    ///
-    ///     let payload = response.payload.get();
-    ///     if payload.starts_with('{') {
-    ///         // handle a payload which is a JSON map
-    ///     } else {
-    ///         // handle any other type
-    ///     }
-    ///
-    ///     Ok(())
-    /// }
-    ///
-    /// fn main() -> Result<()> {
-    ///     process(r#" {"code": 200, "payload": {}} "#)?;
-    ///     Ok(())
-    /// }
-    /// ```
-    pub fn get(&self) -> &str {
-        &self.json
-    }
-}
-
-impl From<Box<RawValue>> for Box<str> {
-    fn from(raw_value: Box<RawValue>) -> Self {
-        RawValue::into_owned(raw_value)
-    }
-}
-
-/// Convert a `T` into a boxed `RawValue`.
-///
-/// # Example
-///
-/// ```
-/// // Upstream crate
-/// # #[derive(Serialize)]
-/// pub struct Thing {
-///     foo: String,
-///     bar: Option<String>,
-///     extra_data: Box<RawValue>,
-/// }
-///
-/// // Local crate
-/// use serde::Serialize;
-/// use serde_json::value::{to_raw_value, RawValue};
-///
-/// #[derive(Serialize)]
-/// struct MyExtraData {
-///     a: u32,
-///     b: u32,
-/// }
-///
-/// let my_thing = Thing {
-///     foo: "FooVal".into(),
-///     bar: None,
-///     extra_data: to_raw_value(&MyExtraData { a: 1, b: 2 }).unwrap(),
-/// };
-/// # assert_eq!(
-/// #     serde_json::to_value(my_thing).unwrap(),
-/// #     serde_json::json!({
-/// #         "foo": "FooVal",
-/// #         "bar": null,
-/// #         "extra_data": { "a": 1, "b": 2 }
-/// #     })
-/// # );
-/// ```
-///
-/// # Errors
-///
-/// This conversion can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-///
-/// ```
-/// use std::collections::BTreeMap;
-///
-/// // The keys in this map are vectors, not strings.
-/// let mut map = BTreeMap::new();
-/// map.insert(vec![32, 64], "x86");
-///
-/// println!("{}", serde_json::value::to_raw_value(&map).unwrap_err());
-/// ```
-#[cfg_attr(docsrs, doc(cfg(feature = "raw_value")))]
-pub fn to_raw_value<T>(value: &T) -> Result<Box<RawValue>, Error>
-where
-    T: ?Sized + Serialize,
-{
-    let json_string = tri!(crate::to_string(value));
-    Ok(RawValue::from_owned(json_string.into_boxed_str()))
-}
-
-pub const TOKEN: &str = "$serde_json::private::RawValue";
-
-impl Serialize for RawValue {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        let mut s = tri!(serializer.serialize_struct(TOKEN, 1));
-        tri!(s.serialize_field(TOKEN, &self.json));
-        s.end()
-    }
-}
-
-impl<'de: 'a, 'a> Deserialize<'de> for &'a RawValue {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct ReferenceVisitor;
-
-        impl<'de> Visitor<'de> for ReferenceVisitor {
-            type Value = &'de RawValue;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                write!(formatter, "any valid JSON value")
-            }
-
-            fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
-            where
-                V: MapAccess<'de>,
-            {
-                let value = tri!(visitor.next_key::<RawKey>());
-                if value.is_none() {
-                    return Err(de::Error::invalid_type(Unexpected::Map, &self));
-                }
-                visitor.next_value_seed(ReferenceFromString)
-            }
-        }
-
-        deserializer.deserialize_newtype_struct(TOKEN, ReferenceVisitor)
-    }
-}
-
-impl<'de> Deserialize<'de> for Box<RawValue> {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct BoxedVisitor;
-
-        impl<'de> Visitor<'de> for BoxedVisitor {
-            type Value = Box<RawValue>;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                write!(formatter, "any valid JSON value")
-            }
-
-            fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
-            where
-                V: MapAccess<'de>,
-            {
-                let value = tri!(visitor.next_key::<RawKey>());
-                if value.is_none() {
-                    return Err(de::Error::invalid_type(Unexpected::Map, &self));
-                }
-                visitor.next_value_seed(BoxedFromString)
-            }
-        }
-
-        deserializer.deserialize_newtype_struct(TOKEN, BoxedVisitor)
-    }
-}
-
-struct RawKey;
-
-impl<'de> Deserialize<'de> for RawKey {
-    fn deserialize<D>(deserializer: D) -> Result<RawKey, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct FieldVisitor;
-
-        impl<'de> Visitor<'de> for FieldVisitor {
-            type Value = ();
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("raw value")
-            }
-
-            fn visit_str<E>(self, s: &str) -> Result<(), E>
-            where
-                E: de::Error,
-            {
-                if s == TOKEN {
-                    Ok(())
-                } else {
-                    Err(de::Error::custom("unexpected raw value"))
-                }
-            }
-        }
-
-        tri!(deserializer.deserialize_identifier(FieldVisitor));
-        Ok(RawKey)
-    }
-}
-
-pub struct ReferenceFromString;
-
-impl<'de> DeserializeSeed<'de> for ReferenceFromString {
-    type Value = &'de RawValue;
-
-    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        deserializer.deserialize_str(self)
-    }
-}
-
-impl<'de> Visitor<'de> for ReferenceFromString {
-    type Value = &'de RawValue;
-
-    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str("raw value")
-    }
-
-    fn visit_borrowed_str<E>(self, s: &'de str) -> Result<Self::Value, E>
-    where
-        E: de::Error,
-    {
-        Ok(RawValue::from_borrowed(s))
-    }
-}
-
-pub struct BoxedFromString;
-
-impl<'de> DeserializeSeed<'de> for BoxedFromString {
-    type Value = Box<RawValue>;
-
-    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        deserializer.deserialize_str(self)
-    }
-}
-
-impl<'de> Visitor<'de> for BoxedFromString {
-    type Value = Box<RawValue>;
-
-    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str("raw value")
-    }
-
-    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
-    where
-        E: de::Error,
-    {
-        Ok(RawValue::from_owned(s.to_owned().into_boxed_str()))
-    }
-
-    #[cfg(any(feature = "std", feature = "alloc"))]
-    fn visit_string<E>(self, s: String) -> Result<Self::Value, E>
-    where
-        E: de::Error,
-    {
-        Ok(RawValue::from_owned(s.into_boxed_str()))
-    }
-}
-
-struct RawKeyDeserializer;
-
-impl<'de> Deserializer<'de> for RawKeyDeserializer {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: de::Visitor<'de>,
-    {
-        visitor.visit_borrowed_str(TOKEN)
-    }
-
-    forward_to_deserialize_any! {
-        bool u8 u16 u32 u64 u128 i8 i16 i32 i64 i128 f32 f64 char str string seq
-        bytes byte_buf map struct option unit newtype_struct ignored_any
-        unit_struct tuple_struct tuple enum identifier
-    }
-}
-
-pub struct OwnedRawDeserializer {
-    pub raw_value: Option<String>,
-}
-
-impl<'de> MapAccess<'de> for OwnedRawDeserializer {
-    type Error = Error;
-
-    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
-    where
-        K: de::DeserializeSeed<'de>,
-    {
-        if self.raw_value.is_none() {
-            return Ok(None);
-        }
-        seed.deserialize(RawKeyDeserializer).map(Some)
-    }
-
-    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
-    where
-        V: de::DeserializeSeed<'de>,
-    {
-        seed.deserialize(self.raw_value.take().unwrap().into_deserializer())
-    }
-}
-
-pub struct BorrowedRawDeserializer<'de> {
-    pub raw_value: Option<&'de str>,
-}
-
-impl<'de> MapAccess<'de> for BorrowedRawDeserializer<'de> {
-    type Error = Error;
-
-    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
-    where
-        K: de::DeserializeSeed<'de>,
-    {
-        if self.raw_value.is_none() {
-            return Ok(None);
-        }
-        seed.deserialize(RawKeyDeserializer).map(Some)
-    }
-
-    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
-    where
-        V: de::DeserializeSeed<'de>,
-    {
-        seed.deserialize(BorrowedStrDeserializer::new(self.raw_value.take().unwrap()))
-    }
-}
-
-impl<'de> IntoDeserializer<'de, Error> for &'de RawValue {
-    type Deserializer = &'de RawValue;
-
-    fn into_deserializer(self) -> Self::Deserializer {
-        self
-    }
-}
-
-impl<'de> Deserializer<'de> for &'de RawValue {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_any(visitor)
-    }
-
-    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_bool(visitor)
-    }
-
-    fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_i8(visitor)
-    }
-
-    fn deserialize_i16<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_i16(visitor)
-    }
-
-    fn deserialize_i32<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_i32(visitor)
-    }
-
-    fn deserialize_i64<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_i64(visitor)
-    }
-
-    fn deserialize_i128<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_i128(visitor)
-    }
-
-    fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_u8(visitor)
-    }
-
-    fn deserialize_u16<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_u16(visitor)
-    }
-
-    fn deserialize_u32<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_u32(visitor)
-    }
-
-    fn deserialize_u64<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_u64(visitor)
-    }
-
-    fn deserialize_u128<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_u128(visitor)
-    }
-
-    fn deserialize_f32<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_f32(visitor)
-    }
-
-    fn deserialize_f64<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_f64(visitor)
-    }
-
-    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_char(visitor)
-    }
-
-    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_str(visitor)
-    }
-
-    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_string(visitor)
-    }
-
-    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_bytes(visitor)
-    }
-
-    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_byte_buf(visitor)
-    }
-
-    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_option(visitor)
-    }
-
-    fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_unit(visitor)
-    }
-
-    fn deserialize_unit_struct<V>(self, name: &'static str, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_unit_struct(name, visitor)
-    }
-
-    fn deserialize_newtype_struct<V>(
-        self,
-        name: &'static str,
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_newtype_struct(name, visitor)
-    }
-
-    fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_seq(visitor)
-    }
-
-    fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_tuple(len, visitor)
-    }
-
-    fn deserialize_tuple_struct<V>(
-        self,
-        name: &'static str,
-        len: usize,
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_tuple_struct(name, len, visitor)
-    }
-
-    fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_map(visitor)
-    }
-
-    fn deserialize_struct<V>(
-        self,
-        name: &'static str,
-        fields: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_struct(name, fields, visitor)
-    }
-
-    fn deserialize_enum<V>(
-        self,
-        name: &'static str,
-        variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_enum(name, variants, visitor)
-    }
-
-    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_identifier(visitor)
-    }
-
-    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        crate::Deserializer::from_str(&self.json).deserialize_ignored_any(visitor)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/read.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/read.rs
deleted file mode 100644
index 0748af4..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/read.rs
+++ /dev/null
@@ -1,1089 +0,0 @@
-use crate::error::{Error, ErrorCode, Result};
-use alloc::vec::Vec;
-use core::cmp;
-use core::mem;
-use core::ops::Deref;
-use core::str;
-
-#[cfg(feature = "std")]
-use crate::io;
-#[cfg(feature = "std")]
-use crate::iter::LineColIterator;
-
-#[cfg(feature = "raw_value")]
-use crate::raw::BorrowedRawDeserializer;
-#[cfg(all(feature = "raw_value", feature = "std"))]
-use crate::raw::OwnedRawDeserializer;
-#[cfg(all(feature = "raw_value", feature = "std"))]
-use alloc::string::String;
-#[cfg(feature = "raw_value")]
-use serde::de::Visitor;
-
-/// Trait used by the deserializer for iterating over input. This is manually
-/// "specialized" for iterating over `&[u8]`. Once feature(specialization) is
-/// stable we can use actual specialization.
-///
-/// This trait is sealed and cannot be implemented for types outside of
-/// `serde_json`.
-pub trait Read<'de>: private::Sealed {
-    #[doc(hidden)]
-    fn next(&mut self) -> Result<Option<u8>>;
-    #[doc(hidden)]
-    fn peek(&mut self) -> Result<Option<u8>>;
-
-    /// Only valid after a call to peek(). Discards the peeked byte.
-    #[doc(hidden)]
-    fn discard(&mut self);
-
-    /// Position of the most recent call to next().
-    ///
-    /// The most recent call was probably next() and not peek(), but this method
-    /// should try to return a sensible result if the most recent call was
-    /// actually peek() because we don't always know.
-    ///
-    /// Only called in case of an error, so performance is not important.
-    #[doc(hidden)]
-    fn position(&self) -> Position;
-
-    /// Position of the most recent call to peek().
-    ///
-    /// The most recent call was probably peek() and not next(), but this method
-    /// should try to return a sensible result if the most recent call was
-    /// actually next() because we don't always know.
-    ///
-    /// Only called in case of an error, so performance is not important.
-    #[doc(hidden)]
-    fn peek_position(&self) -> Position;
-
-    /// Offset from the beginning of the input to the next byte that would be
-    /// returned by next() or peek().
-    #[doc(hidden)]
-    fn byte_offset(&self) -> usize;
-
-    /// Assumes the previous byte was a quotation mark. Parses a JSON-escaped
-    /// string until the next quotation mark using the given scratch space if
-    /// necessary. The scratch space is initially empty.
-    #[doc(hidden)]
-    fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec<u8>) -> Result<Reference<'de, 's, str>>;
-
-    /// Assumes the previous byte was a quotation mark. Parses a JSON-escaped
-    /// string until the next quotation mark using the given scratch space if
-    /// necessary. The scratch space is initially empty.
-    ///
-    /// This function returns the raw bytes in the string with escape sequences
-    /// expanded but without performing unicode validation.
-    #[doc(hidden)]
-    fn parse_str_raw<'s>(
-        &'s mut self,
-        scratch: &'s mut Vec<u8>,
-    ) -> Result<Reference<'de, 's, [u8]>>;
-
-    /// Assumes the previous byte was a quotation mark. Parses a JSON-escaped
-    /// string until the next quotation mark but discards the data.
-    #[doc(hidden)]
-    fn ignore_str(&mut self) -> Result<()>;
-
-    /// Assumes the previous byte was a hex escape sequence ('\u') in a string.
-    /// Parses next hexadecimal sequence.
-    #[doc(hidden)]
-    fn decode_hex_escape(&mut self) -> Result<u16>;
-
-    /// Switch raw buffering mode on.
-    ///
-    /// This is used when deserializing `RawValue`.
-    #[cfg(feature = "raw_value")]
-    #[doc(hidden)]
-    fn begin_raw_buffering(&mut self);
-
-    /// Switch raw buffering mode off and provides the raw buffered data to the
-    /// given visitor.
-    #[cfg(feature = "raw_value")]
-    #[doc(hidden)]
-    fn end_raw_buffering<V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: Visitor<'de>;
-
-    /// Whether StreamDeserializer::next needs to check the failed flag. True
-    /// for IoRead, false for StrRead and SliceRead which can track failure by
-    /// truncating their input slice to avoid the extra check on every next
-    /// call.
-    #[doc(hidden)]
-    const should_early_return_if_failed: bool;
-
-    /// Mark a persistent failure of StreamDeserializer, either by setting the
-    /// flag or by truncating the input data.
-    #[doc(hidden)]
-    fn set_failed(&mut self, failed: &mut bool);
-}
-
-pub struct Position {
-    pub line: usize,
-    pub column: usize,
-}
-
-pub enum Reference<'b, 'c, T>
-where
-    T: ?Sized + 'static,
-{
-    Borrowed(&'b T),
-    Copied(&'c T),
-}
-
-impl<'b, 'c, T> Deref for Reference<'b, 'c, T>
-where
-    T: ?Sized + 'static,
-{
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        match *self {
-            Reference::Borrowed(b) => b,
-            Reference::Copied(c) => c,
-        }
-    }
-}
-
-/// JSON input source that reads from a std::io input stream.
-#[cfg(feature = "std")]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-pub struct IoRead<R>
-where
-    R: io::Read,
-{
-    iter: LineColIterator<io::Bytes<R>>,
-    /// Temporary storage of peeked byte.
-    ch: Option<u8>,
-    #[cfg(feature = "raw_value")]
-    raw_buffer: Option<Vec<u8>>,
-}
-
-/// JSON input source that reads from a slice of bytes.
-//
-// This is more efficient than other iterators because peek() can be read-only
-// and we can compute line/col position only if an error happens.
-pub struct SliceRead<'a> {
-    slice: &'a [u8],
-    /// Index of the *next* byte that will be returned by next() or peek().
-    index: usize,
-    #[cfg(feature = "raw_value")]
-    raw_buffering_start_index: usize,
-}
-
-/// JSON input source that reads from a UTF-8 string.
-//
-// Able to elide UTF-8 checks by assuming that the input is valid UTF-8.
-pub struct StrRead<'a> {
-    delegate: SliceRead<'a>,
-    #[cfg(feature = "raw_value")]
-    data: &'a str,
-}
-
-// Prevent users from implementing the Read trait.
-mod private {
-    pub trait Sealed {}
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-#[cfg(feature = "std")]
-impl<R> IoRead<R>
-where
-    R: io::Read,
-{
-    /// Create a JSON input source to read from a std::io input stream.
-    ///
-    /// When reading from a source against which short reads are not efficient, such
-    /// as a [`File`], you will want to apply your own buffering because serde_json
-    /// will not buffer the input. See [`std::io::BufReader`].
-    ///
-    /// [`File`]: std::fs::File
-    pub fn new(reader: R) -> Self {
-        IoRead {
-            iter: LineColIterator::new(reader.bytes()),
-            ch: None,
-            #[cfg(feature = "raw_value")]
-            raw_buffer: None,
-        }
-    }
-}
-
-#[cfg(feature = "std")]
-impl<R> private::Sealed for IoRead<R> where R: io::Read {}
-
-#[cfg(feature = "std")]
-impl<R> IoRead<R>
-where
-    R: io::Read,
-{
-    fn parse_str_bytes<'s, T, F>(
-        &'s mut self,
-        scratch: &'s mut Vec<u8>,
-        validate: bool,
-        result: F,
-    ) -> Result<T>
-    where
-        T: 's,
-        F: FnOnce(&'s Self, &'s [u8]) -> Result<T>,
-    {
-        loop {
-            let ch = tri!(next_or_eof(self));
-            if !is_escape(ch, true) {
-                scratch.push(ch);
-                continue;
-            }
-            match ch {
-                b'"' => {
-                    return result(self, scratch);
-                }
-                b'\\' => {
-                    tri!(parse_escape(self, validate, scratch));
-                }
-                _ => {
-                    if validate {
-                        return error(self, ErrorCode::ControlCharacterWhileParsingString);
-                    }
-                    scratch.push(ch);
-                }
-            }
-        }
-    }
-}
-
-#[cfg(feature = "std")]
-impl<'de, R> Read<'de> for IoRead<R>
-where
-    R: io::Read,
-{
-    #[inline]
-    fn next(&mut self) -> Result<Option<u8>> {
-        match self.ch.take() {
-            Some(ch) => {
-                #[cfg(feature = "raw_value")]
-                {
-                    if let Some(buf) = &mut self.raw_buffer {
-                        buf.push(ch);
-                    }
-                }
-                Ok(Some(ch))
-            }
-            None => match self.iter.next() {
-                Some(Err(err)) => Err(Error::io(err)),
-                Some(Ok(ch)) => {
-                    #[cfg(feature = "raw_value")]
-                    {
-                        if let Some(buf) = &mut self.raw_buffer {
-                            buf.push(ch);
-                        }
-                    }
-                    Ok(Some(ch))
-                }
-                None => Ok(None),
-            },
-        }
-    }
-
-    #[inline]
-    fn peek(&mut self) -> Result<Option<u8>> {
-        match self.ch {
-            Some(ch) => Ok(Some(ch)),
-            None => match self.iter.next() {
-                Some(Err(err)) => Err(Error::io(err)),
-                Some(Ok(ch)) => {
-                    self.ch = Some(ch);
-                    Ok(self.ch)
-                }
-                None => Ok(None),
-            },
-        }
-    }
-
-    #[cfg(not(feature = "raw_value"))]
-    #[inline]
-    fn discard(&mut self) {
-        self.ch = None;
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn discard(&mut self) {
-        if let Some(ch) = self.ch.take() {
-            if let Some(buf) = &mut self.raw_buffer {
-                buf.push(ch);
-            }
-        }
-    }
-
-    fn position(&self) -> Position {
-        Position {
-            line: self.iter.line(),
-            column: self.iter.col(),
-        }
-    }
-
-    fn peek_position(&self) -> Position {
-        // The LineColIterator updates its position during peek() so it has the
-        // right one here.
-        self.position()
-    }
-
-    fn byte_offset(&self) -> usize {
-        match self.ch {
-            Some(_) => self.iter.byte_offset() - 1,
-            None => self.iter.byte_offset(),
-        }
-    }
-
-    fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec<u8>) -> Result<Reference<'de, 's, str>> {
-        self.parse_str_bytes(scratch, true, as_str)
-            .map(Reference::Copied)
-    }
-
-    fn parse_str_raw<'s>(
-        &'s mut self,
-        scratch: &'s mut Vec<u8>,
-    ) -> Result<Reference<'de, 's, [u8]>> {
-        self.parse_str_bytes(scratch, false, |_, bytes| Ok(bytes))
-            .map(Reference::Copied)
-    }
-
-    fn ignore_str(&mut self) -> Result<()> {
-        loop {
-            let ch = tri!(next_or_eof(self));
-            if !is_escape(ch, true) {
-                continue;
-            }
-            match ch {
-                b'"' => {
-                    return Ok(());
-                }
-                b'\\' => {
-                    tri!(ignore_escape(self));
-                }
-                _ => {
-                    return error(self, ErrorCode::ControlCharacterWhileParsingString);
-                }
-            }
-        }
-    }
-
-    fn decode_hex_escape(&mut self) -> Result<u16> {
-        let a = tri!(next_or_eof(self));
-        let b = tri!(next_or_eof(self));
-        let c = tri!(next_or_eof(self));
-        let d = tri!(next_or_eof(self));
-        match decode_four_hex_digits(a, b, c, d) {
-            Some(val) => Ok(val),
-            None => error(self, ErrorCode::InvalidEscape),
-        }
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn begin_raw_buffering(&mut self) {
-        self.raw_buffer = Some(Vec::new());
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn end_raw_buffering<V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: Visitor<'de>,
-    {
-        let raw = self.raw_buffer.take().unwrap();
-        let raw = match String::from_utf8(raw) {
-            Ok(raw) => raw,
-            Err(_) => return error(self, ErrorCode::InvalidUnicodeCodePoint),
-        };
-        visitor.visit_map(OwnedRawDeserializer {
-            raw_value: Some(raw),
-        })
-    }
-
-    const should_early_return_if_failed: bool = true;
-
-    #[inline]
-    #[cold]
-    fn set_failed(&mut self, failed: &mut bool) {
-        *failed = true;
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-impl<'a> SliceRead<'a> {
-    /// Create a JSON input source to read from a slice of bytes.
-    pub fn new(slice: &'a [u8]) -> Self {
-        SliceRead {
-            slice,
-            index: 0,
-            #[cfg(feature = "raw_value")]
-            raw_buffering_start_index: 0,
-        }
-    }
-
-    fn position_of_index(&self, i: usize) -> Position {
-        let start_of_line = match memchr::memrchr(b'\n', &self.slice[..i]) {
-            Some(position) => position + 1,
-            None => 0,
-        };
-        Position {
-            line: 1 + memchr::memchr_iter(b'\n', &self.slice[..start_of_line]).count(),
-            column: i - start_of_line,
-        }
-    }
-
-    fn skip_to_escape(&mut self, forbid_control_characters: bool) {
-        // Immediately bail-out on empty strings and consecutive escapes (e.g. \u041b\u0435)
-        if self.index == self.slice.len()
-            || is_escape(self.slice[self.index], forbid_control_characters)
-        {
-            return;
-        }
-        self.index += 1;
-
-        let rest = &self.slice[self.index..];
-
-        if !forbid_control_characters {
-            self.index += memchr::memchr2(b'"', b'\\', rest).unwrap_or(rest.len());
-            return;
-        }
-
-        // We wish to find the first byte in range 0x00..=0x1F or " or \. Ideally, we'd use
-        // something akin to memchr3, but the memchr crate does not support this at the moment.
-        // Therefore, we use a variation on Mycroft's algorithm [1] to provide performance better
-        // than a naive loop. It runs faster than equivalent two-pass memchr2+SWAR code on
-        // benchmarks and it's cross-platform, so probably the right fit.
-        // [1]: https://groups.google.com/forum/#!original/comp.lang.c/2HtQXvg7iKc/xOJeipH6KLMJ
-
-        #[cfg(fast_arithmetic = "64")]
-        type Chunk = u64;
-        #[cfg(fast_arithmetic = "32")]
-        type Chunk = u32;
-
-        const STEP: usize = mem::size_of::<Chunk>();
-        const ONE_BYTES: Chunk = Chunk::MAX / 255; // 0x0101...01
-
-        for chunk in rest.chunks_exact(STEP) {
-            let chars = Chunk::from_le_bytes(chunk.try_into().unwrap());
-            let contains_ctrl = chars.wrapping_sub(ONE_BYTES * 0x20) & !chars;
-            let chars_quote = chars ^ (ONE_BYTES * Chunk::from(b'"'));
-            let contains_quote = chars_quote.wrapping_sub(ONE_BYTES) & !chars_quote;
-            let chars_backslash = chars ^ (ONE_BYTES * Chunk::from(b'\\'));
-            let contains_backslash = chars_backslash.wrapping_sub(ONE_BYTES) & !chars_backslash;
-            let masked = (contains_ctrl | contains_quote | contains_backslash) & (ONE_BYTES << 7);
-            if masked != 0 {
-                // SAFETY: chunk is in-bounds for slice
-                self.index = unsafe { chunk.as_ptr().offset_from(self.slice.as_ptr()) } as usize
-                    + masked.trailing_zeros() as usize / 8;
-                return;
-            }
-        }
-
-        self.index += rest.len() / STEP * STEP;
-        self.skip_to_escape_slow();
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn skip_to_escape_slow(&mut self) {
-        while self.index < self.slice.len() && !is_escape(self.slice[self.index], true) {
-            self.index += 1;
-        }
-    }
-
-    /// The big optimization here over IoRead is that if the string contains no
-    /// backslash escape sequences, the returned &str is a slice of the raw JSON
-    /// data so we avoid copying into the scratch space.
-    fn parse_str_bytes<'s, T, F>(
-        &'s mut self,
-        scratch: &'s mut Vec<u8>,
-        validate: bool,
-        result: F,
-    ) -> Result<Reference<'a, 's, T>>
-    where
-        T: ?Sized + 's,
-        F: for<'f> FnOnce(&'s Self, &'f [u8]) -> Result<&'f T>,
-    {
-        // Index of the first byte not yet copied into the scratch space.
-        let mut start = self.index;
-
-        loop {
-            self.skip_to_escape(validate);
-            if self.index == self.slice.len() {
-                return error(self, ErrorCode::EofWhileParsingString);
-            }
-            match self.slice[self.index] {
-                b'"' => {
-                    if scratch.is_empty() {
-                        // Fast path: return a slice of the raw JSON without any
-                        // copying.
-                        let borrowed = &self.slice[start..self.index];
-                        self.index += 1;
-                        return result(self, borrowed).map(Reference::Borrowed);
-                    } else {
-                        scratch.extend_from_slice(&self.slice[start..self.index]);
-                        self.index += 1;
-                        return result(self, scratch).map(Reference::Copied);
-                    }
-                }
-                b'\\' => {
-                    scratch.extend_from_slice(&self.slice[start..self.index]);
-                    self.index += 1;
-                    tri!(parse_escape(self, validate, scratch));
-                    start = self.index;
-                }
-                _ => {
-                    self.index += 1;
-                    return error(self, ErrorCode::ControlCharacterWhileParsingString);
-                }
-            }
-        }
-    }
-}
-
-impl<'a> private::Sealed for SliceRead<'a> {}
-
-impl<'a> Read<'a> for SliceRead<'a> {
-    #[inline]
-    fn next(&mut self) -> Result<Option<u8>> {
-        // `Ok(self.slice.get(self.index).map(|ch| { self.index += 1; *ch }))`
-        // is about 10% slower.
-        Ok(if self.index < self.slice.len() {
-            let ch = self.slice[self.index];
-            self.index += 1;
-            Some(ch)
-        } else {
-            None
-        })
-    }
-
-    #[inline]
-    fn peek(&mut self) -> Result<Option<u8>> {
-        // `Ok(self.slice.get(self.index).map(|ch| *ch))` is about 10% slower
-        // for some reason.
-        Ok(if self.index < self.slice.len() {
-            Some(self.slice[self.index])
-        } else {
-            None
-        })
-    }
-
-    #[inline]
-    fn discard(&mut self) {
-        self.index += 1;
-    }
-
-    fn position(&self) -> Position {
-        self.position_of_index(self.index)
-    }
-
-    fn peek_position(&self) -> Position {
-        // Cap it at slice.len() just in case the most recent call was next()
-        // and it returned the last byte.
-        self.position_of_index(cmp::min(self.slice.len(), self.index + 1))
-    }
-
-    fn byte_offset(&self) -> usize {
-        self.index
-    }
-
-    fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec<u8>) -> Result<Reference<'a, 's, str>> {
-        self.parse_str_bytes(scratch, true, as_str)
-    }
-
-    fn parse_str_raw<'s>(
-        &'s mut self,
-        scratch: &'s mut Vec<u8>,
-    ) -> Result<Reference<'a, 's, [u8]>> {
-        self.parse_str_bytes(scratch, false, |_, bytes| Ok(bytes))
-    }
-
-    fn ignore_str(&mut self) -> Result<()> {
-        loop {
-            self.skip_to_escape(true);
-            if self.index == self.slice.len() {
-                return error(self, ErrorCode::EofWhileParsingString);
-            }
-            match self.slice[self.index] {
-                b'"' => {
-                    self.index += 1;
-                    return Ok(());
-                }
-                b'\\' => {
-                    self.index += 1;
-                    tri!(ignore_escape(self));
-                }
-                _ => {
-                    return error(self, ErrorCode::ControlCharacterWhileParsingString);
-                }
-            }
-        }
-    }
-
-    #[inline]
-    fn decode_hex_escape(&mut self) -> Result<u16> {
-        match self.slice[self.index..] {
-            [a, b, c, d, ..] => {
-                self.index += 4;
-                match decode_four_hex_digits(a, b, c, d) {
-                    Some(val) => Ok(val),
-                    None => error(self, ErrorCode::InvalidEscape),
-                }
-            }
-            _ => {
-                self.index = self.slice.len();
-                error(self, ErrorCode::EofWhileParsingString)
-            }
-        }
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn begin_raw_buffering(&mut self) {
-        self.raw_buffering_start_index = self.index;
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn end_raw_buffering<V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: Visitor<'a>,
-    {
-        let raw = &self.slice[self.raw_buffering_start_index..self.index];
-        let raw = match str::from_utf8(raw) {
-            Ok(raw) => raw,
-            Err(_) => return error(self, ErrorCode::InvalidUnicodeCodePoint),
-        };
-        visitor.visit_map(BorrowedRawDeserializer {
-            raw_value: Some(raw),
-        })
-    }
-
-    const should_early_return_if_failed: bool = false;
-
-    #[inline]
-    #[cold]
-    fn set_failed(&mut self, _failed: &mut bool) {
-        self.slice = &self.slice[..self.index];
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-impl<'a> StrRead<'a> {
-    /// Create a JSON input source to read from a UTF-8 string.
-    pub fn new(s: &'a str) -> Self {
-        StrRead {
-            delegate: SliceRead::new(s.as_bytes()),
-            #[cfg(feature = "raw_value")]
-            data: s,
-        }
-    }
-}
-
-impl<'a> private::Sealed for StrRead<'a> {}
-
-impl<'a> Read<'a> for StrRead<'a> {
-    #[inline]
-    fn next(&mut self) -> Result<Option<u8>> {
-        self.delegate.next()
-    }
-
-    #[inline]
-    fn peek(&mut self) -> Result<Option<u8>> {
-        self.delegate.peek()
-    }
-
-    #[inline]
-    fn discard(&mut self) {
-        self.delegate.discard();
-    }
-
-    fn position(&self) -> Position {
-        self.delegate.position()
-    }
-
-    fn peek_position(&self) -> Position {
-        self.delegate.peek_position()
-    }
-
-    fn byte_offset(&self) -> usize {
-        self.delegate.byte_offset()
-    }
-
-    fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec<u8>) -> Result<Reference<'a, 's, str>> {
-        self.delegate.parse_str_bytes(scratch, true, |_, bytes| {
-            // The deserialization input came in as &str with a UTF-8 guarantee,
-            // and the \u-escapes are checked along the way, so don't need to
-            // check here.
-            Ok(unsafe { str::from_utf8_unchecked(bytes) })
-        })
-    }
-
-    fn parse_str_raw<'s>(
-        &'s mut self,
-        scratch: &'s mut Vec<u8>,
-    ) -> Result<Reference<'a, 's, [u8]>> {
-        self.delegate.parse_str_raw(scratch)
-    }
-
-    fn ignore_str(&mut self) -> Result<()> {
-        self.delegate.ignore_str()
-    }
-
-    fn decode_hex_escape(&mut self) -> Result<u16> {
-        self.delegate.decode_hex_escape()
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn begin_raw_buffering(&mut self) {
-        self.delegate.begin_raw_buffering();
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn end_raw_buffering<V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: Visitor<'a>,
-    {
-        let raw = &self.data[self.delegate.raw_buffering_start_index..self.delegate.index];
-        visitor.visit_map(BorrowedRawDeserializer {
-            raw_value: Some(raw),
-        })
-    }
-
-    const should_early_return_if_failed: bool = false;
-
-    #[inline]
-    #[cold]
-    fn set_failed(&mut self, failed: &mut bool) {
-        self.delegate.set_failed(failed);
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-impl<'de, R> private::Sealed for &mut R where R: Read<'de> {}
-
-impl<'de, R> Read<'de> for &mut R
-where
-    R: Read<'de>,
-{
-    fn next(&mut self) -> Result<Option<u8>> {
-        R::next(self)
-    }
-
-    fn peek(&mut self) -> Result<Option<u8>> {
-        R::peek(self)
-    }
-
-    fn discard(&mut self) {
-        R::discard(self);
-    }
-
-    fn position(&self) -> Position {
-        R::position(self)
-    }
-
-    fn peek_position(&self) -> Position {
-        R::peek_position(self)
-    }
-
-    fn byte_offset(&self) -> usize {
-        R::byte_offset(self)
-    }
-
-    fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec<u8>) -> Result<Reference<'de, 's, str>> {
-        R::parse_str(self, scratch)
-    }
-
-    fn parse_str_raw<'s>(
-        &'s mut self,
-        scratch: &'s mut Vec<u8>,
-    ) -> Result<Reference<'de, 's, [u8]>> {
-        R::parse_str_raw(self, scratch)
-    }
-
-    fn ignore_str(&mut self) -> Result<()> {
-        R::ignore_str(self)
-    }
-
-    fn decode_hex_escape(&mut self) -> Result<u16> {
-        R::decode_hex_escape(self)
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn begin_raw_buffering(&mut self) {
-        R::begin_raw_buffering(self);
-    }
-
-    #[cfg(feature = "raw_value")]
-    fn end_raw_buffering<V>(&mut self, visitor: V) -> Result<V::Value>
-    where
-        V: Visitor<'de>,
-    {
-        R::end_raw_buffering(self, visitor)
-    }
-
-    const should_early_return_if_failed: bool = R::should_early_return_if_failed;
-
-    fn set_failed(&mut self, failed: &mut bool) {
-        R::set_failed(self, failed);
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-/// Marker for whether StreamDeserializer can implement FusedIterator.
-pub trait Fused: private::Sealed {}
-impl<'a> Fused for SliceRead<'a> {}
-impl<'a> Fused for StrRead<'a> {}
-
-fn is_escape(ch: u8, including_control_characters: bool) -> bool {
-    ch == b'"' || ch == b'\\' || (including_control_characters && ch < 0x20)
-}
-
-fn next_or_eof<'de, R>(read: &mut R) -> Result<u8>
-where
-    R: ?Sized + Read<'de>,
-{
-    match tri!(read.next()) {
-        Some(b) => Ok(b),
-        None => error(read, ErrorCode::EofWhileParsingString),
-    }
-}
-
-fn peek_or_eof<'de, R>(read: &mut R) -> Result<u8>
-where
-    R: ?Sized + Read<'de>,
-{
-    match tri!(read.peek()) {
-        Some(b) => Ok(b),
-        None => error(read, ErrorCode::EofWhileParsingString),
-    }
-}
-
-fn error<'de, R, T>(read: &R, reason: ErrorCode) -> Result<T>
-where
-    R: ?Sized + Read<'de>,
-{
-    let position = read.position();
-    Err(Error::syntax(reason, position.line, position.column))
-}
-
-fn as_str<'de, 's, R: Read<'de>>(read: &R, slice: &'s [u8]) -> Result<&'s str> {
-    str::from_utf8(slice).or_else(|_| error(read, ErrorCode::InvalidUnicodeCodePoint))
-}
-
-/// Parses a JSON escape sequence and appends it into the scratch space. Assumes
-/// the previous byte read was a backslash.
-fn parse_escape<'de, R: Read<'de>>(
-    read: &mut R,
-    validate: bool,
-    scratch: &mut Vec<u8>,
-) -> Result<()> {
-    let ch = tri!(next_or_eof(read));
-
-    match ch {
-        b'"' => scratch.push(b'"'),
-        b'\\' => scratch.push(b'\\'),
-        b'/' => scratch.push(b'/'),
-        b'b' => scratch.push(b'\x08'),
-        b'f' => scratch.push(b'\x0c'),
-        b'n' => scratch.push(b'\n'),
-        b'r' => scratch.push(b'\r'),
-        b't' => scratch.push(b'\t'),
-        b'u' => return parse_unicode_escape(read, validate, scratch),
-        _ => return error(read, ErrorCode::InvalidEscape),
-    }
-
-    Ok(())
-}
-
-/// Parses a JSON \u escape and appends it into the scratch space. Assumes `\u`
-/// has just been read.
-#[cold]
-fn parse_unicode_escape<'de, R: Read<'de>>(
-    read: &mut R,
-    validate: bool,
-    scratch: &mut Vec<u8>,
-) -> Result<()> {
-    let mut n = tri!(read.decode_hex_escape());
-
-    // Non-BMP characters are encoded as a sequence of two hex escapes,
-    // representing UTF-16 surrogates. If deserializing a utf-8 string the
-    // surrogates are required to be paired, whereas deserializing a byte string
-    // accepts lone surrogates.
-    if validate && n >= 0xDC00 && n <= 0xDFFF {
-        // XXX: This is actually a trailing surrogate.
-        return error(read, ErrorCode::LoneLeadingSurrogateInHexEscape);
-    }
-
-    loop {
-        if n < 0xD800 || n > 0xDBFF {
-            // Every u16 outside of the surrogate ranges is guaranteed to be a
-            // legal char.
-            push_wtf8_codepoint(n as u32, scratch);
-            return Ok(());
-        }
-
-        // n is a leading surrogate, we now expect a trailing surrogate.
-        let n1 = n;
-
-        if tri!(peek_or_eof(read)) == b'\\' {
-            read.discard();
-        } else {
-            return if validate {
-                read.discard();
-                error(read, ErrorCode::UnexpectedEndOfHexEscape)
-            } else {
-                push_wtf8_codepoint(n1 as u32, scratch);
-                Ok(())
-            };
-        }
-
-        if tri!(peek_or_eof(read)) == b'u' {
-            read.discard();
-        } else {
-            return if validate {
-                read.discard();
-                error(read, ErrorCode::UnexpectedEndOfHexEscape)
-            } else {
-                push_wtf8_codepoint(n1 as u32, scratch);
-                // The \ prior to this byte started an escape sequence, so we
-                // need to parse that now. This recursive call does not blow the
-                // stack on malicious input because the escape is not \u, so it
-                // will be handled by one of the easy nonrecursive cases.
-                parse_escape(read, validate, scratch)
-            };
-        }
-
-        let n2 = tri!(read.decode_hex_escape());
-
-        if n2 < 0xDC00 || n2 > 0xDFFF {
-            if validate {
-                return error(read, ErrorCode::LoneLeadingSurrogateInHexEscape);
-            }
-            push_wtf8_codepoint(n1 as u32, scratch);
-            // If n2 is a leading surrogate, we need to restart.
-            n = n2;
-            continue;
-        }
-
-        // This value is in range U+10000..=U+10FFFF, which is always a valid
-        // codepoint.
-        let n = ((((n1 - 0xD800) as u32) << 10) | (n2 - 0xDC00) as u32) + 0x1_0000;
-        push_wtf8_codepoint(n, scratch);
-        return Ok(());
-    }
-}
-
-/// Adds a WTF-8 codepoint to the end of the buffer. This is a more efficient
-/// implementation of String::push. The codepoint may be a surrogate.
-#[inline]
-fn push_wtf8_codepoint(n: u32, scratch: &mut Vec<u8>) {
-    if n < 0x80 {
-        scratch.push(n as u8);
-        return;
-    }
-
-    scratch.reserve(4);
-
-    // SAFETY: After the `reserve` call, `scratch` has at least 4 bytes of
-    // allocated but unintialized memory after its last initialized byte, which
-    // is where `ptr` points. All reachable match arms write `encoded_len` bytes
-    // to that region and update the length accordingly, and `encoded_len` is
-    // always <= 4.
-    unsafe {
-        let ptr = scratch.as_mut_ptr().add(scratch.len());
-
-        let encoded_len = match n {
-            0..=0x7F => unreachable!(),
-            0x80..=0x7FF => {
-                ptr.write(((n >> 6) & 0b0001_1111) as u8 | 0b1100_0000);
-                2
-            }
-            0x800..=0xFFFF => {
-                ptr.write(((n >> 12) & 0b0000_1111) as u8 | 0b1110_0000);
-                ptr.add(1)
-                    .write(((n >> 6) & 0b0011_1111) as u8 | 0b1000_0000);
-                3
-            }
-            0x1_0000..=0x10_FFFF => {
-                ptr.write(((n >> 18) & 0b0000_0111) as u8 | 0b1111_0000);
-                ptr.add(1)
-                    .write(((n >> 12) & 0b0011_1111) as u8 | 0b1000_0000);
-                ptr.add(2)
-                    .write(((n >> 6) & 0b0011_1111) as u8 | 0b1000_0000);
-                4
-            }
-            0x11_0000.. => unreachable!(),
-        };
-        ptr.add(encoded_len - 1)
-            .write((n & 0b0011_1111) as u8 | 0b1000_0000);
-
-        scratch.set_len(scratch.len() + encoded_len);
-    }
-}
-
-/// Parses a JSON escape sequence and discards the value. Assumes the previous
-/// byte read was a backslash.
-fn ignore_escape<'de, R>(read: &mut R) -> Result<()>
-where
-    R: ?Sized + Read<'de>,
-{
-    let ch = tri!(next_or_eof(read));
-
-    match ch {
-        b'"' | b'\\' | b'/' | b'b' | b'f' | b'n' | b'r' | b't' => {}
-        b'u' => {
-            // At this point we don't care if the codepoint is valid. We just
-            // want to consume it. We don't actually know what is valid or not
-            // at this point, because that depends on if this string will
-            // ultimately be parsed into a string or a byte buffer in the "real"
-            // parse.
-
-            tri!(read.decode_hex_escape());
-        }
-        _ => {
-            return error(read, ErrorCode::InvalidEscape);
-        }
-    }
-
-    Ok(())
-}
-
-const fn decode_hex_val_slow(val: u8) -> Option<u8> {
-    match val {
-        b'0'..=b'9' => Some(val - b'0'),
-        b'A'..=b'F' => Some(val - b'A' + 10),
-        b'a'..=b'f' => Some(val - b'a' + 10),
-        _ => None,
-    }
-}
-
-const fn build_hex_table(shift: usize) -> [i16; 256] {
-    let mut table = [0; 256];
-    let mut ch = 0;
-    while ch < 256 {
-        table[ch] = match decode_hex_val_slow(ch as u8) {
-            Some(val) => (val as i16) << shift,
-            None => -1,
-        };
-        ch += 1;
-    }
-    table
-}
-
-static HEX0: [i16; 256] = build_hex_table(0);
-static HEX1: [i16; 256] = build_hex_table(4);
-
-fn decode_four_hex_digits(a: u8, b: u8, c: u8, d: u8) -> Option<u16> {
-    let a = HEX1[a as usize] as i32;
-    let b = HEX0[b as usize] as i32;
-    let c = HEX1[c as usize] as i32;
-    let d = HEX0[d as usize] as i32;
-
-    let codepoint = ((a | b) << 8) | c | d;
-
-    // A single sign bit check.
-    if codepoint >= 0 {
-        Some(codepoint as u16)
-    } else {
-        None
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/ser.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/ser.rs
deleted file mode 100644
index 9b14389..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/ser.rs
+++ /dev/null
@@ -1,2277 +0,0 @@
-//! Serialize a Rust data structure into JSON data.
-
-use crate::error::{Error, ErrorCode, Result};
-use crate::io;
-use alloc::string::String;
-#[cfg(feature = "raw_value")]
-use alloc::string::ToString;
-use alloc::vec::Vec;
-use core::fmt::{self, Display};
-use core::num::FpCategory;
-use serde::ser::{self, Impossible, Serialize};
-
-/// A structure for serializing Rust values into JSON.
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-pub struct Serializer<W, F = CompactFormatter> {
-    writer: W,
-    formatter: F,
-}
-
-impl<W> Serializer<W>
-where
-    W: io::Write,
-{
-    /// Creates a new JSON serializer.
-    #[inline]
-    pub fn new(writer: W) -> Self {
-        Serializer::with_formatter(writer, CompactFormatter)
-    }
-}
-
-impl<'a, W> Serializer<W, PrettyFormatter<'a>>
-where
-    W: io::Write,
-{
-    /// Creates a new JSON pretty print serializer.
-    #[inline]
-    pub fn pretty(writer: W) -> Self {
-        Serializer::with_formatter(writer, PrettyFormatter::new())
-    }
-}
-
-impl<W, F> Serializer<W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    /// Creates a new JSON visitor whose output will be written to the writer
-    /// specified.
-    #[inline]
-    pub fn with_formatter(writer: W, formatter: F) -> Self {
-        Serializer { writer, formatter }
-    }
-
-    /// Unwrap the `Writer` from the `Serializer`.
-    #[inline]
-    pub fn into_inner(self) -> W {
-        self.writer
-    }
-}
-
-impl<'a, W, F> ser::Serializer for &'a mut Serializer<W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    type SerializeSeq = Compound<'a, W, F>;
-    type SerializeTuple = Compound<'a, W, F>;
-    type SerializeTupleStruct = Compound<'a, W, F>;
-    type SerializeTupleVariant = Compound<'a, W, F>;
-    type SerializeMap = Compound<'a, W, F>;
-    type SerializeStruct = Compound<'a, W, F>;
-    type SerializeStructVariant = Compound<'a, W, F>;
-
-    #[inline]
-    fn serialize_bool(self, value: bool) -> Result<()> {
-        self.formatter
-            .write_bool(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_i8(self, value: i8) -> Result<()> {
-        self.formatter
-            .write_i8(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_i16(self, value: i16) -> Result<()> {
-        self.formatter
-            .write_i16(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_i32(self, value: i32) -> Result<()> {
-        self.formatter
-            .write_i32(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_i64(self, value: i64) -> Result<()> {
-        self.formatter
-            .write_i64(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    fn serialize_i128(self, value: i128) -> Result<()> {
-        self.formatter
-            .write_i128(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_u8(self, value: u8) -> Result<()> {
-        self.formatter
-            .write_u8(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_u16(self, value: u16) -> Result<()> {
-        self.formatter
-            .write_u16(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_u32(self, value: u32) -> Result<()> {
-        self.formatter
-            .write_u32(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_u64(self, value: u64) -> Result<()> {
-        self.formatter
-            .write_u64(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    fn serialize_u128(self, value: u128) -> Result<()> {
-        self.formatter
-            .write_u128(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_f32(self, value: f32) -> Result<()> {
-        match value.classify() {
-            FpCategory::Nan | FpCategory::Infinite => self
-                .formatter
-                .write_null(&mut self.writer)
-                .map_err(Error::io),
-            _ => self
-                .formatter
-                .write_f32(&mut self.writer, value)
-                .map_err(Error::io),
-        }
-    }
-
-    #[inline]
-    fn serialize_f64(self, value: f64) -> Result<()> {
-        match value.classify() {
-            FpCategory::Nan | FpCategory::Infinite => self
-                .formatter
-                .write_null(&mut self.writer)
-                .map_err(Error::io),
-            _ => self
-                .formatter
-                .write_f64(&mut self.writer, value)
-                .map_err(Error::io),
-        }
-    }
-
-    #[inline]
-    fn serialize_char(self, value: char) -> Result<()> {
-        // A char encoded as UTF-8 takes 4 bytes at most.
-        let mut buf = [0; 4];
-        self.serialize_str(value.encode_utf8(&mut buf))
-    }
-
-    #[inline]
-    fn serialize_str(self, value: &str) -> Result<()> {
-        format_escaped_str(&mut self.writer, &mut self.formatter, value).map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_bytes(self, value: &[u8]) -> Result<()> {
-        self.formatter
-            .write_byte_array(&mut self.writer, value)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_unit(self) -> Result<()> {
-        self.formatter
-            .write_null(&mut self.writer)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<()> {
-        self.serialize_unit()
-    }
-
-    #[inline]
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-    ) -> Result<()> {
-        self.serialize_str(variant)
-    }
-
-    /// Serialize newtypes without an object wrapper.
-    #[inline]
-    fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        value.serialize(self)
-    }
-
-    #[inline]
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-        value: &T,
-    ) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        tri!(self
-            .formatter
-            .begin_object(&mut self.writer)
-            .map_err(Error::io));
-        tri!(self
-            .formatter
-            .begin_object_key(&mut self.writer, true)
-            .map_err(Error::io));
-        tri!(self.serialize_str(variant));
-        tri!(self
-            .formatter
-            .end_object_key(&mut self.writer)
-            .map_err(Error::io));
-        tri!(self
-            .formatter
-            .begin_object_value(&mut self.writer)
-            .map_err(Error::io));
-        tri!(value.serialize(&mut *self));
-        tri!(self
-            .formatter
-            .end_object_value(&mut self.writer)
-            .map_err(Error::io));
-        self.formatter
-            .end_object(&mut self.writer)
-            .map_err(Error::io)
-    }
-
-    #[inline]
-    fn serialize_none(self) -> Result<()> {
-        self.serialize_unit()
-    }
-
-    #[inline]
-    fn serialize_some<T>(self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        value.serialize(self)
-    }
-
-    #[inline]
-    fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq> {
-        tri!(self
-            .formatter
-            .begin_array(&mut self.writer)
-            .map_err(Error::io));
-        if len == Some(0) {
-            tri!(self
-                .formatter
-                .end_array(&mut self.writer)
-                .map_err(Error::io));
-            Ok(Compound::Map {
-                ser: self,
-                state: State::Empty,
-            })
-        } else {
-            Ok(Compound::Map {
-                ser: self,
-                state: State::First,
-            })
-        }
-    }
-
-    #[inline]
-    fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple> {
-        self.serialize_seq(Some(len))
-    }
-
-    #[inline]
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        self.serialize_seq(Some(len))
-    }
-
-    #[inline]
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-        len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        tri!(self
-            .formatter
-            .begin_object(&mut self.writer)
-            .map_err(Error::io));
-        tri!(self
-            .formatter
-            .begin_object_key(&mut self.writer, true)
-            .map_err(Error::io));
-        tri!(self.serialize_str(variant));
-        tri!(self
-            .formatter
-            .end_object_key(&mut self.writer)
-            .map_err(Error::io));
-        tri!(self
-            .formatter
-            .begin_object_value(&mut self.writer)
-            .map_err(Error::io));
-        self.serialize_seq(Some(len))
-    }
-
-    #[inline]
-    fn serialize_map(self, len: Option<usize>) -> Result<Self::SerializeMap> {
-        tri!(self
-            .formatter
-            .begin_object(&mut self.writer)
-            .map_err(Error::io));
-        if len == Some(0) {
-            tri!(self
-                .formatter
-                .end_object(&mut self.writer)
-                .map_err(Error::io));
-            Ok(Compound::Map {
-                ser: self,
-                state: State::Empty,
-            })
-        } else {
-            Ok(Compound::Map {
-                ser: self,
-                state: State::First,
-            })
-        }
-    }
-
-    #[inline]
-    fn serialize_struct(self, name: &'static str, len: usize) -> Result<Self::SerializeStruct> {
-        match name {
-            #[cfg(feature = "arbitrary_precision")]
-            crate::number::TOKEN => Ok(Compound::Number { ser: self }),
-            #[cfg(feature = "raw_value")]
-            crate::raw::TOKEN => Ok(Compound::RawValue { ser: self }),
-            _ => self.serialize_map(Some(len)),
-        }
-    }
-
-    #[inline]
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-        len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        tri!(self
-            .formatter
-            .begin_object(&mut self.writer)
-            .map_err(Error::io));
-        tri!(self
-            .formatter
-            .begin_object_key(&mut self.writer, true)
-            .map_err(Error::io));
-        tri!(self.serialize_str(variant));
-        tri!(self
-            .formatter
-            .end_object_key(&mut self.writer)
-            .map_err(Error::io));
-        tri!(self
-            .formatter
-            .begin_object_value(&mut self.writer)
-            .map_err(Error::io));
-        self.serialize_map(Some(len))
-    }
-
-    fn collect_str<T>(self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Display,
-    {
-        use self::fmt::Write;
-
-        struct Adapter<'ser, W: 'ser, F: 'ser> {
-            writer: &'ser mut W,
-            formatter: &'ser mut F,
-            error: Option<io::Error>,
-        }
-
-        impl<'ser, W, F> Write for Adapter<'ser, W, F>
-        where
-            W: io::Write,
-            F: Formatter,
-        {
-            fn write_str(&mut self, s: &str) -> fmt::Result {
-                debug_assert!(self.error.is_none());
-                match format_escaped_str_contents(self.writer, self.formatter, s) {
-                    Ok(()) => Ok(()),
-                    Err(err) => {
-                        self.error = Some(err);
-                        Err(fmt::Error)
-                    }
-                }
-            }
-        }
-
-        tri!(self
-            .formatter
-            .begin_string(&mut self.writer)
-            .map_err(Error::io));
-        let mut adapter = Adapter {
-            writer: &mut self.writer,
-            formatter: &mut self.formatter,
-            error: None,
-        };
-        match write!(adapter, "{}", value) {
-            Ok(()) => debug_assert!(adapter.error.is_none()),
-            Err(fmt::Error) => {
-                return Err(Error::io(adapter.error.expect("there should be an error")));
-            }
-        }
-        self.formatter
-            .end_string(&mut self.writer)
-            .map_err(Error::io)
-    }
-}
-
-// Not public API. Should be pub(crate).
-#[doc(hidden)]
-#[derive(Eq, PartialEq)]
-pub enum State {
-    Empty,
-    First,
-    Rest,
-}
-
-// Not public API. Should be pub(crate).
-#[doc(hidden)]
-pub enum Compound<'a, W: 'a, F: 'a> {
-    Map {
-        ser: &'a mut Serializer<W, F>,
-        state: State,
-    },
-    #[cfg(feature = "arbitrary_precision")]
-    Number { ser: &'a mut Serializer<W, F> },
-    #[cfg(feature = "raw_value")]
-    RawValue { ser: &'a mut Serializer<W, F> },
-}
-
-impl<'a, W, F> ser::SerializeSeq for Compound<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match self {
-            Compound::Map { ser, state } => {
-                tri!(ser
-                    .formatter
-                    .begin_array_value(&mut ser.writer, *state == State::First)
-                    .map_err(Error::io));
-                *state = State::Rest;
-                tri!(value.serialize(&mut **ser));
-                ser.formatter
-                    .end_array_value(&mut ser.writer)
-                    .map_err(Error::io)
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-
-    #[inline]
-    fn end(self) -> Result<()> {
-        match self {
-            Compound::Map { ser, state } => match state {
-                State::Empty => Ok(()),
-                _ => ser.formatter.end_array(&mut ser.writer).map_err(Error::io),
-            },
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-}
-
-impl<'a, W, F> ser::SerializeTuple for Compound<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        ser::SerializeSeq::serialize_element(self, value)
-    }
-
-    #[inline]
-    fn end(self) -> Result<()> {
-        ser::SerializeSeq::end(self)
-    }
-}
-
-impl<'a, W, F> ser::SerializeTupleStruct for Compound<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        ser::SerializeSeq::serialize_element(self, value)
-    }
-
-    #[inline]
-    fn end(self) -> Result<()> {
-        ser::SerializeSeq::end(self)
-    }
-}
-
-impl<'a, W, F> ser::SerializeTupleVariant for Compound<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        ser::SerializeSeq::serialize_element(self, value)
-    }
-
-    #[inline]
-    fn end(self) -> Result<()> {
-        match self {
-            Compound::Map { ser, state } => {
-                match state {
-                    State::Empty => {}
-                    _ => tri!(ser.formatter.end_array(&mut ser.writer).map_err(Error::io)),
-                }
-                tri!(ser
-                    .formatter
-                    .end_object_value(&mut ser.writer)
-                    .map_err(Error::io));
-                ser.formatter.end_object(&mut ser.writer).map_err(Error::io)
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-}
-
-impl<'a, W, F> ser::SerializeMap for Compound<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_key<T>(&mut self, key: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match self {
-            Compound::Map { ser, state } => {
-                tri!(ser
-                    .formatter
-                    .begin_object_key(&mut ser.writer, *state == State::First)
-                    .map_err(Error::io));
-                *state = State::Rest;
-
-                tri!(key.serialize(MapKeySerializer { ser: *ser }));
-
-                ser.formatter
-                    .end_object_key(&mut ser.writer)
-                    .map_err(Error::io)
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-
-    #[inline]
-    fn serialize_value<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match self {
-            Compound::Map { ser, .. } => {
-                tri!(ser
-                    .formatter
-                    .begin_object_value(&mut ser.writer)
-                    .map_err(Error::io));
-                tri!(value.serialize(&mut **ser));
-                ser.formatter
-                    .end_object_value(&mut ser.writer)
-                    .map_err(Error::io)
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-
-    #[inline]
-    fn end(self) -> Result<()> {
-        match self {
-            Compound::Map { ser, state } => match state {
-                State::Empty => Ok(()),
-                _ => ser.formatter.end_object(&mut ser.writer).map_err(Error::io),
-            },
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-}
-
-impl<'a, W, F> ser::SerializeStruct for Compound<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match self {
-            Compound::Map { .. } => ser::SerializeMap::serialize_entry(self, key, value),
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { ser, .. } => {
-                if key == crate::number::TOKEN {
-                    value.serialize(NumberStrEmitter(ser))
-                } else {
-                    Err(invalid_number())
-                }
-            }
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { ser, .. } => {
-                if key == crate::raw::TOKEN {
-                    value.serialize(RawValueStrEmitter(ser))
-                } else {
-                    Err(invalid_raw_value())
-                }
-            }
-        }
-    }
-
-    #[inline]
-    fn end(self) -> Result<()> {
-        match self {
-            Compound::Map { .. } => ser::SerializeMap::end(self),
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => Ok(()),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => Ok(()),
-        }
-    }
-}
-
-impl<'a, W, F> ser::SerializeStructVariant for Compound<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match *self {
-            Compound::Map { .. } => ser::SerializeStruct::serialize_field(self, key, value),
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-
-    #[inline]
-    fn end(self) -> Result<()> {
-        match self {
-            Compound::Map { ser, state } => {
-                match state {
-                    State::Empty => {}
-                    _ => tri!(ser.formatter.end_object(&mut ser.writer).map_err(Error::io)),
-                }
-                tri!(ser
-                    .formatter
-                    .end_object_value(&mut ser.writer)
-                    .map_err(Error::io));
-                ser.formatter.end_object(&mut ser.writer).map_err(Error::io)
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            Compound::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            Compound::RawValue { .. } => unreachable!(),
-        }
-    }
-}
-
-struct MapKeySerializer<'a, W: 'a, F: 'a> {
-    ser: &'a mut Serializer<W, F>,
-}
-
-#[cfg(feature = "arbitrary_precision")]
-fn invalid_number() -> Error {
-    Error::syntax(ErrorCode::InvalidNumber, 0, 0)
-}
-
-#[cfg(feature = "raw_value")]
-fn invalid_raw_value() -> Error {
-    Error::syntax(ErrorCode::ExpectedSomeValue, 0, 0)
-}
-
-fn key_must_be_a_string() -> Error {
-    Error::syntax(ErrorCode::KeyMustBeAString, 0, 0)
-}
-
-fn float_key_must_be_finite() -> Error {
-    Error::syntax(ErrorCode::FloatKeyMustBeFinite, 0, 0)
-}
-
-impl<'a, W, F> ser::Serializer for MapKeySerializer<'a, W, F>
-where
-    W: io::Write,
-    F: Formatter,
-{
-    type Ok = ();
-    type Error = Error;
-
-    #[inline]
-    fn serialize_str(self, value: &str) -> Result<()> {
-        self.ser.serialize_str(value)
-    }
-
-    #[inline]
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-    ) -> Result<()> {
-        self.ser.serialize_str(variant)
-    }
-
-    #[inline]
-    fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        value.serialize(self)
-    }
-
-    type SerializeSeq = Impossible<(), Error>;
-    type SerializeTuple = Impossible<(), Error>;
-    type SerializeTupleStruct = Impossible<(), Error>;
-    type SerializeTupleVariant = Impossible<(), Error>;
-    type SerializeMap = Impossible<(), Error>;
-    type SerializeStruct = Impossible<(), Error>;
-    type SerializeStructVariant = Impossible<(), Error>;
-
-    fn serialize_bool(self, value: bool) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_bool(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_i8(self, value: i8) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_i8(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_i16(self, value: i16) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_i16(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_i32(self, value: i32) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_i32(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_i64(self, value: i64) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_i64(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_i128(self, value: i128) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_i128(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_u8(self, value: u8) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_u8(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_u16(self, value: u16) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_u16(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_u32(self, value: u32) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_u32(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_u64(self, value: u64) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_u64(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_u128(self, value: u128) -> Result<()> {
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_u128(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_f32(self, value: f32) -> Result<()> {
-        if !value.is_finite() {
-            return Err(float_key_must_be_finite());
-        }
-
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_f32(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_f64(self, value: f64) -> Result<()> {
-        if !value.is_finite() {
-            return Err(float_key_must_be_finite());
-        }
-
-        tri!(self
-            .ser
-            .formatter
-            .begin_string(&mut self.ser.writer)
-            .map_err(Error::io));
-        tri!(self
-            .ser
-            .formatter
-            .write_f64(&mut self.ser.writer, value)
-            .map_err(Error::io));
-        self.ser
-            .formatter
-            .end_string(&mut self.ser.writer)
-            .map_err(Error::io)
-    }
-
-    fn serialize_char(self, value: char) -> Result<()> {
-        self.ser.serialize_str(value.encode_utf8(&mut [0u8; 4]))
-    }
-
-    fn serialize_bytes(self, _value: &[u8]) -> Result<()> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_unit(self) -> Result<()> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<()> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _value: &T,
-    ) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_none(self) -> Result<()> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_some<T>(self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        value.serialize(self)
-    }
-
-    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        Err(key_must_be_a_string())
-    }
-
-    fn collect_str<T>(self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Display,
-    {
-        self.ser.collect_str(value)
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-struct NumberStrEmitter<'a, W: 'a + io::Write, F: 'a + Formatter>(&'a mut Serializer<W, F>);
-
-#[cfg(feature = "arbitrary_precision")]
-impl<'a, W: io::Write, F: Formatter> ser::Serializer for NumberStrEmitter<'a, W, F> {
-    type Ok = ();
-    type Error = Error;
-
-    type SerializeSeq = Impossible<(), Error>;
-    type SerializeTuple = Impossible<(), Error>;
-    type SerializeTupleStruct = Impossible<(), Error>;
-    type SerializeTupleVariant = Impossible<(), Error>;
-    type SerializeMap = Impossible<(), Error>;
-    type SerializeStruct = Impossible<(), Error>;
-    type SerializeStructVariant = Impossible<(), Error>;
-
-    fn serialize_bool(self, _v: bool) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i8(self, _v: i8) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i16(self, _v: i16) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i32(self, _v: i32) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i64(self, _v: i64) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i128(self, _v: i128) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u8(self, _v: u8) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u16(self, _v: u16) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u32(self, _v: u32) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u64(self, _v: u64) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u128(self, _v: u128) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_f32(self, _v: f32) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_f64(self, _v: f64) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_char(self, _v: char) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_str(self, value: &str) -> Result<()> {
-        let NumberStrEmitter(serializer) = self;
-        serializer
-            .formatter
-            .write_number_str(&mut serializer.writer, value)
-            .map_err(Error::io)
-    }
-
-    fn serialize_bytes(self, _value: &[u8]) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_none(self) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_some<T>(self, _value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_number())
-    }
-
-    fn serialize_unit(self) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-    ) -> Result<()> {
-        Err(invalid_number())
-    }
-
-    fn serialize_newtype_struct<T>(self, _name: &'static str, _value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_number())
-    }
-
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _value: &T,
-    ) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_number())
-    }
-
-    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> {
-        Err(invalid_number())
-    }
-
-    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> {
-        Err(invalid_number())
-    }
-
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        Err(invalid_number())
-    }
-
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        Err(invalid_number())
-    }
-
-    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
-        Err(invalid_number())
-    }
-
-    fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> {
-        Err(invalid_number())
-    }
-
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        Err(invalid_number())
-    }
-}
-
-#[cfg(feature = "raw_value")]
-struct RawValueStrEmitter<'a, W: 'a + io::Write, F: 'a + Formatter>(&'a mut Serializer<W, F>);
-
-#[cfg(feature = "raw_value")]
-impl<'a, W: io::Write, F: Formatter> ser::Serializer for RawValueStrEmitter<'a, W, F> {
-    type Ok = ();
-    type Error = Error;
-
-    type SerializeSeq = Impossible<(), Error>;
-    type SerializeTuple = Impossible<(), Error>;
-    type SerializeTupleStruct = Impossible<(), Error>;
-    type SerializeTupleVariant = Impossible<(), Error>;
-    type SerializeMap = Impossible<(), Error>;
-    type SerializeStruct = Impossible<(), Error>;
-    type SerializeStructVariant = Impossible<(), Error>;
-
-    fn serialize_bool(self, _v: bool) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_i8(self, _v: i8) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_i16(self, _v: i16) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_i32(self, _v: i32) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_i64(self, _v: i64) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_i128(self, _v: i128) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_u8(self, _v: u8) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_u16(self, _v: u16) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_u32(self, _v: u32) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_u64(self, _v: u64) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_u128(self, _v: u128) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_f32(self, _v: f32) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_f64(self, _v: f64) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_char(self, _v: char) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_str(self, value: &str) -> Result<()> {
-        let RawValueStrEmitter(serializer) = self;
-        serializer
-            .formatter
-            .write_raw_fragment(&mut serializer.writer, value)
-            .map_err(Error::io)
-    }
-
-    fn serialize_bytes(self, _value: &[u8]) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_none(self) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_some<T>(self, _value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_unit(self) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-    ) -> Result<()> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_newtype_struct<T>(self, _name: &'static str, _value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _value: &T,
-    ) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        Err(ser::Error::custom("expected RawValue"))
-    }
-
-    fn collect_str<T>(self, value: &T) -> Result<Self::Ok>
-    where
-        T: ?Sized + Display,
-    {
-        self.serialize_str(&value.to_string())
-    }
-}
-
-/// Represents a character escape code in a type-safe manner.
-pub enum CharEscape {
-    /// An escaped quote `"`
-    Quote,
-    /// An escaped reverse solidus `\`
-    ReverseSolidus,
-    /// An escaped solidus `/`
-    Solidus,
-    /// An escaped backspace character (usually escaped as `\b`)
-    Backspace,
-    /// An escaped form feed character (usually escaped as `\f`)
-    FormFeed,
-    /// An escaped line feed character (usually escaped as `\n`)
-    LineFeed,
-    /// An escaped carriage return character (usually escaped as `\r`)
-    CarriageReturn,
-    /// An escaped tab character (usually escaped as `\t`)
-    Tab,
-    /// An escaped ASCII plane control character (usually escaped as
-    /// `\u00XX` where `XX` are two hex characters)
-    AsciiControl(u8),
-}
-
-impl CharEscape {
-    #[inline]
-    fn from_escape_table(escape: u8, byte: u8) -> CharEscape {
-        match escape {
-            self::BB => CharEscape::Backspace,
-            self::TT => CharEscape::Tab,
-            self::NN => CharEscape::LineFeed,
-            self::FF => CharEscape::FormFeed,
-            self::RR => CharEscape::CarriageReturn,
-            self::QU => CharEscape::Quote,
-            self::BS => CharEscape::ReverseSolidus,
-            self::UU => CharEscape::AsciiControl(byte),
-            _ => unreachable!(),
-        }
-    }
-}
-
-/// This trait abstracts away serializing the JSON control characters, which allows the user to
-/// optionally pretty print the JSON output.
-pub trait Formatter {
-    /// Writes a `null` value to the specified writer.
-    #[inline]
-    fn write_null<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b"null")
-    }
-
-    /// Writes a `true` or `false` value to the specified writer.
-    #[inline]
-    fn write_bool<W>(&mut self, writer: &mut W, value: bool) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let s = if value {
-            b"true" as &[u8]
-        } else {
-            b"false" as &[u8]
-        };
-        writer.write_all(s)
-    }
-
-    /// Writes an integer value like `-123` to the specified writer.
-    #[inline]
-    fn write_i8<W>(&mut self, writer: &mut W, value: i8) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `-123` to the specified writer.
-    #[inline]
-    fn write_i16<W>(&mut self, writer: &mut W, value: i16) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `-123` to the specified writer.
-    #[inline]
-    fn write_i32<W>(&mut self, writer: &mut W, value: i32) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `-123` to the specified writer.
-    #[inline]
-    fn write_i64<W>(&mut self, writer: &mut W, value: i64) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `-123` to the specified writer.
-    #[inline]
-    fn write_i128<W>(&mut self, writer: &mut W, value: i128) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `123` to the specified writer.
-    #[inline]
-    fn write_u8<W>(&mut self, writer: &mut W, value: u8) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `123` to the specified writer.
-    #[inline]
-    fn write_u16<W>(&mut self, writer: &mut W, value: u16) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `123` to the specified writer.
-    #[inline]
-    fn write_u32<W>(&mut self, writer: &mut W, value: u32) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `123` to the specified writer.
-    #[inline]
-    fn write_u64<W>(&mut self, writer: &mut W, value: u64) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes an integer value like `123` to the specified writer.
-    #[inline]
-    fn write_u128<W>(&mut self, writer: &mut W, value: u128) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = itoa::Buffer::new();
-        let s = buffer.format(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes a floating point value like `-31.26e+12` to the specified writer.
-    ///
-    /// # Special cases
-    ///
-    /// This function **does not** check for NaN or infinity. If the input
-    /// number is not a finite float, the printed representation will be some
-    /// correctly formatted but unspecified numerical value.
-    ///
-    /// Please check [`is_finite`] yourself before calling this function, or
-    /// check [`is_nan`] and [`is_infinite`] and handle those cases yourself
-    /// with a different `Formatter` method.
-    ///
-    /// [`is_finite`]: f32::is_finite
-    /// [`is_nan`]: f32::is_nan
-    /// [`is_infinite`]: f32::is_infinite
-    #[inline]
-    fn write_f32<W>(&mut self, writer: &mut W, value: f32) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = ryu::Buffer::new();
-        let s = buffer.format_finite(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes a floating point value like `-31.26e+12` to the specified writer.
-    ///
-    /// # Special cases
-    ///
-    /// This function **does not** check for NaN or infinity. If the input
-    /// number is not a finite float, the printed representation will be some
-    /// correctly formatted but unspecified numerical value.
-    ///
-    /// Please check [`is_finite`] yourself before calling this function, or
-    /// check [`is_nan`] and [`is_infinite`] and handle those cases yourself
-    /// with a different `Formatter` method.
-    ///
-    /// [`is_finite`]: f64::is_finite
-    /// [`is_nan`]: f64::is_nan
-    /// [`is_infinite`]: f64::is_infinite
-    #[inline]
-    fn write_f64<W>(&mut self, writer: &mut W, value: f64) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        let mut buffer = ryu::Buffer::new();
-        let s = buffer.format_finite(value);
-        writer.write_all(s.as_bytes())
-    }
-
-    /// Writes a number that has already been rendered to a string.
-    #[inline]
-    fn write_number_str<W>(&mut self, writer: &mut W, value: &str) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(value.as_bytes())
-    }
-
-    /// Called before each series of `write_string_fragment` and
-    /// `write_char_escape`.  Writes a `"` to the specified writer.
-    #[inline]
-    fn begin_string<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b"\"")
-    }
-
-    /// Called after each series of `write_string_fragment` and
-    /// `write_char_escape`.  Writes a `"` to the specified writer.
-    #[inline]
-    fn end_string<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b"\"")
-    }
-
-    /// Writes a string fragment that doesn't need any escaping to the
-    /// specified writer.
-    #[inline]
-    fn write_string_fragment<W>(&mut self, writer: &mut W, fragment: &str) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(fragment.as_bytes())
-    }
-
-    /// Writes a character escape code to the specified writer.
-    #[inline]
-    fn write_char_escape<W>(&mut self, writer: &mut W, char_escape: CharEscape) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        use self::CharEscape::*;
-
-        let s = match char_escape {
-            Quote => b"\\\"",
-            ReverseSolidus => b"\\\\",
-            Solidus => b"\\/",
-            Backspace => b"\\b",
-            FormFeed => b"\\f",
-            LineFeed => b"\\n",
-            CarriageReturn => b"\\r",
-            Tab => b"\\t",
-            AsciiControl(byte) => {
-                static HEX_DIGITS: [u8; 16] = *b"0123456789abcdef";
-                let bytes = &[
-                    b'\\',
-                    b'u',
-                    b'0',
-                    b'0',
-                    HEX_DIGITS[(byte >> 4) as usize],
-                    HEX_DIGITS[(byte & 0xF) as usize],
-                ];
-                return writer.write_all(bytes);
-            }
-        };
-
-        writer.write_all(s)
-    }
-
-    /// Writes the representation of a byte array. Formatters can choose whether
-    /// to represent bytes as a JSON array of integers (the default), or some
-    /// JSON string encoding like hex or base64.
-    fn write_byte_array<W>(&mut self, writer: &mut W, value: &[u8]) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        tri!(self.begin_array(writer));
-        let mut first = true;
-        for byte in value {
-            tri!(self.begin_array_value(writer, first));
-            tri!(self.write_u8(writer, *byte));
-            tri!(self.end_array_value(writer));
-            first = false;
-        }
-        self.end_array(writer)
-    }
-
-    /// Called before every array.  Writes a `[` to the specified
-    /// writer.
-    #[inline]
-    fn begin_array<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b"[")
-    }
-
-    /// Called after every array.  Writes a `]` to the specified
-    /// writer.
-    #[inline]
-    fn end_array<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b"]")
-    }
-
-    /// Called before every array value.  Writes a `,` if needed to
-    /// the specified writer.
-    #[inline]
-    fn begin_array_value<W>(&mut self, writer: &mut W, first: bool) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        if first {
-            Ok(())
-        } else {
-            writer.write_all(b",")
-        }
-    }
-
-    /// Called after every array value.
-    #[inline]
-    fn end_array_value<W>(&mut self, _writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        Ok(())
-    }
-
-    /// Called before every object.  Writes a `{` to the specified
-    /// writer.
-    #[inline]
-    fn begin_object<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b"{")
-    }
-
-    /// Called after every object.  Writes a `}` to the specified
-    /// writer.
-    #[inline]
-    fn end_object<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b"}")
-    }
-
-    /// Called before every object key.
-    #[inline]
-    fn begin_object_key<W>(&mut self, writer: &mut W, first: bool) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        if first {
-            Ok(())
-        } else {
-            writer.write_all(b",")
-        }
-    }
-
-    /// Called after every object key.  A `:` should be written to the
-    /// specified writer by either this method or
-    /// `begin_object_value`.
-    #[inline]
-    fn end_object_key<W>(&mut self, _writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        Ok(())
-    }
-
-    /// Called before every object value.  A `:` should be written to
-    /// the specified writer by either this method or
-    /// `end_object_key`.
-    #[inline]
-    fn begin_object_value<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b":")
-    }
-
-    /// Called after every object value.
-    #[inline]
-    fn end_object_value<W>(&mut self, _writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        Ok(())
-    }
-
-    /// Writes a raw JSON fragment that doesn't need any escaping to the
-    /// specified writer.
-    #[inline]
-    fn write_raw_fragment<W>(&mut self, writer: &mut W, fragment: &str) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(fragment.as_bytes())
-    }
-}
-
-/// This structure compacts a JSON value with no extra whitespace.
-#[derive(Clone, Debug)]
-pub struct CompactFormatter;
-
-impl Formatter for CompactFormatter {}
-
-/// This structure pretty prints a JSON value to make it human readable.
-#[derive(Clone, Debug)]
-pub struct PrettyFormatter<'a> {
-    current_indent: usize,
-    has_value: bool,
-    indent: &'a [u8],
-}
-
-impl<'a> PrettyFormatter<'a> {
-    /// Construct a pretty printer formatter that defaults to using two spaces for indentation.
-    pub fn new() -> Self {
-        PrettyFormatter::with_indent(b"  ")
-    }
-
-    /// Construct a pretty printer formatter that uses the `indent` string for indentation.
-    pub fn with_indent(indent: &'a [u8]) -> Self {
-        PrettyFormatter {
-            current_indent: 0,
-            has_value: false,
-            indent,
-        }
-    }
-}
-
-impl<'a> Default for PrettyFormatter<'a> {
-    fn default() -> Self {
-        PrettyFormatter::new()
-    }
-}
-
-impl<'a> Formatter for PrettyFormatter<'a> {
-    #[inline]
-    fn begin_array<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        self.current_indent += 1;
-        self.has_value = false;
-        writer.write_all(b"[")
-    }
-
-    #[inline]
-    fn end_array<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        self.current_indent -= 1;
-
-        if self.has_value {
-            tri!(writer.write_all(b"\n"));
-            tri!(indent(writer, self.current_indent, self.indent));
-        }
-
-        writer.write_all(b"]")
-    }
-
-    #[inline]
-    fn begin_array_value<W>(&mut self, writer: &mut W, first: bool) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        tri!(writer.write_all(if first { b"\n" } else { b",\n" }));
-        indent(writer, self.current_indent, self.indent)
-    }
-
-    #[inline]
-    fn end_array_value<W>(&mut self, _writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        self.has_value = true;
-        Ok(())
-    }
-
-    #[inline]
-    fn begin_object<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        self.current_indent += 1;
-        self.has_value = false;
-        writer.write_all(b"{")
-    }
-
-    #[inline]
-    fn end_object<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        self.current_indent -= 1;
-
-        if self.has_value {
-            tri!(writer.write_all(b"\n"));
-            tri!(indent(writer, self.current_indent, self.indent));
-        }
-
-        writer.write_all(b"}")
-    }
-
-    #[inline]
-    fn begin_object_key<W>(&mut self, writer: &mut W, first: bool) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        tri!(writer.write_all(if first { b"\n" } else { b",\n" }));
-        indent(writer, self.current_indent, self.indent)
-    }
-
-    #[inline]
-    fn begin_object_value<W>(&mut self, writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        writer.write_all(b": ")
-    }
-
-    #[inline]
-    fn end_object_value<W>(&mut self, _writer: &mut W) -> io::Result<()>
-    where
-        W: ?Sized + io::Write,
-    {
-        self.has_value = true;
-        Ok(())
-    }
-}
-
-fn format_escaped_str<W, F>(writer: &mut W, formatter: &mut F, value: &str) -> io::Result<()>
-where
-    W: ?Sized + io::Write,
-    F: ?Sized + Formatter,
-{
-    tri!(formatter.begin_string(writer));
-    tri!(format_escaped_str_contents(writer, formatter, value));
-    formatter.end_string(writer)
-}
-
-fn format_escaped_str_contents<W, F>(
-    writer: &mut W,
-    formatter: &mut F,
-    value: &str,
-) -> io::Result<()>
-where
-    W: ?Sized + io::Write,
-    F: ?Sized + Formatter,
-{
-    let bytes = value.as_bytes();
-
-    let mut start = 0;
-
-    for (i, &byte) in bytes.iter().enumerate() {
-        let escape = ESCAPE[byte as usize];
-        if escape == 0 {
-            continue;
-        }
-
-        if start < i {
-            tri!(formatter.write_string_fragment(writer, &value[start..i]));
-        }
-
-        let char_escape = CharEscape::from_escape_table(escape, byte);
-        tri!(formatter.write_char_escape(writer, char_escape));
-
-        start = i + 1;
-    }
-
-    if start == bytes.len() {
-        return Ok(());
-    }
-
-    formatter.write_string_fragment(writer, &value[start..])
-}
-
-const BB: u8 = b'b'; // \x08
-const TT: u8 = b't'; // \x09
-const NN: u8 = b'n'; // \x0A
-const FF: u8 = b'f'; // \x0C
-const RR: u8 = b'r'; // \x0D
-const QU: u8 = b'"'; // \x22
-const BS: u8 = b'\\'; // \x5C
-const UU: u8 = b'u'; // \x00...\x1F except the ones above
-const __: u8 = 0;
-
-// Lookup table of escape sequences. A value of b'x' at index i means that byte
-// i is escaped as "\x" in JSON. A value of 0 means that byte i is not escaped.
-static ESCAPE: [u8; 256] = [
-    //   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
-    UU, UU, UU, UU, UU, UU, UU, UU, BB, TT, NN, UU, FF, RR, UU, UU, // 0
-    UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, // 1
-    __, __, QU, __, __, __, __, __, __, __, __, __, __, __, __, __, // 2
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 3
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 4
-    __, __, __, __, __, __, __, __, __, __, __, __, BS, __, __, __, // 5
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 6
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 7
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // E
-    __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F
-];
-
-/// Serialize the given data structure as JSON into the I/O stream.
-///
-/// Serialization guarantees it only feeds valid UTF-8 sequences to the writer.
-///
-/// # Errors
-///
-/// Serialization can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-#[inline]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-pub fn to_writer<W, T>(writer: W, value: &T) -> Result<()>
-where
-    W: io::Write,
-    T: ?Sized + Serialize,
-{
-    let mut ser = Serializer::new(writer);
-    value.serialize(&mut ser)
-}
-
-/// Serialize the given data structure as pretty-printed JSON into the I/O
-/// stream.
-///
-/// Serialization guarantees it only feeds valid UTF-8 sequences to the writer.
-///
-/// # Errors
-///
-/// Serialization can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-#[inline]
-#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
-pub fn to_writer_pretty<W, T>(writer: W, value: &T) -> Result<()>
-where
-    W: io::Write,
-    T: ?Sized + Serialize,
-{
-    let mut ser = Serializer::pretty(writer);
-    value.serialize(&mut ser)
-}
-
-/// Serialize the given data structure as a JSON byte vector.
-///
-/// # Errors
-///
-/// Serialization can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-#[inline]
-pub fn to_vec<T>(value: &T) -> Result<Vec<u8>>
-where
-    T: ?Sized + Serialize,
-{
-    let mut writer = Vec::with_capacity(128);
-    tri!(to_writer(&mut writer, value));
-    Ok(writer)
-}
-
-/// Serialize the given data structure as a pretty-printed JSON byte vector.
-///
-/// # Errors
-///
-/// Serialization can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-#[inline]
-pub fn to_vec_pretty<T>(value: &T) -> Result<Vec<u8>>
-where
-    T: ?Sized + Serialize,
-{
-    let mut writer = Vec::with_capacity(128);
-    tri!(to_writer_pretty(&mut writer, value));
-    Ok(writer)
-}
-
-/// Serialize the given data structure as a String of JSON.
-///
-/// # Errors
-///
-/// Serialization can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-#[inline]
-pub fn to_string<T>(value: &T) -> Result<String>
-where
-    T: ?Sized + Serialize,
-{
-    let vec = tri!(to_vec(value));
-    let string = unsafe {
-        // We do not emit invalid UTF-8.
-        String::from_utf8_unchecked(vec)
-    };
-    Ok(string)
-}
-
-/// Serialize the given data structure as a pretty-printed String of JSON.
-///
-/// # Errors
-///
-/// Serialization can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-#[inline]
-pub fn to_string_pretty<T>(value: &T) -> Result<String>
-where
-    T: ?Sized + Serialize,
-{
-    let vec = tri!(to_vec_pretty(value));
-    let string = unsafe {
-        // We do not emit invalid UTF-8.
-        String::from_utf8_unchecked(vec)
-    };
-    Ok(string)
-}
-
-fn indent<W>(wr: &mut W, n: usize, s: &[u8]) -> io::Result<()>
-where
-    W: ?Sized + io::Write,
-{
-    for _ in 0..n {
-        tri!(wr.write_all(s));
-    }
-
-    Ok(())
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/de.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/de.rs
deleted file mode 100644
index dd4698e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/de.rs
+++ /dev/null
@@ -1,1500 +0,0 @@
-use crate::error::{Error, ErrorCode};
-use crate::map::Map;
-use crate::number::Number;
-use crate::value::Value;
-use alloc::borrow::{Cow, ToOwned};
-use alloc::string::String;
-#[cfg(feature = "raw_value")]
-use alloc::string::ToString;
-use alloc::vec::{self, Vec};
-use core::fmt;
-use core::slice;
-use core::str::FromStr;
-use serde::de::{
-    self, Deserialize, DeserializeSeed, Deserializer as _, EnumAccess, Expected, IntoDeserializer,
-    MapAccess, SeqAccess, Unexpected, VariantAccess, Visitor,
-};
-use serde::forward_to_deserialize_any;
-
-#[cfg(feature = "arbitrary_precision")]
-use crate::number::NumberFromString;
-
-impl<'de> Deserialize<'de> for Value {
-    #[inline]
-    fn deserialize<D>(deserializer: D) -> Result<Value, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        struct ValueVisitor;
-
-        impl<'de> Visitor<'de> for ValueVisitor {
-            type Value = Value;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("any valid JSON value")
-            }
-
-            #[inline]
-            fn visit_bool<E>(self, value: bool) -> Result<Value, E> {
-                Ok(Value::Bool(value))
-            }
-
-            #[inline]
-            fn visit_i64<E>(self, value: i64) -> Result<Value, E> {
-                Ok(Value::Number(value.into()))
-            }
-
-            fn visit_i128<E>(self, value: i128) -> Result<Value, E>
-            where
-                E: serde::de::Error,
-            {
-                let de = serde::de::value::I128Deserializer::new(value);
-                Number::deserialize(de).map(Value::Number)
-            }
-
-            #[inline]
-            fn visit_u64<E>(self, value: u64) -> Result<Value, E> {
-                Ok(Value::Number(value.into()))
-            }
-
-            fn visit_u128<E>(self, value: u128) -> Result<Value, E>
-            where
-                E: serde::de::Error,
-            {
-                let de = serde::de::value::U128Deserializer::new(value);
-                Number::deserialize(de).map(Value::Number)
-            }
-
-            #[inline]
-            fn visit_f64<E>(self, value: f64) -> Result<Value, E> {
-                Ok(Number::from_f64(value).map_or(Value::Null, Value::Number))
-            }
-
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            #[inline]
-            fn visit_str<E>(self, value: &str) -> Result<Value, E>
-            where
-                E: serde::de::Error,
-            {
-                self.visit_string(String::from(value))
-            }
-
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            #[inline]
-            fn visit_string<E>(self, value: String) -> Result<Value, E> {
-                Ok(Value::String(value))
-            }
-
-            #[inline]
-            fn visit_none<E>(self) -> Result<Value, E> {
-                Ok(Value::Null)
-            }
-
-            #[inline]
-            fn visit_some<D>(self, deserializer: D) -> Result<Value, D::Error>
-            where
-                D: serde::Deserializer<'de>,
-            {
-                Deserialize::deserialize(deserializer)
-            }
-
-            #[inline]
-            fn visit_unit<E>(self) -> Result<Value, E> {
-                Ok(Value::Null)
-            }
-
-            #[inline]
-            fn visit_seq<V>(self, mut visitor: V) -> Result<Value, V::Error>
-            where
-                V: SeqAccess<'de>,
-            {
-                let mut vec = Vec::new();
-
-                while let Some(elem) = tri!(visitor.next_element()) {
-                    vec.push(elem);
-                }
-
-                Ok(Value::Array(vec))
-            }
-
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            fn visit_map<V>(self, mut visitor: V) -> Result<Value, V::Error>
-            where
-                V: MapAccess<'de>,
-            {
-                match tri!(visitor.next_key_seed(KeyClassifier)) {
-                    #[cfg(feature = "arbitrary_precision")]
-                    Some(KeyClass::Number) => {
-                        let number: NumberFromString = tri!(visitor.next_value());
-                        Ok(Value::Number(number.value))
-                    }
-                    #[cfg(feature = "raw_value")]
-                    Some(KeyClass::RawValue) => {
-                        let value = tri!(visitor.next_value_seed(crate::raw::BoxedFromString));
-                        crate::from_str(value.get()).map_err(de::Error::custom)
-                    }
-                    Some(KeyClass::Map(first_key)) => {
-                        let mut values = Map::new();
-
-                        values.insert(first_key, tri!(visitor.next_value()));
-                        while let Some((key, value)) = tri!(visitor.next_entry()) {
-                            values.insert(key, value);
-                        }
-
-                        Ok(Value::Object(values))
-                    }
-                    None => Ok(Value::Object(Map::new())),
-                }
-            }
-        }
-
-        deserializer.deserialize_any(ValueVisitor)
-    }
-}
-
-impl FromStr for Value {
-    type Err = Error;
-    fn from_str(s: &str) -> Result<Value, Error> {
-        super::super::de::from_str(s)
-    }
-}
-
-macro_rules! deserialize_number {
-    ($method:ident) => {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        fn $method<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: Visitor<'de>,
-        {
-            match self {
-                Value::Number(n) => n.deserialize_any(visitor),
-                _ => Err(self.invalid_type(&visitor)),
-            }
-        }
-
-        #[cfg(feature = "arbitrary_precision")]
-        fn $method<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: Visitor<'de>,
-        {
-            match self {
-                Value::Number(n) => n.$method(visitor),
-                _ => self.deserialize_any(visitor),
-            }
-        }
-    };
-}
-
-fn visit_array<'de, V>(array: Vec<Value>, visitor: V) -> Result<V::Value, Error>
-where
-    V: Visitor<'de>,
-{
-    let len = array.len();
-    let mut deserializer = SeqDeserializer::new(array);
-    let seq = tri!(visitor.visit_seq(&mut deserializer));
-    let remaining = deserializer.iter.len();
-    if remaining == 0 {
-        Ok(seq)
-    } else {
-        Err(serde::de::Error::invalid_length(
-            len,
-            &"fewer elements in array",
-        ))
-    }
-}
-
-impl<'de> serde::Deserializer<'de> for Map<String, Value> {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
-    where
-        V: Visitor<'de>,
-    {
-        let len = self.len();
-        let mut deserializer = MapDeserializer::new(self);
-        let map = tri!(visitor.visit_map(&mut deserializer));
-        let remaining = deserializer.iter.len();
-        if remaining == 0 {
-            Ok(map)
-        } else {
-            Err(serde::de::Error::invalid_length(
-                len,
-                &"fewer elements in map",
-            ))
-        }
-    }
-
-    fn deserialize_enum<V>(
-        self,
-        _name: &'static str,
-        _variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Self::Error>
-    where
-        V: Visitor<'de>,
-    {
-        let mut iter = self.into_iter();
-        let (variant, value) = match iter.next() {
-            Some(v) => v,
-            None => {
-                return Err(serde::de::Error::invalid_value(
-                    Unexpected::Map,
-                    &"map with a single key",
-                ));
-            }
-        };
-        // enums are encoded in json as maps with a single key:value pair
-        if iter.next().is_some() {
-            return Err(serde::de::Error::invalid_value(
-                Unexpected::Map,
-                &"map with a single key",
-            ));
-        }
-
-        visitor.visit_enum(EnumDeserializer {
-            variant,
-            value: Some(value),
-        })
-    }
-
-    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
-    where
-        V: Visitor<'de>,
-    {
-        drop(self);
-        visitor.visit_unit()
-    }
-
-    forward_to_deserialize_any! {
-        bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
-        bytes byte_buf option unit unit_struct newtype_struct seq tuple
-        tuple_struct map struct identifier
-    }
-}
-
-impl<'de> serde::Deserializer<'de> for Value {
-    type Error = Error;
-
-    #[inline]
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Null => visitor.visit_unit(),
-            Value::Bool(v) => visitor.visit_bool(v),
-            Value::Number(n) => n.deserialize_any(visitor),
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            Value::String(v) => visitor.visit_string(v),
-            #[cfg(not(any(feature = "std", feature = "alloc")))]
-            Value::String(_) => unreachable!(),
-            Value::Array(v) => visit_array(v, visitor),
-            Value::Object(v) => v.deserialize_any(visitor),
-        }
-    }
-
-    deserialize_number!(deserialize_i8);
-    deserialize_number!(deserialize_i16);
-    deserialize_number!(deserialize_i32);
-    deserialize_number!(deserialize_i64);
-    deserialize_number!(deserialize_i128);
-    deserialize_number!(deserialize_u8);
-    deserialize_number!(deserialize_u16);
-    deserialize_number!(deserialize_u32);
-    deserialize_number!(deserialize_u64);
-    deserialize_number!(deserialize_u128);
-    deserialize_number!(deserialize_f32);
-    deserialize_number!(deserialize_f64);
-
-    #[inline]
-    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Null => visitor.visit_none(),
-            _ => visitor.visit_some(self),
-        }
-    }
-
-    #[inline]
-    fn deserialize_enum<V>(
-        self,
-        name: &'static str,
-        variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Object(value) => value.deserialize_enum(name, variants, visitor),
-            Value::String(variant) => visitor.visit_enum(EnumDeserializer {
-                variant,
-                value: None,
-            }),
-            other => Err(serde::de::Error::invalid_type(
-                other.unexpected(),
-                &"string or map",
-            )),
-        }
-    }
-
-    #[inline]
-    fn deserialize_newtype_struct<V>(
-        self,
-        name: &'static str,
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        #[cfg(feature = "raw_value")]
-        {
-            if name == crate::raw::TOKEN {
-                return visitor.visit_map(crate::raw::OwnedRawDeserializer {
-                    raw_value: Some(self.to_string()),
-                });
-            }
-        }
-
-        let _ = name;
-        visitor.visit_newtype_struct(self)
-    }
-
-    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Bool(v) => visitor.visit_bool(v),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_string(visitor)
-    }
-
-    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_string(visitor)
-    }
-
-    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            Value::String(v) => visitor.visit_string(v),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_byte_buf(visitor)
-    }
-
-    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            Value::String(v) => visitor.visit_string(v),
-            Value::Array(v) => visit_array(v, visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Null => visitor.visit_unit(),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_unit_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_unit(visitor)
-    }
-
-    fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Array(v) => visit_array(v, visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_seq(visitor)
-    }
-
-    fn deserialize_tuple_struct<V>(
-        self,
-        _name: &'static str,
-        _len: usize,
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_seq(visitor)
-    }
-
-    fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Object(v) => v.deserialize_any(visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_struct<V>(
-        self,
-        _name: &'static str,
-        _fields: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Array(v) => visit_array(v, visitor),
-            Value::Object(v) => v.deserialize_any(visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_string(visitor)
-    }
-
-    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        drop(self);
-        visitor.visit_unit()
-    }
-}
-
-struct EnumDeserializer {
-    variant: String,
-    value: Option<Value>,
-}
-
-impl<'de> EnumAccess<'de> for EnumDeserializer {
-    type Error = Error;
-    type Variant = VariantDeserializer;
-
-    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, VariantDeserializer), Error>
-    where
-        V: DeserializeSeed<'de>,
-    {
-        let variant = self.variant.into_deserializer();
-        let visitor = VariantDeserializer { value: self.value };
-        seed.deserialize(variant).map(|v| (v, visitor))
-    }
-}
-
-impl<'de> IntoDeserializer<'de, Error> for Value {
-    type Deserializer = Self;
-
-    fn into_deserializer(self) -> Self::Deserializer {
-        self
-    }
-}
-
-impl<'de> IntoDeserializer<'de, Error> for &'de Value {
-    type Deserializer = Self;
-
-    fn into_deserializer(self) -> Self::Deserializer {
-        self
-    }
-}
-
-struct VariantDeserializer {
-    value: Option<Value>,
-}
-
-impl<'de> VariantAccess<'de> for VariantDeserializer {
-    type Error = Error;
-
-    fn unit_variant(self) -> Result<(), Error> {
-        match self.value {
-            Some(value) => Deserialize::deserialize(value),
-            None => Ok(()),
-        }
-    }
-
-    fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.value {
-            Some(value) => seed.deserialize(value),
-            None => Err(serde::de::Error::invalid_type(
-                Unexpected::UnitVariant,
-                &"newtype variant",
-            )),
-        }
-    }
-
-    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self.value {
-            Some(Value::Array(v)) => {
-                if v.is_empty() {
-                    visitor.visit_unit()
-                } else {
-                    visit_array(v, visitor)
-                }
-            }
-            Some(other) => Err(serde::de::Error::invalid_type(
-                other.unexpected(),
-                &"tuple variant",
-            )),
-            None => Err(serde::de::Error::invalid_type(
-                Unexpected::UnitVariant,
-                &"tuple variant",
-            )),
-        }
-    }
-
-    fn struct_variant<V>(
-        self,
-        _fields: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self.value {
-            Some(Value::Object(v)) => v.deserialize_any(visitor),
-            Some(other) => Err(serde::de::Error::invalid_type(
-                other.unexpected(),
-                &"struct variant",
-            )),
-            None => Err(serde::de::Error::invalid_type(
-                Unexpected::UnitVariant,
-                &"struct variant",
-            )),
-        }
-    }
-}
-
-struct SeqDeserializer {
-    iter: vec::IntoIter<Value>,
-}
-
-impl SeqDeserializer {
-    fn new(vec: Vec<Value>) -> Self {
-        SeqDeserializer {
-            iter: vec.into_iter(),
-        }
-    }
-}
-
-impl<'de> SeqAccess<'de> for SeqDeserializer {
-    type Error = Error;
-
-    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.iter.next() {
-            Some(value) => seed.deserialize(value).map(Some),
-            None => Ok(None),
-        }
-    }
-
-    fn size_hint(&self) -> Option<usize> {
-        match self.iter.size_hint() {
-            (lower, Some(upper)) if lower == upper => Some(upper),
-            _ => None,
-        }
-    }
-}
-
-struct MapDeserializer {
-    iter: <Map<String, Value> as IntoIterator>::IntoIter,
-    value: Option<Value>,
-}
-
-impl MapDeserializer {
-    fn new(map: Map<String, Value>) -> Self {
-        MapDeserializer {
-            iter: map.into_iter(),
-            value: None,
-        }
-    }
-}
-
-impl<'de> MapAccess<'de> for MapDeserializer {
-    type Error = Error;
-
-    fn next_key_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.iter.next() {
-            Some((key, value)) => {
-                self.value = Some(value);
-                let key_de = MapKeyDeserializer {
-                    key: Cow::Owned(key),
-                };
-                seed.deserialize(key_de).map(Some)
-            }
-            None => Ok(None),
-        }
-    }
-
-    fn next_value_seed<T>(&mut self, seed: T) -> Result<T::Value, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.value.take() {
-            Some(value) => seed.deserialize(value),
-            None => Err(serde::de::Error::custom("value is missing")),
-        }
-    }
-
-    fn size_hint(&self) -> Option<usize> {
-        match self.iter.size_hint() {
-            (lower, Some(upper)) if lower == upper => Some(upper),
-            _ => None,
-        }
-    }
-}
-
-macro_rules! deserialize_value_ref_number {
-    ($method:ident) => {
-        #[cfg(not(feature = "arbitrary_precision"))]
-        fn $method<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: Visitor<'de>,
-        {
-            match self {
-                Value::Number(n) => n.deserialize_any(visitor),
-                _ => Err(self.invalid_type(&visitor)),
-            }
-        }
-
-        #[cfg(feature = "arbitrary_precision")]
-        fn $method<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: Visitor<'de>,
-        {
-            match self {
-                Value::Number(n) => n.$method(visitor),
-                _ => self.deserialize_any(visitor),
-            }
-        }
-    };
-}
-
-fn visit_array_ref<'de, V>(array: &'de [Value], visitor: V) -> Result<V::Value, Error>
-where
-    V: Visitor<'de>,
-{
-    let len = array.len();
-    let mut deserializer = SeqRefDeserializer::new(array);
-    let seq = tri!(visitor.visit_seq(&mut deserializer));
-    let remaining = deserializer.iter.len();
-    if remaining == 0 {
-        Ok(seq)
-    } else {
-        Err(serde::de::Error::invalid_length(
-            len,
-            &"fewer elements in array",
-        ))
-    }
-}
-
-impl<'de> serde::Deserializer<'de> for &'de Map<String, Value> {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
-    where
-        V: Visitor<'de>,
-    {
-        let len = self.len();
-        let mut deserializer = MapRefDeserializer::new(self);
-        let map = tri!(visitor.visit_map(&mut deserializer));
-        let remaining = deserializer.iter.len();
-        if remaining == 0 {
-            Ok(map)
-        } else {
-            Err(serde::de::Error::invalid_length(
-                len,
-                &"fewer elements in map",
-            ))
-        }
-    }
-
-    fn deserialize_enum<V>(
-        self,
-        _name: &'static str,
-        _variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Self::Error>
-    where
-        V: Visitor<'de>,
-    {
-        let mut iter = self.into_iter();
-        let (variant, value) = match iter.next() {
-            Some(v) => v,
-            None => {
-                return Err(serde::de::Error::invalid_value(
-                    Unexpected::Map,
-                    &"map with a single key",
-                ));
-            }
-        };
-        // enums are encoded in json as maps with a single key:value pair
-        if iter.next().is_some() {
-            return Err(serde::de::Error::invalid_value(
-                Unexpected::Map,
-                &"map with a single key",
-            ));
-        }
-
-        visitor.visit_enum(EnumRefDeserializer {
-            variant,
-            value: Some(value),
-        })
-    }
-
-    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        visitor.visit_unit()
-    }
-
-    forward_to_deserialize_any! {
-        bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
-        bytes byte_buf option unit unit_struct newtype_struct seq tuple
-        tuple_struct map struct identifier
-    }
-}
-
-impl<'de> serde::Deserializer<'de> for &'de Value {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Null => visitor.visit_unit(),
-            Value::Bool(v) => visitor.visit_bool(*v),
-            Value::Number(n) => n.deserialize_any(visitor),
-            Value::String(v) => visitor.visit_borrowed_str(v),
-            Value::Array(v) => visit_array_ref(v, visitor),
-            Value::Object(v) => v.deserialize_any(visitor),
-        }
-    }
-
-    deserialize_value_ref_number!(deserialize_i8);
-    deserialize_value_ref_number!(deserialize_i16);
-    deserialize_value_ref_number!(deserialize_i32);
-    deserialize_value_ref_number!(deserialize_i64);
-    deserialize_number!(deserialize_i128);
-    deserialize_value_ref_number!(deserialize_u8);
-    deserialize_value_ref_number!(deserialize_u16);
-    deserialize_value_ref_number!(deserialize_u32);
-    deserialize_value_ref_number!(deserialize_u64);
-    deserialize_number!(deserialize_u128);
-    deserialize_value_ref_number!(deserialize_f32);
-    deserialize_value_ref_number!(deserialize_f64);
-
-    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match *self {
-            Value::Null => visitor.visit_none(),
-            _ => visitor.visit_some(self),
-        }
-    }
-
-    fn deserialize_enum<V>(
-        self,
-        name: &'static str,
-        variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Object(value) => value.deserialize_enum(name, variants, visitor),
-            Value::String(variant) => visitor.visit_enum(EnumRefDeserializer {
-                variant,
-                value: None,
-            }),
-            other => Err(serde::de::Error::invalid_type(
-                other.unexpected(),
-                &"string or map",
-            )),
-        }
-    }
-
-    #[inline]
-    fn deserialize_newtype_struct<V>(
-        self,
-        name: &'static str,
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        #[cfg(feature = "raw_value")]
-        {
-            if name == crate::raw::TOKEN {
-                return visitor.visit_map(crate::raw::OwnedRawDeserializer {
-                    raw_value: Some(self.to_string()),
-                });
-            }
-        }
-
-        let _ = name;
-        visitor.visit_newtype_struct(self)
-    }
-
-    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match *self {
-            Value::Bool(v) => visitor.visit_bool(v),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_str(visitor)
-    }
-
-    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::String(v) => visitor.visit_borrowed_str(v),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_str(visitor)
-    }
-
-    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::String(v) => visitor.visit_borrowed_str(v),
-            Value::Array(v) => visit_array_ref(v, visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_bytes(visitor)
-    }
-
-    fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match *self {
-            Value::Null => visitor.visit_unit(),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_unit_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_unit(visitor)
-    }
-
-    fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Array(v) => visit_array_ref(v, visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_seq(visitor)
-    }
-
-    fn deserialize_tuple_struct<V>(
-        self,
-        _name: &'static str,
-        _len: usize,
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_seq(visitor)
-    }
-
-    fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Object(v) => v.deserialize_any(visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_struct<V>(
-        self,
-        _name: &'static str,
-        _fields: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self {
-            Value::Array(v) => visit_array_ref(v, visitor),
-            Value::Object(v) => v.deserialize_any(visitor),
-            _ => Err(self.invalid_type(&visitor)),
-        }
-    }
-
-    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.deserialize_str(visitor)
-    }
-
-    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        visitor.visit_unit()
-    }
-}
-
-struct EnumRefDeserializer<'de> {
-    variant: &'de str,
-    value: Option<&'de Value>,
-}
-
-impl<'de> EnumAccess<'de> for EnumRefDeserializer<'de> {
-    type Error = Error;
-    type Variant = VariantRefDeserializer<'de>;
-
-    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Error>
-    where
-        V: DeserializeSeed<'de>,
-    {
-        let variant = self.variant.into_deserializer();
-        let visitor = VariantRefDeserializer { value: self.value };
-        seed.deserialize(variant).map(|v| (v, visitor))
-    }
-}
-
-struct VariantRefDeserializer<'de> {
-    value: Option<&'de Value>,
-}
-
-impl<'de> VariantAccess<'de> for VariantRefDeserializer<'de> {
-    type Error = Error;
-
-    fn unit_variant(self) -> Result<(), Error> {
-        match self.value {
-            Some(value) => Deserialize::deserialize(value),
-            None => Ok(()),
-        }
-    }
-
-    fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.value {
-            Some(value) => seed.deserialize(value),
-            None => Err(serde::de::Error::invalid_type(
-                Unexpected::UnitVariant,
-                &"newtype variant",
-            )),
-        }
-    }
-
-    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self.value {
-            Some(Value::Array(v)) => {
-                if v.is_empty() {
-                    visitor.visit_unit()
-                } else {
-                    visit_array_ref(v, visitor)
-                }
-            }
-            Some(other) => Err(serde::de::Error::invalid_type(
-                other.unexpected(),
-                &"tuple variant",
-            )),
-            None => Err(serde::de::Error::invalid_type(
-                Unexpected::UnitVariant,
-                &"tuple variant",
-            )),
-        }
-    }
-
-    fn struct_variant<V>(
-        self,
-        _fields: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        match self.value {
-            Some(Value::Object(v)) => v.deserialize_any(visitor),
-            Some(other) => Err(serde::de::Error::invalid_type(
-                other.unexpected(),
-                &"struct variant",
-            )),
-            None => Err(serde::de::Error::invalid_type(
-                Unexpected::UnitVariant,
-                &"struct variant",
-            )),
-        }
-    }
-}
-
-struct SeqRefDeserializer<'de> {
-    iter: slice::Iter<'de, Value>,
-}
-
-impl<'de> SeqRefDeserializer<'de> {
-    fn new(slice: &'de [Value]) -> Self {
-        SeqRefDeserializer { iter: slice.iter() }
-    }
-}
-
-impl<'de> SeqAccess<'de> for SeqRefDeserializer<'de> {
-    type Error = Error;
-
-    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.iter.next() {
-            Some(value) => seed.deserialize(value).map(Some),
-            None => Ok(None),
-        }
-    }
-
-    fn size_hint(&self) -> Option<usize> {
-        match self.iter.size_hint() {
-            (lower, Some(upper)) if lower == upper => Some(upper),
-            _ => None,
-        }
-    }
-}
-
-struct MapRefDeserializer<'de> {
-    iter: <&'de Map<String, Value> as IntoIterator>::IntoIter,
-    value: Option<&'de Value>,
-}
-
-impl<'de> MapRefDeserializer<'de> {
-    fn new(map: &'de Map<String, Value>) -> Self {
-        MapRefDeserializer {
-            iter: map.into_iter(),
-            value: None,
-        }
-    }
-}
-
-impl<'de> MapAccess<'de> for MapRefDeserializer<'de> {
-    type Error = Error;
-
-    fn next_key_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.iter.next() {
-            Some((key, value)) => {
-                self.value = Some(value);
-                let key_de = MapKeyDeserializer {
-                    key: Cow::Borrowed(&**key),
-                };
-                seed.deserialize(key_de).map(Some)
-            }
-            None => Ok(None),
-        }
-    }
-
-    fn next_value_seed<T>(&mut self, seed: T) -> Result<T::Value, Error>
-    where
-        T: DeserializeSeed<'de>,
-    {
-        match self.value.take() {
-            Some(value) => seed.deserialize(value),
-            None => Err(serde::de::Error::custom("value is missing")),
-        }
-    }
-
-    fn size_hint(&self) -> Option<usize> {
-        match self.iter.size_hint() {
-            (lower, Some(upper)) if lower == upper => Some(upper),
-            _ => None,
-        }
-    }
-}
-
-struct MapKeyDeserializer<'de> {
-    key: Cow<'de, str>,
-}
-
-macro_rules! deserialize_numeric_key {
-    ($method:ident) => {
-        deserialize_numeric_key!($method, deserialize_number);
-    };
-
-    ($method:ident, $using:ident) => {
-        fn $method<V>(self, visitor: V) -> Result<V::Value, Error>
-        where
-            V: Visitor<'de>,
-        {
-            let mut de = crate::Deserializer::from_str(&self.key);
-
-            match tri!(de.peek()) {
-                Some(b'0'..=b'9' | b'-') => {}
-                _ => return Err(Error::syntax(ErrorCode::ExpectedNumericKey, 0, 0)),
-            }
-
-            let number = tri!(de.$using(visitor));
-
-            if tri!(de.peek()).is_some() {
-                return Err(Error::syntax(ErrorCode::ExpectedNumericKey, 0, 0));
-            }
-
-            Ok(number)
-        }
-    };
-}
-
-impl<'de> serde::Deserializer<'de> for MapKeyDeserializer<'de> {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        BorrowedCowStrDeserializer::new(self.key).deserialize_any(visitor)
-    }
-
-    deserialize_numeric_key!(deserialize_i8);
-    deserialize_numeric_key!(deserialize_i16);
-    deserialize_numeric_key!(deserialize_i32);
-    deserialize_numeric_key!(deserialize_i64);
-    deserialize_numeric_key!(deserialize_u8);
-    deserialize_numeric_key!(deserialize_u16);
-    deserialize_numeric_key!(deserialize_u32);
-    deserialize_numeric_key!(deserialize_u64);
-    #[cfg(not(feature = "float_roundtrip"))]
-    deserialize_numeric_key!(deserialize_f32);
-    deserialize_numeric_key!(deserialize_f64);
-
-    #[cfg(feature = "float_roundtrip")]
-    deserialize_numeric_key!(deserialize_f32, do_deserialize_f32);
-    deserialize_numeric_key!(deserialize_i128, do_deserialize_i128);
-    deserialize_numeric_key!(deserialize_u128, do_deserialize_u128);
-
-    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        if self.key == "true" {
-            visitor.visit_bool(true)
-        } else if self.key == "false" {
-            visitor.visit_bool(false)
-        } else {
-            Err(serde::de::Error::invalid_type(
-                Unexpected::Str(&self.key),
-                &visitor,
-            ))
-        }
-    }
-
-    #[inline]
-    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        // Map keys cannot be null.
-        visitor.visit_some(self)
-    }
-
-    #[inline]
-    fn deserialize_newtype_struct<V>(
-        self,
-        _name: &'static str,
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        visitor.visit_newtype_struct(self)
-    }
-
-    fn deserialize_enum<V>(
-        self,
-        name: &'static str,
-        variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: Visitor<'de>,
-    {
-        self.key
-            .into_deserializer()
-            .deserialize_enum(name, variants, visitor)
-    }
-
-    forward_to_deserialize_any! {
-        char str string bytes byte_buf unit unit_struct seq tuple tuple_struct
-        map struct identifier ignored_any
-    }
-}
-
-struct KeyClassifier;
-
-enum KeyClass {
-    Map(String),
-    #[cfg(feature = "arbitrary_precision")]
-    Number,
-    #[cfg(feature = "raw_value")]
-    RawValue,
-}
-
-impl<'de> DeserializeSeed<'de> for KeyClassifier {
-    type Value = KeyClass;
-
-    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        deserializer.deserialize_str(self)
-    }
-}
-
-impl<'de> Visitor<'de> for KeyClassifier {
-    type Value = KeyClass;
-
-    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str("a string key")
-    }
-
-    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
-    where
-        E: de::Error,
-    {
-        match s {
-            #[cfg(feature = "arbitrary_precision")]
-            crate::number::TOKEN => Ok(KeyClass::Number),
-            #[cfg(feature = "raw_value")]
-            crate::raw::TOKEN => Ok(KeyClass::RawValue),
-            _ => Ok(KeyClass::Map(s.to_owned())),
-        }
-    }
-
-    #[cfg(any(feature = "std", feature = "alloc"))]
-    fn visit_string<E>(self, s: String) -> Result<Self::Value, E>
-    where
-        E: de::Error,
-    {
-        match s.as_str() {
-            #[cfg(feature = "arbitrary_precision")]
-            crate::number::TOKEN => Ok(KeyClass::Number),
-            #[cfg(feature = "raw_value")]
-            crate::raw::TOKEN => Ok(KeyClass::RawValue),
-            _ => Ok(KeyClass::Map(s)),
-        }
-    }
-}
-
-impl Value {
-    #[cold]
-    fn invalid_type<E>(&self, exp: &dyn Expected) -> E
-    where
-        E: serde::de::Error,
-    {
-        serde::de::Error::invalid_type(self.unexpected(), exp)
-    }
-
-    #[cold]
-    fn unexpected(&self) -> Unexpected {
-        match self {
-            Value::Null => Unexpected::Unit,
-            Value::Bool(b) => Unexpected::Bool(*b),
-            Value::Number(n) => n.unexpected(),
-            Value::String(s) => Unexpected::Str(s),
-            Value::Array(_) => Unexpected::Seq,
-            Value::Object(_) => Unexpected::Map,
-        }
-    }
-}
-
-struct BorrowedCowStrDeserializer<'de> {
-    value: Cow<'de, str>,
-}
-
-impl<'de> BorrowedCowStrDeserializer<'de> {
-    fn new(value: Cow<'de, str>) -> Self {
-        BorrowedCowStrDeserializer { value }
-    }
-}
-
-impl<'de> de::Deserializer<'de> for BorrowedCowStrDeserializer<'de> {
-    type Error = Error;
-
-    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
-    where
-        V: de::Visitor<'de>,
-    {
-        match self.value {
-            Cow::Borrowed(string) => visitor.visit_borrowed_str(string),
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            Cow::Owned(string) => visitor.visit_string(string),
-            #[cfg(not(any(feature = "std", feature = "alloc")))]
-            Cow::Owned(_) => unreachable!(),
-        }
-    }
-
-    fn deserialize_enum<V>(
-        self,
-        _name: &str,
-        _variants: &'static [&'static str],
-        visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: de::Visitor<'de>,
-    {
-        visitor.visit_enum(self)
-    }
-
-    forward_to_deserialize_any! {
-        bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
-        bytes byte_buf option unit unit_struct newtype_struct seq tuple
-        tuple_struct map struct identifier ignored_any
-    }
-}
-
-impl<'de> de::EnumAccess<'de> for BorrowedCowStrDeserializer<'de> {
-    type Error = Error;
-    type Variant = UnitOnly;
-
-    fn variant_seed<T>(self, seed: T) -> Result<(T::Value, Self::Variant), Error>
-    where
-        T: de::DeserializeSeed<'de>,
-    {
-        let value = tri!(seed.deserialize(self));
-        Ok((value, UnitOnly))
-    }
-}
-
-struct UnitOnly;
-
-impl<'de> de::VariantAccess<'de> for UnitOnly {
-    type Error = Error;
-
-    fn unit_variant(self) -> Result<(), Error> {
-        Ok(())
-    }
-
-    fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Error>
-    where
-        T: de::DeserializeSeed<'de>,
-    {
-        Err(de::Error::invalid_type(
-            Unexpected::UnitVariant,
-            &"newtype variant",
-        ))
-    }
-
-    fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Error>
-    where
-        V: de::Visitor<'de>,
-    {
-        Err(de::Error::invalid_type(
-            Unexpected::UnitVariant,
-            &"tuple variant",
-        ))
-    }
-
-    fn struct_variant<V>(
-        self,
-        _fields: &'static [&'static str],
-        _visitor: V,
-    ) -> Result<V::Value, Error>
-    where
-        V: de::Visitor<'de>,
-    {
-        Err(de::Error::invalid_type(
-            Unexpected::UnitVariant,
-            &"struct variant",
-        ))
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/from.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/from.rs
deleted file mode 100644
index df4b2038c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/from.rs
+++ /dev/null
@@ -1,284 +0,0 @@
-use super::Value;
-use crate::map::Map;
-use crate::number::Number;
-use alloc::borrow::{Cow, ToOwned};
-use alloc::string::String;
-use alloc::vec::Vec;
-
-macro_rules! from_integer {
-    ($($ty:ident)*) => {
-        $(
-            impl From<$ty> for Value {
-                fn from(n: $ty) -> Self {
-                    Value::Number(n.into())
-                }
-            }
-        )*
-    };
-}
-
-from_integer! {
-    i8 i16 i32 i64 isize
-    u8 u16 u32 u64 usize
-}
-
-#[cfg(feature = "arbitrary_precision")]
-from_integer! {
-    i128 u128
-}
-
-impl From<f32> for Value {
-    /// Convert 32-bit floating point number to `Value::Number`, or
-    /// `Value::Null` if infinite or NaN.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let f: f32 = 13.37;
-    /// let x: Value = f.into();
-    /// ```
-    fn from(f: f32) -> Self {
-        Number::from_f32(f).map_or(Value::Null, Value::Number)
-    }
-}
-
-impl From<f64> for Value {
-    /// Convert 64-bit floating point number to `Value::Number`, or
-    /// `Value::Null` if infinite or NaN.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let f: f64 = 13.37;
-    /// let x: Value = f.into();
-    /// ```
-    fn from(f: f64) -> Self {
-        Number::from_f64(f).map_or(Value::Null, Value::Number)
-    }
-}
-
-impl From<bool> for Value {
-    /// Convert boolean to `Value::Bool`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let b = false;
-    /// let x: Value = b.into();
-    /// ```
-    fn from(f: bool) -> Self {
-        Value::Bool(f)
-    }
-}
-
-impl From<String> for Value {
-    /// Convert `String` to `Value::String`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let s: String = "lorem".to_owned();
-    /// let x: Value = s.into();
-    /// ```
-    fn from(f: String) -> Self {
-        Value::String(f)
-    }
-}
-
-impl From<&str> for Value {
-    /// Convert string slice to `Value::String`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let s: &str = "lorem";
-    /// let x: Value = s.into();
-    /// ```
-    fn from(f: &str) -> Self {
-        Value::String(f.to_owned())
-    }
-}
-
-impl<'a> From<Cow<'a, str>> for Value {
-    /// Convert copy-on-write string to `Value::String`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    /// use std::borrow::Cow;
-    ///
-    /// let s: Cow<str> = Cow::Borrowed("lorem");
-    /// let x: Value = s.into();
-    /// ```
-    ///
-    /// ```
-    /// use serde_json::Value;
-    /// use std::borrow::Cow;
-    ///
-    /// let s: Cow<str> = Cow::Owned("lorem".to_owned());
-    /// let x: Value = s.into();
-    /// ```
-    fn from(f: Cow<'a, str>) -> Self {
-        Value::String(f.into_owned())
-    }
-}
-
-impl From<Number> for Value {
-    /// Convert `Number` to `Value::Number`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::{Number, Value};
-    ///
-    /// let n = Number::from(7);
-    /// let x: Value = n.into();
-    /// ```
-    fn from(f: Number) -> Self {
-        Value::Number(f)
-    }
-}
-
-impl From<Map<String, Value>> for Value {
-    /// Convert map (with string keys) to `Value::Object`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::{Map, Value};
-    ///
-    /// let mut m = Map::new();
-    /// m.insert("Lorem".to_owned(), "ipsum".into());
-    /// let x: Value = m.into();
-    /// ```
-    fn from(f: Map<String, Value>) -> Self {
-        Value::Object(f)
-    }
-}
-
-impl<T: Into<Value>> From<Vec<T>> for Value {
-    /// Convert a `Vec` to `Value::Array`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let v = vec!["lorem", "ipsum", "dolor"];
-    /// let x: Value = v.into();
-    /// ```
-    fn from(f: Vec<T>) -> Self {
-        Value::Array(f.into_iter().map(Into::into).collect())
-    }
-}
-
-impl<T: Into<Value>, const N: usize> From<[T; N]> for Value {
-    fn from(array: [T; N]) -> Self {
-        Value::Array(array.into_iter().map(Into::into).collect())
-    }
-}
-
-impl<T: Clone + Into<Value>> From<&[T]> for Value {
-    /// Convert a slice to `Value::Array`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let v: &[&str] = &["lorem", "ipsum", "dolor"];
-    /// let x: Value = v.into();
-    /// ```
-    fn from(f: &[T]) -> Self {
-        Value::Array(f.iter().cloned().map(Into::into).collect())
-    }
-}
-
-impl<T: Into<Value>> FromIterator<T> for Value {
-    /// Create a `Value::Array` by collecting an iterator of array elements.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let v = std::iter::repeat(42).take(5);
-    /// let x: Value = v.collect();
-    /// ```
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let v: Vec<_> = vec!["lorem", "ipsum", "dolor"];
-    /// let x: Value = v.into_iter().collect();
-    /// ```
-    ///
-    /// ```
-    /// use std::iter::FromIterator;
-    /// use serde_json::Value;
-    ///
-    /// let x: Value = Value::from_iter(vec!["lorem", "ipsum", "dolor"]);
-    /// ```
-    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
-        Value::Array(iter.into_iter().map(Into::into).collect())
-    }
-}
-
-impl<K: Into<String>, V: Into<Value>> FromIterator<(K, V)> for Value {
-    /// Create a `Value::Object` by collecting an iterator of key-value pairs.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let v: Vec<_> = vec![("lorem", 40), ("ipsum", 2)];
-    /// let x: Value = v.into_iter().collect();
-    /// ```
-    fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> Self {
-        Value::Object(
-            iter.into_iter()
-                .map(|(k, v)| (k.into(), v.into()))
-                .collect(),
-        )
-    }
-}
-
-impl From<()> for Value {
-    /// Convert `()` to `Value::Null`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// let u = ();
-    /// let x: Value = u.into();
-    /// ```
-    fn from((): ()) -> Self {
-        Value::Null
-    }
-}
-
-impl<T> From<Option<T>> for Value
-where
-    T: Into<Value>,
-{
-    fn from(opt: Option<T>) -> Self {
-        match opt {
-            None => Value::Null,
-            Some(value) => Into::into(value),
-        }
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/index.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/index.rs
deleted file mode 100644
index 7b001100..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/index.rs
+++ /dev/null
@@ -1,258 +0,0 @@
-use super::Value;
-use crate::map::Map;
-use alloc::borrow::ToOwned;
-use alloc::string::String;
-use core::fmt::{self, Display};
-use core::ops;
-
-/// A type that can be used to index into a `serde_json::Value`.
-///
-/// The [`get`] and [`get_mut`] methods of `Value` accept any type that
-/// implements `Index`, as does the [square-bracket indexing operator]. This
-/// trait is implemented for strings which are used as the index into a JSON
-/// map, and for `usize` which is used as the index into a JSON array.
-///
-/// [`get`]: Value::get
-/// [`get_mut`]: Value::get_mut
-/// [square-bracket indexing operator]: Value#impl-Index%3CI%3E-for-Value
-///
-/// This trait is sealed and cannot be implemented for types outside of
-/// `serde_json`.
-///
-/// # Examples
-///
-/// ```
-/// # use serde_json::json;
-/// #
-/// let data = json!({ "inner": [1, 2, 3] });
-///
-/// // Data is a JSON map so it can be indexed with a string.
-/// let inner = &data["inner"];
-///
-/// // Inner is a JSON array so it can be indexed with an integer.
-/// let first = &inner[0];
-///
-/// assert_eq!(first, 1);
-/// ```
-pub trait Index: private::Sealed {
-    /// Return None if the key is not already in the array or object.
-    #[doc(hidden)]
-    fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value>;
-
-    /// Return None if the key is not already in the array or object.
-    #[doc(hidden)]
-    fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value>;
-
-    /// Panic if array index out of bounds. If key is not already in the object,
-    /// insert it with a value of null. Panic if Value is a type that cannot be
-    /// indexed into, except if Value is null then it can be treated as an empty
-    /// object.
-    #[doc(hidden)]
-    fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value;
-}
-
-impl Index for usize {
-    fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> {
-        match v {
-            Value::Array(vec) => vec.get(*self),
-            _ => None,
-        }
-    }
-    fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> {
-        match v {
-            Value::Array(vec) => vec.get_mut(*self),
-            _ => None,
-        }
-    }
-    fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value {
-        match v {
-            Value::Array(vec) => {
-                let len = vec.len();
-                vec.get_mut(*self).unwrap_or_else(|| {
-                    panic!(
-                        "cannot access index {} of JSON array of length {}",
-                        self, len
-                    )
-                })
-            }
-            _ => panic!("cannot access index {} of JSON {}", self, Type(v)),
-        }
-    }
-}
-
-impl Index for str {
-    fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> {
-        match v {
-            Value::Object(map) => map.get(self),
-            _ => None,
-        }
-    }
-    fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> {
-        match v {
-            Value::Object(map) => map.get_mut(self),
-            _ => None,
-        }
-    }
-    fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value {
-        if let Value::Null = v {
-            *v = Value::Object(Map::new());
-        }
-        match v {
-            Value::Object(map) => map.entry(self.to_owned()).or_insert(Value::Null),
-            _ => panic!("cannot access key {:?} in JSON {}", self, Type(v)),
-        }
-    }
-}
-
-impl Index for String {
-    fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> {
-        self[..].index_into(v)
-    }
-    fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> {
-        self[..].index_into_mut(v)
-    }
-    fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value {
-        self[..].index_or_insert(v)
-    }
-}
-
-impl<T> Index for &T
-where
-    T: ?Sized + Index,
-{
-    fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> {
-        (**self).index_into(v)
-    }
-    fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> {
-        (**self).index_into_mut(v)
-    }
-    fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value {
-        (**self).index_or_insert(v)
-    }
-}
-
-// Prevent users from implementing the Index trait.
-mod private {
-    pub trait Sealed {}
-    impl Sealed for usize {}
-    impl Sealed for str {}
-    impl Sealed for alloc::string::String {}
-    impl<T> Sealed for &T where T: ?Sized + Sealed {}
-}
-
-/// Used in panic messages.
-struct Type<'a>(&'a Value);
-
-impl<'a> Display for Type<'a> {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        match *self.0 {
-            Value::Null => formatter.write_str("null"),
-            Value::Bool(_) => formatter.write_str("boolean"),
-            Value::Number(_) => formatter.write_str("number"),
-            Value::String(_) => formatter.write_str("string"),
-            Value::Array(_) => formatter.write_str("array"),
-            Value::Object(_) => formatter.write_str("object"),
-        }
-    }
-}
-
-// The usual semantics of Index is to panic on invalid indexing.
-//
-// That said, the usual semantics are for things like Vec and BTreeMap which
-// have different use cases than Value. If you are working with a Vec, you know
-// that you are working with a Vec and you can get the len of the Vec and make
-// sure your indices are within bounds. The Value use cases are more
-// loosey-goosey. You got some JSON from an endpoint and you want to pull values
-// out of it. Outside of this Index impl, you already have the option of using
-// value.as_array() and working with the Vec directly, or matching on
-// Value::Array and getting the Vec directly. The Index impl means you can skip
-// that and index directly into the thing using a concise syntax. You don't have
-// to check the type, you don't have to check the len, it is all about what you
-// expect the Value to look like.
-//
-// Basically the use cases that would be well served by panicking here are
-// better served by using one of the other approaches: get and get_mut,
-// as_array, or match. The value of this impl is that it adds a way of working
-// with Value that is not well served by the existing approaches: concise and
-// careless and sometimes that is exactly what you want.
-impl<I> ops::Index<I> for Value
-where
-    I: Index,
-{
-    type Output = Value;
-
-    /// Index into a `serde_json::Value` using the syntax `value[0]` or
-    /// `value["k"]`.
-    ///
-    /// Returns `Value::Null` if the type of `self` does not match the type of
-    /// the index, for example if the index is a string and `self` is an array
-    /// or a number. Also returns `Value::Null` if the given key does not exist
-    /// in the map or the given index is not within the bounds of the array.
-    ///
-    /// For retrieving deeply nested values, you should have a look at the
-    /// `Value::pointer` method.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let data = json!({
-    ///     "x": {
-    ///         "y": ["z", "zz"]
-    ///     }
-    /// });
-    ///
-    /// assert_eq!(data["x"]["y"], json!(["z", "zz"]));
-    /// assert_eq!(data["x"]["y"][0], json!("z"));
-    ///
-    /// assert_eq!(data["a"], json!(null)); // returns null for undefined values
-    /// assert_eq!(data["a"]["b"], json!(null)); // does not panic
-    /// ```
-    fn index(&self, index: I) -> &Value {
-        static NULL: Value = Value::Null;
-        index.index_into(self).unwrap_or(&NULL)
-    }
-}
-
-impl<I> ops::IndexMut<I> for Value
-where
-    I: Index,
-{
-    /// Write into a `serde_json::Value` using the syntax `value[0] = ...` or
-    /// `value["k"] = ...`.
-    ///
-    /// If the index is a number, the value must be an array of length bigger
-    /// than the index. Indexing into a value that is not an array or an array
-    /// that is too small will panic.
-    ///
-    /// If the index is a string, the value must be an object or null which is
-    /// treated like an empty object. If the key is not already present in the
-    /// object, it will be inserted with a value of null. Indexing into a value
-    /// that is neither an object nor null will panic.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut data = json!({ "x": 0 });
-    ///
-    /// // replace an existing key
-    /// data["x"] = json!(1);
-    ///
-    /// // insert a new key
-    /// data["y"] = json!([false, false, false]);
-    ///
-    /// // replace an array value
-    /// data["y"][0] = json!(true);
-    ///
-    /// // inserted a deeply nested key
-    /// data["a"]["b"]["c"]["d"] = json!(true);
-    ///
-    /// println!("{}", data);
-    /// ```
-    fn index_mut(&mut self, index: I) -> &mut Value {
-        index.index_or_insert(self)
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/mod.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/mod.rs
deleted file mode 100644
index 6b40f9a..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/mod.rs
+++ /dev/null
@@ -1,1035 +0,0 @@
-//! The Value enum, a loosely typed way of representing any valid JSON value.
-//!
-//! # Constructing JSON
-//!
-//! Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value`
-//! objects with very natural JSON syntax.
-//!
-//! ```
-//! use serde_json::json;
-//!
-//! fn main() {
-//!     // The type of `john` is `serde_json::Value`
-//!     let john = json!({
-//!         "name": "John Doe",
-//!         "age": 43,
-//!         "phones": [
-//!             "+44 1234567",
-//!             "+44 2345678"
-//!         ]
-//!     });
-//!
-//!     println!("first phone number: {}", john["phones"][0]);
-//!
-//!     // Convert to a string of JSON and print it out
-//!     println!("{}", john.to_string());
-//! }
-//! ```
-//!
-//! The `Value::to_string()` function converts a `serde_json::Value` into a
-//! `String` of JSON text.
-//!
-//! One neat thing about the `json!` macro is that variables and expressions can
-//! be interpolated directly into the JSON value as you are building it. Serde
-//! will check at compile time that the value you are interpolating is able to
-//! be represented as JSON.
-//!
-//! ```
-//! # use serde_json::json;
-//! #
-//! # fn random_phone() -> u16 { 0 }
-//! #
-//! let full_name = "John Doe";
-//! let age_last_year = 42;
-//!
-//! // The type of `john` is `serde_json::Value`
-//! let john = json!({
-//!     "name": full_name,
-//!     "age": age_last_year + 1,
-//!     "phones": [
-//!         format!("+44 {}", random_phone())
-//!     ]
-//! });
-//! ```
-//!
-//! A string of JSON data can be parsed into a `serde_json::Value` by the
-//! [`serde_json::from_str`][from_str] function. There is also
-//! [`from_slice`][from_slice] for parsing from a byte slice `&[u8]` and
-//! [`from_reader`][from_reader] for parsing from any `io::Read` like a File or
-//! a TCP stream.
-//!
-//! ```
-//! use serde_json::{json, Value, Error};
-//!
-//! fn untyped_example() -> Result<(), Error> {
-//!     // Some JSON input data as a &str. Maybe this comes from the user.
-//!     let data = r#"
-//!         {
-//!             "name": "John Doe",
-//!             "age": 43,
-//!             "phones": [
-//!                 "+44 1234567",
-//!                 "+44 2345678"
-//!             ]
-//!         }"#;
-//!
-//!     // Parse the string of data into serde_json::Value.
-//!     let v: Value = serde_json::from_str(data)?;
-//!
-//!     // Access parts of the data by indexing with square brackets.
-//!     println!("Please call {} at the number {}", v["name"], v["phones"][0]);
-//!
-//!     Ok(())
-//! }
-//! #
-//! # untyped_example().unwrap();
-//! ```
-//!
-//! [macro]: crate::json
-//! [from_str]: crate::de::from_str
-//! [from_slice]: crate::de::from_slice
-//! [from_reader]: crate::de::from_reader
-
-use crate::error::Error;
-use crate::io;
-use alloc::string::String;
-use alloc::vec::Vec;
-use core::fmt::{self, Debug, Display};
-use core::mem;
-use core::str;
-use serde::de::DeserializeOwned;
-use serde::ser::Serialize;
-
-pub use self::index::Index;
-pub use self::ser::Serializer;
-pub use crate::map::Map;
-pub use crate::number::Number;
-
-#[cfg(feature = "raw_value")]
-#[cfg_attr(docsrs, doc(cfg(feature = "raw_value")))]
-pub use crate::raw::{to_raw_value, RawValue};
-
-/// Represents any valid JSON value.
-///
-/// See the [`serde_json::value` module documentation](self) for usage examples.
-#[derive(Clone, Eq, PartialEq, Hash)]
-pub enum Value {
-    /// Represents a JSON null value.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!(null);
-    /// ```
-    Null,
-
-    /// Represents a JSON boolean.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!(true);
-    /// ```
-    Bool(bool),
-
-    /// Represents a JSON number, whether integer or floating point.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!(12.5);
-    /// ```
-    Number(Number),
-
-    /// Represents a JSON string.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!("a string");
-    /// ```
-    String(String),
-
-    /// Represents a JSON array.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!(["an", "array"]);
-    /// ```
-    Array(Vec<Value>),
-
-    /// Represents a JSON object.
-    ///
-    /// By default the map is backed by a BTreeMap. Enable the `preserve_order`
-    /// feature of serde_json to use IndexMap instead, which preserves
-    /// entries in the order they are inserted into the map. In particular, this
-    /// allows JSON data to be deserialized into a Value and serialized to a
-    /// string while retaining the order of map keys in the input.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "an": "object" });
-    /// ```
-    Object(Map<String, Value>),
-}
-
-impl Debug for Value {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        match self {
-            Value::Null => formatter.write_str("Null"),
-            Value::Bool(boolean) => write!(formatter, "Bool({})", boolean),
-            Value::Number(number) => Debug::fmt(number, formatter),
-            Value::String(string) => write!(formatter, "String({:?})", string),
-            Value::Array(vec) => {
-                tri!(formatter.write_str("Array "));
-                Debug::fmt(vec, formatter)
-            }
-            Value::Object(map) => {
-                tri!(formatter.write_str("Object "));
-                Debug::fmt(map, formatter)
-            }
-        }
-    }
-}
-
-impl Display for Value {
-    /// Display a JSON value as a string.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let json = json!({ "city": "London", "street": "10 Downing Street" });
-    ///
-    /// // Compact format:
-    /// //
-    /// // {"city":"London","street":"10 Downing Street"}
-    /// let compact = format!("{}", json);
-    /// assert_eq!(compact,
-    ///     "{\"city\":\"London\",\"street\":\"10 Downing Street\"}");
-    ///
-    /// // Pretty format:
-    /// //
-    /// // {
-    /// //   "city": "London",
-    /// //   "street": "10 Downing Street"
-    /// // }
-    /// let pretty = format!("{:#}", json);
-    /// assert_eq!(pretty,
-    ///     "{\n  \"city\": \"London\",\n  \"street\": \"10 Downing Street\"\n}");
-    /// ```
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        struct WriterFormatter<'a, 'b: 'a> {
-            inner: &'a mut fmt::Formatter<'b>,
-        }
-
-        impl<'a, 'b> io::Write for WriterFormatter<'a, 'b> {
-            fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-                // Safety: the serializer below only emits valid utf8 when using
-                // the default formatter.
-                let s = unsafe { str::from_utf8_unchecked(buf) };
-                tri!(self.inner.write_str(s).map_err(io_error));
-                Ok(buf.len())
-            }
-
-            fn flush(&mut self) -> io::Result<()> {
-                Ok(())
-            }
-        }
-
-        fn io_error(_: fmt::Error) -> io::Error {
-            // Error value does not matter because Display impl just maps it
-            // back to fmt::Error.
-            io::Error::new(io::ErrorKind::Other, "fmt error")
-        }
-
-        let alternate = f.alternate();
-        let mut wr = WriterFormatter { inner: f };
-        if alternate {
-            // {:#}
-            super::ser::to_writer_pretty(&mut wr, self).map_err(|_| fmt::Error)
-        } else {
-            // {}
-            super::ser::to_writer(&mut wr, self).map_err(|_| fmt::Error)
-        }
-    }
-}
-
-fn parse_index(s: &str) -> Option<usize> {
-    if s.starts_with('+') || (s.starts_with('0') && s.len() != 1) {
-        return None;
-    }
-    s.parse().ok()
-}
-
-impl Value {
-    /// Index into a JSON array or map. A string index can be used to access a
-    /// value in a map, and a usize index can be used to access an element of an
-    /// array.
-    ///
-    /// Returns `None` if the type of `self` does not match the type of the
-    /// index, for example if the index is a string and `self` is an array or a
-    /// number. Also returns `None` if the given key does not exist in the map
-    /// or the given index is not within the bounds of the array.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let object = json!({ "A": 65, "B": 66, "C": 67 });
-    /// assert_eq!(*object.get("A").unwrap(), json!(65));
-    ///
-    /// let array = json!([ "A", "B", "C" ]);
-    /// assert_eq!(*array.get(2).unwrap(), json!("C"));
-    ///
-    /// assert_eq!(array.get("A"), None);
-    /// ```
-    ///
-    /// Square brackets can also be used to index into a value in a more concise
-    /// way. This returns `Value::Null` in cases where `get` would have returned
-    /// `None`.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let object = json!({
-    ///     "A": ["a", "á", "à"],
-    ///     "B": ["b", "b́"],
-    ///     "C": ["c", "ć", "Ä‡ÌŁ", "ᾉ"],
-    /// });
-    /// assert_eq!(object["B"][0], json!("b"));
-    ///
-    /// assert_eq!(object["D"], json!(null));
-    /// assert_eq!(object[0]["x"]["y"]["z"], json!(null));
-    /// ```
-    pub fn get<I: Index>(&self, index: I) -> Option<&Value> {
-        index.index_into(self)
-    }
-
-    /// Mutably index into a JSON array or map. A string index can be used to
-    /// access a value in a map, and a usize index can be used to access an
-    /// element of an array.
-    ///
-    /// Returns `None` if the type of `self` does not match the type of the
-    /// index, for example if the index is a string and `self` is an array or a
-    /// number. Also returns `None` if the given key does not exist in the map
-    /// or the given index is not within the bounds of the array.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut object = json!({ "A": 65, "B": 66, "C": 67 });
-    /// *object.get_mut("A").unwrap() = json!(69);
-    ///
-    /// let mut array = json!([ "A", "B", "C" ]);
-    /// *array.get_mut(2).unwrap() = json!("D");
-    /// ```
-    pub fn get_mut<I: Index>(&mut self, index: I) -> Option<&mut Value> {
-        index.index_into_mut(self)
-    }
-
-    /// Returns true if the `Value` is an Object. Returns false otherwise.
-    ///
-    /// For any Value on which `is_object` returns true, `as_object` and
-    /// `as_object_mut` are guaranteed to return the map representation of the
-    /// object.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let obj = json!({ "a": { "nested": true }, "b": ["an", "array"] });
-    ///
-    /// assert!(obj.is_object());
-    /// assert!(obj["a"].is_object());
-    ///
-    /// // array, not an object
-    /// assert!(!obj["b"].is_object());
-    /// ```
-    pub fn is_object(&self) -> bool {
-        self.as_object().is_some()
-    }
-
-    /// If the `Value` is an Object, returns the associated Map. Returns None
-    /// otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": { "nested": true }, "b": ["an", "array"] });
-    ///
-    /// // The length of `{"nested": true}` is 1 entry.
-    /// assert_eq!(v["a"].as_object().unwrap().len(), 1);
-    ///
-    /// // The array `["an", "array"]` is not an object.
-    /// assert_eq!(v["b"].as_object(), None);
-    /// ```
-    pub fn as_object(&self) -> Option<&Map<String, Value>> {
-        match self {
-            Value::Object(map) => Some(map),
-            _ => None,
-        }
-    }
-
-    /// If the `Value` is an Object, returns the associated mutable Map.
-    /// Returns None otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut v = json!({ "a": { "nested": true } });
-    ///
-    /// v["a"].as_object_mut().unwrap().clear();
-    /// assert_eq!(v, json!({ "a": {} }));
-    /// ```
-    pub fn as_object_mut(&mut self) -> Option<&mut Map<String, Value>> {
-        match self {
-            Value::Object(map) => Some(map),
-            _ => None,
-        }
-    }
-
-    /// Returns true if the `Value` is an Array. Returns false otherwise.
-    ///
-    /// For any Value on which `is_array` returns true, `as_array` and
-    /// `as_array_mut` are guaranteed to return the vector representing the
-    /// array.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let obj = json!({ "a": ["an", "array"], "b": { "an": "object" } });
-    ///
-    /// assert!(obj["a"].is_array());
-    ///
-    /// // an object, not an array
-    /// assert!(!obj["b"].is_array());
-    /// ```
-    pub fn is_array(&self) -> bool {
-        self.as_array().is_some()
-    }
-
-    /// If the `Value` is an Array, returns the associated vector. Returns None
-    /// otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": ["an", "array"], "b": { "an": "object" } });
-    ///
-    /// // The length of `["an", "array"]` is 2 elements.
-    /// assert_eq!(v["a"].as_array().unwrap().len(), 2);
-    ///
-    /// // The object `{"an": "object"}` is not an array.
-    /// assert_eq!(v["b"].as_array(), None);
-    /// ```
-    pub fn as_array(&self) -> Option<&Vec<Value>> {
-        match self {
-            Value::Array(array) => Some(array),
-            _ => None,
-        }
-    }
-
-    /// If the `Value` is an Array, returns the associated mutable vector.
-    /// Returns None otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut v = json!({ "a": ["an", "array"] });
-    ///
-    /// v["a"].as_array_mut().unwrap().clear();
-    /// assert_eq!(v, json!({ "a": [] }));
-    /// ```
-    pub fn as_array_mut(&mut self) -> Option<&mut Vec<Value>> {
-        match self {
-            Value::Array(list) => Some(list),
-            _ => None,
-        }
-    }
-
-    /// Returns true if the `Value` is a String. Returns false otherwise.
-    ///
-    /// For any Value on which `is_string` returns true, `as_str` is guaranteed
-    /// to return the string slice.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": "some string", "b": false });
-    ///
-    /// assert!(v["a"].is_string());
-    ///
-    /// // The boolean `false` is not a string.
-    /// assert!(!v["b"].is_string());
-    /// ```
-    pub fn is_string(&self) -> bool {
-        self.as_str().is_some()
-    }
-
-    /// If the `Value` is a String, returns the associated str. Returns None
-    /// otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": "some string", "b": false });
-    ///
-    /// assert_eq!(v["a"].as_str(), Some("some string"));
-    ///
-    /// // The boolean `false` is not a string.
-    /// assert_eq!(v["b"].as_str(), None);
-    ///
-    /// // JSON values are printed in JSON representation, so strings are in quotes.
-    /// //
-    /// //    The value is: "some string"
-    /// println!("The value is: {}", v["a"]);
-    ///
-    /// // Rust strings are printed without quotes.
-    /// //
-    /// //    The value is: some string
-    /// println!("The value is: {}", v["a"].as_str().unwrap());
-    /// ```
-    pub fn as_str(&self) -> Option<&str> {
-        match self {
-            Value::String(s) => Some(s),
-            _ => None,
-        }
-    }
-
-    /// Returns true if the `Value` is a Number. Returns false otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": 1, "b": "2" });
-    ///
-    /// assert!(v["a"].is_number());
-    ///
-    /// // The string `"2"` is a string, not a number.
-    /// assert!(!v["b"].is_number());
-    /// ```
-    pub fn is_number(&self) -> bool {
-        match *self {
-            Value::Number(_) => true,
-            _ => false,
-        }
-    }
-
-    /// If the `Value` is a Number, returns the associated [`Number`]. Returns
-    /// None otherwise.
-    ///
-    /// ```
-    /// # use serde_json::{json, Number};
-    /// #
-    /// let v = json!({ "a": 1, "b": 2.2, "c": -3, "d": "4" });
-    ///
-    /// assert_eq!(v["a"].as_number(), Some(&Number::from(1u64)));
-    /// assert_eq!(v["b"].as_number(), Some(&Number::from_f64(2.2).unwrap()));
-    /// assert_eq!(v["c"].as_number(), Some(&Number::from(-3i64)));
-    ///
-    /// // The string `"4"` is not a number.
-    /// assert_eq!(v["d"].as_number(), None);
-    /// ```
-    pub fn as_number(&self) -> Option<&Number> {
-        match self {
-            Value::Number(number) => Some(number),
-            _ => None,
-        }
-    }
-
-    /// Returns true if the `Value` is an integer between `i64::MIN` and
-    /// `i64::MAX`.
-    ///
-    /// For any Value on which `is_i64` returns true, `as_i64` is guaranteed to
-    /// return the integer value.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let big = i64::max_value() as u64 + 10;
-    /// let v = json!({ "a": 64, "b": big, "c": 256.0 });
-    ///
-    /// assert!(v["a"].is_i64());
-    ///
-    /// // Greater than i64::MAX.
-    /// assert!(!v["b"].is_i64());
-    ///
-    /// // Numbers with a decimal point are not considered integers.
-    /// assert!(!v["c"].is_i64());
-    /// ```
-    pub fn is_i64(&self) -> bool {
-        match self {
-            Value::Number(n) => n.is_i64(),
-            _ => false,
-        }
-    }
-
-    /// Returns true if the `Value` is an integer between zero and `u64::MAX`.
-    ///
-    /// For any Value on which `is_u64` returns true, `as_u64` is guaranteed to
-    /// return the integer value.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": 64, "b": -64, "c": 256.0 });
-    ///
-    /// assert!(v["a"].is_u64());
-    ///
-    /// // Negative integer.
-    /// assert!(!v["b"].is_u64());
-    ///
-    /// // Numbers with a decimal point are not considered integers.
-    /// assert!(!v["c"].is_u64());
-    /// ```
-    pub fn is_u64(&self) -> bool {
-        match self {
-            Value::Number(n) => n.is_u64(),
-            _ => false,
-        }
-    }
-
-    /// Returns true if the `Value` is a number that can be represented by f64.
-    ///
-    /// For any Value on which `is_f64` returns true, `as_f64` is guaranteed to
-    /// return the floating point value.
-    ///
-    /// Currently this function returns true if and only if both `is_i64` and
-    /// `is_u64` return false but this is not a guarantee in the future.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": 256.0, "b": 64, "c": -64 });
-    ///
-    /// assert!(v["a"].is_f64());
-    ///
-    /// // Integers.
-    /// assert!(!v["b"].is_f64());
-    /// assert!(!v["c"].is_f64());
-    /// ```
-    pub fn is_f64(&self) -> bool {
-        match self {
-            Value::Number(n) => n.is_f64(),
-            _ => false,
-        }
-    }
-
-    /// If the `Value` is an integer, represent it as i64 if possible. Returns
-    /// None otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let big = i64::max_value() as u64 + 10;
-    /// let v = json!({ "a": 64, "b": big, "c": 256.0 });
-    ///
-    /// assert_eq!(v["a"].as_i64(), Some(64));
-    /// assert_eq!(v["b"].as_i64(), None);
-    /// assert_eq!(v["c"].as_i64(), None);
-    /// ```
-    pub fn as_i64(&self) -> Option<i64> {
-        match self {
-            Value::Number(n) => n.as_i64(),
-            _ => None,
-        }
-    }
-
-    /// If the `Value` is an integer, represent it as u64 if possible. Returns
-    /// None otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": 64, "b": -64, "c": 256.0 });
-    ///
-    /// assert_eq!(v["a"].as_u64(), Some(64));
-    /// assert_eq!(v["b"].as_u64(), None);
-    /// assert_eq!(v["c"].as_u64(), None);
-    /// ```
-    pub fn as_u64(&self) -> Option<u64> {
-        match self {
-            Value::Number(n) => n.as_u64(),
-            _ => None,
-        }
-    }
-
-    /// If the `Value` is a number, represent it as f64 if possible. Returns
-    /// None otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": 256.0, "b": 64, "c": -64 });
-    ///
-    /// assert_eq!(v["a"].as_f64(), Some(256.0));
-    /// assert_eq!(v["b"].as_f64(), Some(64.0));
-    /// assert_eq!(v["c"].as_f64(), Some(-64.0));
-    /// ```
-    pub fn as_f64(&self) -> Option<f64> {
-        match self {
-            Value::Number(n) => n.as_f64(),
-            _ => None,
-        }
-    }
-
-    /// Returns true if the `Value` is a Boolean. Returns false otherwise.
-    ///
-    /// For any Value on which `is_boolean` returns true, `as_bool` is
-    /// guaranteed to return the boolean value.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": false, "b": "false" });
-    ///
-    /// assert!(v["a"].is_boolean());
-    ///
-    /// // The string `"false"` is a string, not a boolean.
-    /// assert!(!v["b"].is_boolean());
-    /// ```
-    pub fn is_boolean(&self) -> bool {
-        self.as_bool().is_some()
-    }
-
-    /// If the `Value` is a Boolean, returns the associated bool. Returns None
-    /// otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": false, "b": "false" });
-    ///
-    /// assert_eq!(v["a"].as_bool(), Some(false));
-    ///
-    /// // The string `"false"` is a string, not a boolean.
-    /// assert_eq!(v["b"].as_bool(), None);
-    /// ```
-    pub fn as_bool(&self) -> Option<bool> {
-        match *self {
-            Value::Bool(b) => Some(b),
-            _ => None,
-        }
-    }
-
-    /// Returns true if the `Value` is a Null. Returns false otherwise.
-    ///
-    /// For any Value on which `is_null` returns true, `as_null` is guaranteed
-    /// to return `Some(())`.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": null, "b": false });
-    ///
-    /// assert!(v["a"].is_null());
-    ///
-    /// // The boolean `false` is not null.
-    /// assert!(!v["b"].is_null());
-    /// ```
-    pub fn is_null(&self) -> bool {
-        self.as_null().is_some()
-    }
-
-    /// If the `Value` is a Null, returns (). Returns None otherwise.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let v = json!({ "a": null, "b": false });
-    ///
-    /// assert_eq!(v["a"].as_null(), Some(()));
-    ///
-    /// // The boolean `false` is not null.
-    /// assert_eq!(v["b"].as_null(), None);
-    /// ```
-    pub fn as_null(&self) -> Option<()> {
-        match *self {
-            Value::Null => Some(()),
-            _ => None,
-        }
-    }
-
-    /// Looks up a value by a JSON Pointer.
-    ///
-    /// JSON Pointer defines a string syntax for identifying a specific value
-    /// within a JavaScript Object Notation (JSON) document.
-    ///
-    /// A Pointer is a Unicode string with the reference tokens separated by `/`.
-    /// Inside tokens `/` is replaced by `~1` and `~` is replaced by `~0`. The
-    /// addressed value is returned and if there is no such value `None` is
-    /// returned.
-    ///
-    /// For more information read [RFC6901](https://tools.ietf.org/html/rfc6901).
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let data = json!({
-    ///     "x": {
-    ///         "y": ["z", "zz"]
-    ///     }
-    /// });
-    ///
-    /// assert_eq!(data.pointer("/x/y/1").unwrap(), &json!("zz"));
-    /// assert_eq!(data.pointer("/a/b/c"), None);
-    /// ```
-    pub fn pointer(&self, pointer: &str) -> Option<&Value> {
-        if pointer.is_empty() {
-            return Some(self);
-        }
-        if !pointer.starts_with('/') {
-            return None;
-        }
-        pointer
-            .split('/')
-            .skip(1)
-            .map(|x| x.replace("~1", "/").replace("~0", "~"))
-            .try_fold(self, |target, token| match target {
-                Value::Object(map) => map.get(&token),
-                Value::Array(list) => parse_index(&token).and_then(|x| list.get(x)),
-                _ => None,
-            })
-    }
-
-    /// Looks up a value by a JSON Pointer and returns a mutable reference to
-    /// that value.
-    ///
-    /// JSON Pointer defines a string syntax for identifying a specific value
-    /// within a JavaScript Object Notation (JSON) document.
-    ///
-    /// A Pointer is a Unicode string with the reference tokens separated by `/`.
-    /// Inside tokens `/` is replaced by `~1` and `~` is replaced by `~0`. The
-    /// addressed value is returned and if there is no such value `None` is
-    /// returned.
-    ///
-    /// For more information read [RFC6901](https://tools.ietf.org/html/rfc6901).
-    ///
-    /// # Example of Use
-    ///
-    /// ```
-    /// use serde_json::Value;
-    ///
-    /// fn main() {
-    ///     let s = r#"{"x": 1.0, "y": 2.0}"#;
-    ///     let mut value: Value = serde_json::from_str(s).unwrap();
-    ///
-    ///     // Check value using read-only pointer
-    ///     assert_eq!(value.pointer("/x"), Some(&1.0.into()));
-    ///     // Change value with direct assignment
-    ///     *value.pointer_mut("/x").unwrap() = 1.5.into();
-    ///     // Check that new value was written
-    ///     assert_eq!(value.pointer("/x"), Some(&1.5.into()));
-    ///     // Or change the value only if it exists
-    ///     value.pointer_mut("/x").map(|v| *v = 1.5.into());
-    ///
-    ///     // "Steal" ownership of a value. Can replace with any valid Value.
-    ///     let old_x = value.pointer_mut("/x").map(Value::take).unwrap();
-    ///     assert_eq!(old_x, 1.5);
-    ///     assert_eq!(value.pointer("/x").unwrap(), &Value::Null);
-    /// }
-    /// ```
-    pub fn pointer_mut(&mut self, pointer: &str) -> Option<&mut Value> {
-        if pointer.is_empty() {
-            return Some(self);
-        }
-        if !pointer.starts_with('/') {
-            return None;
-        }
-        pointer
-            .split('/')
-            .skip(1)
-            .map(|x| x.replace("~1", "/").replace("~0", "~"))
-            .try_fold(self, |target, token| match target {
-                Value::Object(map) => map.get_mut(&token),
-                Value::Array(list) => parse_index(&token).and_then(move |x| list.get_mut(x)),
-                _ => None,
-            })
-    }
-
-    /// Takes the value out of the `Value`, leaving a `Null` in its place.
-    ///
-    /// ```
-    /// # use serde_json::json;
-    /// #
-    /// let mut v = json!({ "x": "y" });
-    /// assert_eq!(v["x"].take(), json!("y"));
-    /// assert_eq!(v, json!({ "x": null }));
-    /// ```
-    pub fn take(&mut self) -> Value {
-        mem::replace(self, Value::Null)
-    }
-
-    /// Reorders the entries of all `Value::Object` nested within this JSON
-    /// value according to `str`'s usual ordering.
-    ///
-    /// If serde_json's "preserve_order" feature is not enabled, this method
-    /// does no work because all JSON maps are always kept in a sorted state.
-    ///
-    /// If serde_json's "preserve_order" feature is enabled, this method
-    /// destroys the original source order or insertion order of the JSON
-    /// objects in favor of an alphanumerical order that matches how a BTreeMap
-    /// with the same contents would be ordered.
-    pub fn sort_all_objects(&mut self) {
-        #[cfg(feature = "preserve_order")]
-        {
-            match self {
-                Value::Object(map) => {
-                    map.sort_keys();
-                    map.values_mut().for_each(Value::sort_all_objects);
-                }
-                Value::Array(list) => {
-                    list.iter_mut().for_each(Value::sort_all_objects);
-                }
-                _ => {}
-            }
-        }
-    }
-}
-
-/// The default value is `Value::Null`.
-///
-/// This is useful for handling omitted `Value` fields when deserializing.
-///
-/// # Examples
-///
-/// ```
-/// # use serde::Deserialize;
-/// use serde_json::Value;
-///
-/// #[derive(Deserialize)]
-/// struct Settings {
-///     level: i32,
-///     #[serde(default)]
-///     extras: Value,
-/// }
-///
-/// # fn try_main() -> Result<(), serde_json::Error> {
-/// let data = r#" { "level": 42 } "#;
-/// let s: Settings = serde_json::from_str(data)?;
-///
-/// assert_eq!(s.level, 42);
-/// assert_eq!(s.extras, Value::Null);
-/// #
-/// #     Ok(())
-/// # }
-/// #
-/// # try_main().unwrap()
-/// ```
-impl Default for Value {
-    fn default() -> Value {
-        Value::Null
-    }
-}
-
-mod de;
-mod from;
-mod index;
-mod partial_eq;
-mod ser;
-
-/// Convert a `T` into `serde_json::Value` which is an enum that can represent
-/// any valid JSON data.
-///
-/// # Example
-///
-/// ```
-/// use serde::Serialize;
-/// use serde_json::json;
-/// use std::error::Error;
-///
-/// #[derive(Serialize)]
-/// struct User {
-///     fingerprint: String,
-///     location: String,
-/// }
-///
-/// fn compare_json_values() -> Result<(), Box<dyn Error>> {
-///     let u = User {
-///         fingerprint: "0xF9BA143B95FF6D82".to_owned(),
-///         location: "Menlo Park, CA".to_owned(),
-///     };
-///
-///     // The type of `expected` is `serde_json::Value`
-///     let expected = json!({
-///         "fingerprint": "0xF9BA143B95FF6D82",
-///         "location": "Menlo Park, CA",
-///     });
-///
-///     let v = serde_json::to_value(u).unwrap();
-///     assert_eq!(v, expected);
-///
-///     Ok(())
-/// }
-/// #
-/// # compare_json_values().unwrap();
-/// ```
-///
-/// # Errors
-///
-/// This conversion can fail if `T`'s implementation of `Serialize` decides to
-/// fail, or if `T` contains a map with non-string keys.
-///
-/// ```
-/// use std::collections::BTreeMap;
-///
-/// fn main() {
-///     // The keys in this map are vectors, not strings.
-///     let mut map = BTreeMap::new();
-///     map.insert(vec![32, 64], "x86");
-///
-///     println!("{}", serde_json::to_value(map).unwrap_err());
-/// }
-/// ```
-// Taking by value is more friendly to iterator adapters, option and result
-// consumers, etc. See https://github.com/serde-rs/json/pull/149.
-pub fn to_value<T>(value: T) -> Result<Value, Error>
-where
-    T: Serialize,
-{
-    value.serialize(Serializer)
-}
-
-/// Interpret a `serde_json::Value` as an instance of type `T`.
-///
-/// # Example
-///
-/// ```
-/// use serde::Deserialize;
-/// use serde_json::json;
-///
-/// #[derive(Deserialize, Debug)]
-/// struct User {
-///     fingerprint: String,
-///     location: String,
-/// }
-///
-/// fn main() {
-///     // The type of `j` is `serde_json::Value`
-///     let j = json!({
-///         "fingerprint": "0xF9BA143B95FF6D82",
-///         "location": "Menlo Park, CA"
-///     });
-///
-///     let u: User = serde_json::from_value(j).unwrap();
-///     println!("{:#?}", u);
-/// }
-/// ```
-///
-/// # Errors
-///
-/// This conversion can fail if the structure of the Value does not match the
-/// structure expected by `T`, for example if `T` is a struct type but the Value
-/// contains something other than a JSON map. It can also fail if the structure
-/// is correct but `T`'s implementation of `Deserialize` decides that something
-/// is wrong with the data, for example required struct fields are missing from
-/// the JSON map or some number is too big to fit in the expected primitive
-/// type.
-pub fn from_value<T>(value: Value) -> Result<T, Error>
-where
-    T: DeserializeOwned,
-{
-    T::deserialize(value)
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/partial_eq.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/partial_eq.rs
deleted file mode 100644
index 8626eed..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/partial_eq.rs
+++ /dev/null
@@ -1,103 +0,0 @@
-use super::Value;
-use alloc::string::String;
-
-fn eq_i64(value: &Value, other: i64) -> bool {
-    value.as_i64() == Some(other)
-}
-
-fn eq_u64(value: &Value, other: u64) -> bool {
-    value.as_u64() == Some(other)
-}
-
-fn eq_f32(value: &Value, other: f32) -> bool {
-    match value {
-        Value::Number(n) => n.as_f32() == Some(other),
-        _ => false,
-    }
-}
-
-fn eq_f64(value: &Value, other: f64) -> bool {
-    value.as_f64() == Some(other)
-}
-
-fn eq_bool(value: &Value, other: bool) -> bool {
-    value.as_bool() == Some(other)
-}
-
-fn eq_str(value: &Value, other: &str) -> bool {
-    value.as_str() == Some(other)
-}
-
-impl PartialEq<str> for Value {
-    fn eq(&self, other: &str) -> bool {
-        eq_str(self, other)
-    }
-}
-
-impl PartialEq<&str> for Value {
-    fn eq(&self, other: &&str) -> bool {
-        eq_str(self, *other)
-    }
-}
-
-impl PartialEq<Value> for str {
-    fn eq(&self, other: &Value) -> bool {
-        eq_str(other, self)
-    }
-}
-
-impl PartialEq<Value> for &str {
-    fn eq(&self, other: &Value) -> bool {
-        eq_str(other, *self)
-    }
-}
-
-impl PartialEq<String> for Value {
-    fn eq(&self, other: &String) -> bool {
-        eq_str(self, other.as_str())
-    }
-}
-
-impl PartialEq<Value> for String {
-    fn eq(&self, other: &Value) -> bool {
-        eq_str(other, self.as_str())
-    }
-}
-
-macro_rules! partialeq_numeric {
-    ($($eq:ident [$($ty:ty)*])*) => {
-        $($(
-            impl PartialEq<$ty> for Value {
-                fn eq(&self, other: &$ty) -> bool {
-                    $eq(self, *other as _)
-                }
-            }
-
-            impl PartialEq<Value> for $ty {
-                fn eq(&self, other: &Value) -> bool {
-                    $eq(other, *self as _)
-                }
-            }
-
-            impl<'a> PartialEq<$ty> for &'a Value {
-                fn eq(&self, other: &$ty) -> bool {
-                    $eq(*self, *other as _)
-                }
-            }
-
-            impl<'a> PartialEq<$ty> for &'a mut Value {
-                fn eq(&self, other: &$ty) -> bool {
-                    $eq(*self, *other as _)
-                }
-            }
-        )*)*
-    }
-}
-
-partialeq_numeric! {
-    eq_i64[i8 i16 i32 i64 isize]
-    eq_u64[u8 u16 u32 u64 usize]
-    eq_f32[f32]
-    eq_f64[f64]
-    eq_bool[bool]
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/ser.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/ser.rs
deleted file mode 100644
index 1ffe7b7..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/ser.rs
+++ /dev/null
@@ -1,1063 +0,0 @@
-use crate::error::{Error, ErrorCode, Result};
-use crate::map::Map;
-use crate::value::{to_value, Value};
-use alloc::borrow::ToOwned;
-use alloc::string::{String, ToString};
-use alloc::vec::Vec;
-use core::fmt::Display;
-use core::result;
-use serde::ser::{Impossible, Serialize};
-
-impl Serialize for Value {
-    #[inline]
-    fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
-    where
-        S: ::serde::Serializer,
-    {
-        match self {
-            Value::Null => serializer.serialize_unit(),
-            Value::Bool(b) => serializer.serialize_bool(*b),
-            Value::Number(n) => n.serialize(serializer),
-            Value::String(s) => serializer.serialize_str(s),
-            Value::Array(v) => v.serialize(serializer),
-            #[cfg(any(feature = "std", feature = "alloc"))]
-            Value::Object(m) => {
-                use serde::ser::SerializeMap;
-                let mut map = tri!(serializer.serialize_map(Some(m.len())));
-                for (k, v) in m {
-                    tri!(map.serialize_entry(k, v));
-                }
-                map.end()
-            }
-            #[cfg(not(any(feature = "std", feature = "alloc")))]
-            Value::Object(_) => unreachable!(),
-        }
-    }
-}
-
-/// Serializer whose output is a `Value`.
-///
-/// This is the serializer that backs [`serde_json::to_value`][crate::to_value].
-/// Unlike the main serde_json serializer which goes from some serializable
-/// value of type `T` to JSON text, this one goes from `T` to
-/// `serde_json::Value`.
-///
-/// The `to_value` function is implementable as:
-///
-/// ```
-/// use serde::Serialize;
-/// use serde_json::{Error, Value};
-///
-/// pub fn to_value<T>(input: T) -> Result<Value, Error>
-/// where
-///     T: Serialize,
-/// {
-///     input.serialize(serde_json::value::Serializer)
-/// }
-/// ```
-pub struct Serializer;
-
-impl serde::Serializer for Serializer {
-    type Ok = Value;
-    type Error = Error;
-
-    type SerializeSeq = SerializeVec;
-    type SerializeTuple = SerializeVec;
-    type SerializeTupleStruct = SerializeVec;
-    type SerializeTupleVariant = SerializeTupleVariant;
-    type SerializeMap = SerializeMap;
-    type SerializeStruct = SerializeMap;
-    type SerializeStructVariant = SerializeStructVariant;
-
-    #[inline]
-    fn serialize_bool(self, value: bool) -> Result<Value> {
-        Ok(Value::Bool(value))
-    }
-
-    #[inline]
-    fn serialize_i8(self, value: i8) -> Result<Value> {
-        self.serialize_i64(value as i64)
-    }
-
-    #[inline]
-    fn serialize_i16(self, value: i16) -> Result<Value> {
-        self.serialize_i64(value as i64)
-    }
-
-    #[inline]
-    fn serialize_i32(self, value: i32) -> Result<Value> {
-        self.serialize_i64(value as i64)
-    }
-
-    fn serialize_i64(self, value: i64) -> Result<Value> {
-        Ok(Value::Number(value.into()))
-    }
-
-    fn serialize_i128(self, value: i128) -> Result<Value> {
-        #[cfg(feature = "arbitrary_precision")]
-        {
-            Ok(Value::Number(value.into()))
-        }
-
-        #[cfg(not(feature = "arbitrary_precision"))]
-        {
-            if let Ok(value) = u64::try_from(value) {
-                Ok(Value::Number(value.into()))
-            } else if let Ok(value) = i64::try_from(value) {
-                Ok(Value::Number(value.into()))
-            } else {
-                Err(Error::syntax(ErrorCode::NumberOutOfRange, 0, 0))
-            }
-        }
-    }
-
-    #[inline]
-    fn serialize_u8(self, value: u8) -> Result<Value> {
-        self.serialize_u64(value as u64)
-    }
-
-    #[inline]
-    fn serialize_u16(self, value: u16) -> Result<Value> {
-        self.serialize_u64(value as u64)
-    }
-
-    #[inline]
-    fn serialize_u32(self, value: u32) -> Result<Value> {
-        self.serialize_u64(value as u64)
-    }
-
-    #[inline]
-    fn serialize_u64(self, value: u64) -> Result<Value> {
-        Ok(Value::Number(value.into()))
-    }
-
-    fn serialize_u128(self, value: u128) -> Result<Value> {
-        #[cfg(feature = "arbitrary_precision")]
-        {
-            Ok(Value::Number(value.into()))
-        }
-
-        #[cfg(not(feature = "arbitrary_precision"))]
-        {
-            if let Ok(value) = u64::try_from(value) {
-                Ok(Value::Number(value.into()))
-            } else {
-                Err(Error::syntax(ErrorCode::NumberOutOfRange, 0, 0))
-            }
-        }
-    }
-
-    #[inline]
-    fn serialize_f32(self, float: f32) -> Result<Value> {
-        Ok(Value::from(float))
-    }
-
-    #[inline]
-    fn serialize_f64(self, float: f64) -> Result<Value> {
-        Ok(Value::from(float))
-    }
-
-    #[inline]
-    fn serialize_char(self, value: char) -> Result<Value> {
-        let mut s = String::new();
-        s.push(value);
-        Ok(Value::String(s))
-    }
-
-    #[inline]
-    fn serialize_str(self, value: &str) -> Result<Value> {
-        Ok(Value::String(value.to_owned()))
-    }
-
-    fn serialize_bytes(self, value: &[u8]) -> Result<Value> {
-        let vec = value.iter().map(|&b| Value::Number(b.into())).collect();
-        Ok(Value::Array(vec))
-    }
-
-    #[inline]
-    fn serialize_unit(self) -> Result<Value> {
-        Ok(Value::Null)
-    }
-
-    #[inline]
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<Value> {
-        self.serialize_unit()
-    }
-
-    #[inline]
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-    ) -> Result<Value> {
-        self.serialize_str(variant)
-    }
-
-    #[inline]
-    fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        value.serialize(self)
-    }
-
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-        value: &T,
-    ) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        let mut values = Map::new();
-        values.insert(String::from(variant), tri!(to_value(value)));
-        Ok(Value::Object(values))
-    }
-
-    #[inline]
-    fn serialize_none(self) -> Result<Value> {
-        self.serialize_unit()
-    }
-
-    #[inline]
-    fn serialize_some<T>(self, value: &T) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        value.serialize(self)
-    }
-
-    fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq> {
-        Ok(SerializeVec {
-            vec: Vec::with_capacity(len.unwrap_or(0)),
-        })
-    }
-
-    fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple> {
-        self.serialize_seq(Some(len))
-    }
-
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        self.serialize_seq(Some(len))
-    }
-
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-        len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        Ok(SerializeTupleVariant {
-            name: String::from(variant),
-            vec: Vec::with_capacity(len),
-        })
-    }
-
-    fn serialize_map(self, len: Option<usize>) -> Result<Self::SerializeMap> {
-        Ok(SerializeMap::Map {
-            map: Map::with_capacity(len.unwrap_or(0)),
-            next_key: None,
-        })
-    }
-
-    fn serialize_struct(self, name: &'static str, len: usize) -> Result<Self::SerializeStruct> {
-        match name {
-            #[cfg(feature = "arbitrary_precision")]
-            crate::number::TOKEN => Ok(SerializeMap::Number { out_value: None }),
-            #[cfg(feature = "raw_value")]
-            crate::raw::TOKEN => Ok(SerializeMap::RawValue { out_value: None }),
-            _ => self.serialize_map(Some(len)),
-        }
-    }
-
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        Ok(SerializeStructVariant {
-            name: String::from(variant),
-            map: Map::new(),
-        })
-    }
-
-    fn collect_str<T>(self, value: &T) -> Result<Value>
-    where
-        T: ?Sized + Display,
-    {
-        Ok(Value::String(value.to_string()))
-    }
-}
-
-pub struct SerializeVec {
-    vec: Vec<Value>,
-}
-
-pub struct SerializeTupleVariant {
-    name: String,
-    vec: Vec<Value>,
-}
-
-pub enum SerializeMap {
-    Map {
-        map: Map<String, Value>,
-        next_key: Option<String>,
-    },
-    #[cfg(feature = "arbitrary_precision")]
-    Number { out_value: Option<Value> },
-    #[cfg(feature = "raw_value")]
-    RawValue { out_value: Option<Value> },
-}
-
-pub struct SerializeStructVariant {
-    name: String,
-    map: Map<String, Value>,
-}
-
-impl serde::ser::SerializeSeq for SerializeVec {
-    type Ok = Value;
-    type Error = Error;
-
-    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        self.vec.push(tri!(to_value(value)));
-        Ok(())
-    }
-
-    fn end(self) -> Result<Value> {
-        Ok(Value::Array(self.vec))
-    }
-}
-
-impl serde::ser::SerializeTuple for SerializeVec {
-    type Ok = Value;
-    type Error = Error;
-
-    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        serde::ser::SerializeSeq::serialize_element(self, value)
-    }
-
-    fn end(self) -> Result<Value> {
-        serde::ser::SerializeSeq::end(self)
-    }
-}
-
-impl serde::ser::SerializeTupleStruct for SerializeVec {
-    type Ok = Value;
-    type Error = Error;
-
-    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        serde::ser::SerializeSeq::serialize_element(self, value)
-    }
-
-    fn end(self) -> Result<Value> {
-        serde::ser::SerializeSeq::end(self)
-    }
-}
-
-impl serde::ser::SerializeTupleVariant for SerializeTupleVariant {
-    type Ok = Value;
-    type Error = Error;
-
-    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        self.vec.push(tri!(to_value(value)));
-        Ok(())
-    }
-
-    fn end(self) -> Result<Value> {
-        let mut object = Map::new();
-
-        object.insert(self.name, Value::Array(self.vec));
-
-        Ok(Value::Object(object))
-    }
-}
-
-impl serde::ser::SerializeMap for SerializeMap {
-    type Ok = Value;
-    type Error = Error;
-
-    fn serialize_key<T>(&mut self, key: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match self {
-            SerializeMap::Map { next_key, .. } => {
-                *next_key = Some(tri!(key.serialize(MapKeySerializer)));
-                Ok(())
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            SerializeMap::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            SerializeMap::RawValue { .. } => unreachable!(),
-        }
-    }
-
-    fn serialize_value<T>(&mut self, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match self {
-            SerializeMap::Map { map, next_key } => {
-                let key = next_key.take();
-                // Panic because this indicates a bug in the program rather than an
-                // expected failure.
-                let key = key.expect("serialize_value called before serialize_key");
-                map.insert(key, tri!(to_value(value)));
-                Ok(())
-            }
-            #[cfg(feature = "arbitrary_precision")]
-            SerializeMap::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            SerializeMap::RawValue { .. } => unreachable!(),
-        }
-    }
-
-    fn end(self) -> Result<Value> {
-        match self {
-            SerializeMap::Map { map, .. } => Ok(Value::Object(map)),
-            #[cfg(feature = "arbitrary_precision")]
-            SerializeMap::Number { .. } => unreachable!(),
-            #[cfg(feature = "raw_value")]
-            SerializeMap::RawValue { .. } => unreachable!(),
-        }
-    }
-}
-
-struct MapKeySerializer;
-
-fn key_must_be_a_string() -> Error {
-    Error::syntax(ErrorCode::KeyMustBeAString, 0, 0)
-}
-
-fn float_key_must_be_finite() -> Error {
-    Error::syntax(ErrorCode::FloatKeyMustBeFinite, 0, 0)
-}
-
-impl serde::Serializer for MapKeySerializer {
-    type Ok = String;
-    type Error = Error;
-
-    type SerializeSeq = Impossible<String, Error>;
-    type SerializeTuple = Impossible<String, Error>;
-    type SerializeTupleStruct = Impossible<String, Error>;
-    type SerializeTupleVariant = Impossible<String, Error>;
-    type SerializeMap = Impossible<String, Error>;
-    type SerializeStruct = Impossible<String, Error>;
-    type SerializeStructVariant = Impossible<String, Error>;
-
-    #[inline]
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        variant: &'static str,
-    ) -> Result<String> {
-        Ok(variant.to_owned())
-    }
-
-    #[inline]
-    fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<String>
-    where
-        T: ?Sized + Serialize,
-    {
-        value.serialize(self)
-    }
-
-    fn serialize_bool(self, value: bool) -> Result<String> {
-        Ok(if value { "true" } else { "false" }.to_owned())
-    }
-
-    fn serialize_i8(self, value: i8) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_i16(self, value: i16) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_i32(self, value: i32) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_i64(self, value: i64) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_i128(self, value: i128) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_u8(self, value: u8) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_u16(self, value: u16) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_u32(self, value: u32) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_u64(self, value: u64) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_u128(self, value: u128) -> Result<String> {
-        Ok(itoa::Buffer::new().format(value).to_owned())
-    }
-
-    fn serialize_f32(self, value: f32) -> Result<String> {
-        if value.is_finite() {
-            Ok(ryu::Buffer::new().format_finite(value).to_owned())
-        } else {
-            Err(float_key_must_be_finite())
-        }
-    }
-
-    fn serialize_f64(self, value: f64) -> Result<String> {
-        if value.is_finite() {
-            Ok(ryu::Buffer::new().format_finite(value).to_owned())
-        } else {
-            Err(float_key_must_be_finite())
-        }
-    }
-
-    #[inline]
-    fn serialize_char(self, value: char) -> Result<String> {
-        Ok({
-            let mut s = String::new();
-            s.push(value);
-            s
-        })
-    }
-
-    #[inline]
-    fn serialize_str(self, value: &str) -> Result<String> {
-        Ok(value.to_owned())
-    }
-
-    fn serialize_bytes(self, _value: &[u8]) -> Result<String> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_unit(self) -> Result<String> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<String> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _value: &T,
-    ) -> Result<String>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_none(self) -> Result<String> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_some<T>(self, _value: &T) -> Result<String>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> {
-        Err(key_must_be_a_string())
-    }
-
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        Err(key_must_be_a_string())
-    }
-
-    fn collect_str<T>(self, value: &T) -> Result<String>
-    where
-        T: ?Sized + Display,
-    {
-        Ok(value.to_string())
-    }
-}
-
-impl serde::ser::SerializeStruct for SerializeMap {
-    type Ok = Value;
-    type Error = Error;
-
-    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        match self {
-            SerializeMap::Map { .. } => serde::ser::SerializeMap::serialize_entry(self, key, value),
-            #[cfg(feature = "arbitrary_precision")]
-            SerializeMap::Number { out_value } => {
-                if key == crate::number::TOKEN {
-                    *out_value = Some(tri!(value.serialize(NumberValueEmitter)));
-                    Ok(())
-                } else {
-                    Err(invalid_number())
-                }
-            }
-            #[cfg(feature = "raw_value")]
-            SerializeMap::RawValue { out_value } => {
-                if key == crate::raw::TOKEN {
-                    *out_value = Some(tri!(value.serialize(RawValueEmitter)));
-                    Ok(())
-                } else {
-                    Err(invalid_raw_value())
-                }
-            }
-        }
-    }
-
-    fn end(self) -> Result<Value> {
-        match self {
-            SerializeMap::Map { .. } => serde::ser::SerializeMap::end(self),
-            #[cfg(feature = "arbitrary_precision")]
-            SerializeMap::Number { out_value, .. } => {
-                Ok(out_value.expect("number value was not emitted"))
-            }
-            #[cfg(feature = "raw_value")]
-            SerializeMap::RawValue { out_value, .. } => {
-                Ok(out_value.expect("raw value was not emitted"))
-            }
-        }
-    }
-}
-
-impl serde::ser::SerializeStructVariant for SerializeStructVariant {
-    type Ok = Value;
-    type Error = Error;
-
-    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
-    where
-        T: ?Sized + Serialize,
-    {
-        self.map.insert(String::from(key), tri!(to_value(value)));
-        Ok(())
-    }
-
-    fn end(self) -> Result<Value> {
-        let mut object = Map::new();
-
-        object.insert(self.name, Value::Object(self.map));
-
-        Ok(Value::Object(object))
-    }
-}
-
-#[cfg(feature = "arbitrary_precision")]
-struct NumberValueEmitter;
-
-#[cfg(feature = "arbitrary_precision")]
-fn invalid_number() -> Error {
-    Error::syntax(ErrorCode::InvalidNumber, 0, 0)
-}
-
-#[cfg(feature = "arbitrary_precision")]
-impl serde::ser::Serializer for NumberValueEmitter {
-    type Ok = Value;
-    type Error = Error;
-
-    type SerializeSeq = Impossible<Value, Error>;
-    type SerializeTuple = Impossible<Value, Error>;
-    type SerializeTupleStruct = Impossible<Value, Error>;
-    type SerializeTupleVariant = Impossible<Value, Error>;
-    type SerializeMap = Impossible<Value, Error>;
-    type SerializeStruct = Impossible<Value, Error>;
-    type SerializeStructVariant = Impossible<Value, Error>;
-
-    fn serialize_bool(self, _v: bool) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i8(self, _v: i8) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i16(self, _v: i16) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i32(self, _v: i32) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_i64(self, _v: i64) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u8(self, _v: u8) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u16(self, _v: u16) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u32(self, _v: u32) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_u64(self, _v: u64) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_f32(self, _v: f32) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_f64(self, _v: f64) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_char(self, _v: char) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_str(self, value: &str) -> Result<Value> {
-        let n = tri!(value.to_owned().parse());
-        Ok(Value::Number(n))
-    }
-
-    fn serialize_bytes(self, _value: &[u8]) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_none(self) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_some<T>(self, _value: &T) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_number())
-    }
-
-    fn serialize_unit(self) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-    ) -> Result<Value> {
-        Err(invalid_number())
-    }
-
-    fn serialize_newtype_struct<T>(self, _name: &'static str, _value: &T) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_number())
-    }
-
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _value: &T,
-    ) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_number())
-    }
-
-    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> {
-        Err(invalid_number())
-    }
-
-    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> {
-        Err(invalid_number())
-    }
-
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        Err(invalid_number())
-    }
-
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        Err(invalid_number())
-    }
-
-    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
-        Err(invalid_number())
-    }
-
-    fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> {
-        Err(invalid_number())
-    }
-
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        Err(invalid_number())
-    }
-}
-
-#[cfg(feature = "raw_value")]
-struct RawValueEmitter;
-
-#[cfg(feature = "raw_value")]
-fn invalid_raw_value() -> Error {
-    Error::syntax(ErrorCode::ExpectedSomeValue, 0, 0)
-}
-
-#[cfg(feature = "raw_value")]
-impl serde::ser::Serializer for RawValueEmitter {
-    type Ok = Value;
-    type Error = Error;
-
-    type SerializeSeq = Impossible<Value, Error>;
-    type SerializeTuple = Impossible<Value, Error>;
-    type SerializeTupleStruct = Impossible<Value, Error>;
-    type SerializeTupleVariant = Impossible<Value, Error>;
-    type SerializeMap = Impossible<Value, Error>;
-    type SerializeStruct = Impossible<Value, Error>;
-    type SerializeStructVariant = Impossible<Value, Error>;
-
-    fn serialize_bool(self, _v: bool) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_i8(self, _v: i8) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_i16(self, _v: i16) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_i32(self, _v: i32) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_i64(self, _v: i64) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_u8(self, _v: u8) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_u16(self, _v: u16) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_u32(self, _v: u32) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_u64(self, _v: u64) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_f32(self, _v: f32) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_f64(self, _v: f64) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_char(self, _v: char) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_str(self, value: &str) -> Result<Value> {
-        crate::from_str(value)
-    }
-
-    fn serialize_bytes(self, _value: &[u8]) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_none(self) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_some<T>(self, _value: &T) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_unit(self) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_unit_struct(self, _name: &'static str) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_unit_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-    ) -> Result<Value> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_newtype_struct<T>(self, _name: &'static str, _value: &T) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_newtype_variant<T>(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _value: &T,
-    ) -> Result<Value>
-    where
-        T: ?Sized + Serialize,
-    {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_tuple_struct(
-        self,
-        _name: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleStruct> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_tuple_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeTupleVariant> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> {
-        Err(invalid_raw_value())
-    }
-
-    fn serialize_struct_variant(
-        self,
-        _name: &'static str,
-        _variant_index: u32,
-        _variant: &'static str,
-        _len: usize,
-    ) -> Result<Self::SerializeStructVariant> {
-        Err(invalid_raw_value())
-    }
-
-    fn collect_str<T>(self, value: &T) -> Result<Self::Ok>
-    where
-        T: ?Sized + Display,
-    {
-        self.serialize_str(&value.to_string())
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/compiletest.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/compiletest.rs
deleted file mode 100644
index 23a6a06..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/compiletest.rs
+++ /dev/null
@@ -1,7 +0,0 @@
-#[rustversion::attr(not(nightly), ignore = "requires nightly")]
-#[cfg_attr(miri, ignore = "incompatible with miri")]
-#[test]
-fn ui() {
-    let t = trybuild::TestCases::new();
-    t.compile_fail("tests/ui/*.rs");
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/debug.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/debug.rs
deleted file mode 100644
index 8ddcf5a3..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/debug.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-use indoc::indoc;
-use serde_json::{json, Number, Value};
-
-#[test]
-fn number() {
-    assert_eq!(format!("{:?}", Number::from(1)), "Number(1)");
-    assert_eq!(format!("{:?}", Number::from(-1)), "Number(-1)");
-    assert_eq!(
-        format!("{:?}", Number::from_f64(1.0).unwrap()),
-        "Number(1.0)"
-    );
-}
-
-#[test]
-fn value_null() {
-    assert_eq!(format!("{:?}", json!(null)), "Null");
-}
-
-#[test]
-fn value_bool() {
-    assert_eq!(format!("{:?}", json!(true)), "Bool(true)");
-    assert_eq!(format!("{:?}", json!(false)), "Bool(false)");
-}
-
-#[test]
-fn value_number() {
-    assert_eq!(format!("{:?}", json!(1)), "Number(1)");
-    assert_eq!(format!("{:?}", json!(-1)), "Number(-1)");
-    assert_eq!(format!("{:?}", json!(1.0)), "Number(1.0)");
-    assert_eq!(Number::from_f64(1.0).unwrap().to_string(), "1.0"); // not just "1"
-    assert_eq!(Number::from_f64(12e40).unwrap().to_string(), "1.2e41");
-}
-
-#[test]
-fn value_string() {
-    assert_eq!(format!("{:?}", json!("s")), "String(\"s\")");
-}
-
-#[test]
-fn value_array() {
-    assert_eq!(format!("{:?}", json!([])), "Array []");
-}
-
-#[test]
-fn value_object() {
-    assert_eq!(format!("{:?}", json!({})), "Object {}");
-}
-
-#[test]
-fn error() {
-    let err = serde_json::from_str::<Value>("{0}").unwrap_err();
-    let expected = "Error(\"key must be a string\", line: 1, column: 2)";
-    assert_eq!(format!("{:?}", err), expected);
-}
-
-#[test]
-fn indented() {
-    let j = json!({
-        "Array": [true],
-        "Bool": true,
-        "EmptyArray": [],
-        "EmptyObject": {},
-        "Null": null,
-        "Number": 1,
-        "String": "...",
-    });
-    let expected = indoc! {r#"
-        Object {
-            "Array": Array [
-                Bool(true),
-            ],
-            "Bool": Bool(true),
-            "EmptyArray": Array [],
-            "EmptyObject": Object {},
-            "Null": Null,
-            "Number": Number(1),
-            "String": String("..."),
-        }"#
-    };
-    assert_eq!(format!("{:#?}", j), expected);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical.rs
deleted file mode 100644
index 7796795..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-#![allow(
-    clippy::cast_lossless,
-    clippy::cast_possible_truncation,
-    clippy::cast_possible_wrap,
-    clippy::cast_precision_loss,
-    clippy::cast_sign_loss,
-    clippy::comparison_chain,
-    clippy::doc_markdown,
-    clippy::excessive_precision,
-    clippy::float_cmp,
-    clippy::if_not_else,
-    clippy::let_underscore_untyped,
-    clippy::module_name_repetitions,
-    clippy::needless_late_init,
-    clippy::question_mark,
-    clippy::shadow_unrelated,
-    clippy::similar_names,
-    clippy::single_match_else,
-    clippy::too_many_lines,
-    clippy::unreadable_literal,
-    clippy::unseparated_literal_suffix,
-    clippy::wildcard_imports
-)]
-
-extern crate alloc;
-
-#[path = "../src/lexical/mod.rs"]
-mod lexical;
-
-#[path = "lexical/algorithm.rs"]
-mod algorithm;
-
-#[path = "lexical/exponent.rs"]
-mod exponent;
-
-#[path = "lexical/float.rs"]
-mod float;
-
-#[path = "lexical/math.rs"]
-mod math;
-
-#[path = "lexical/num.rs"]
-mod num;
-
-#[path = "lexical/parse.rs"]
-mod parse;
-
-#[path = "lexical/rounding.rs"]
-mod rounding;
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/algorithm.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/algorithm.rs
deleted file mode 100644
index 7f3a2c6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/algorithm.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use crate::lexical::algorithm::*;
-use crate::lexical::num::Float;
-
-#[test]
-fn float_fast_path_test() {
-    // valid
-    let mantissa = (1 << f32::MANTISSA_SIZE) - 1;
-    let (min_exp, max_exp) = f32::exponent_limit();
-    for exp in min_exp..=max_exp {
-        let f = fast_path::<f32>(mantissa, exp);
-        assert!(f.is_some(), "should be valid {:?}.", (mantissa, exp));
-    }
-
-    // Check slightly above valid exponents
-    let f = fast_path::<f32>(123, 15);
-    assert_eq!(f, Some(1.23e+17));
-
-    // Exponent is 1 too high, pushes over the mantissa.
-    let f = fast_path::<f32>(123, 16);
-    assert!(f.is_none());
-
-    // Mantissa is too large, checked_mul should overflow.
-    let f = fast_path::<f32>(mantissa, 11);
-    assert!(f.is_none());
-
-    // invalid exponents
-    let (min_exp, max_exp) = f32::exponent_limit();
-    let f = fast_path::<f32>(mantissa, min_exp - 1);
-    assert!(f.is_none(), "exponent under min_exp");
-
-    let f = fast_path::<f32>(mantissa, max_exp + 1);
-    assert!(f.is_none(), "exponent above max_exp");
-}
-
-#[test]
-fn double_fast_path_test() {
-    // valid
-    let mantissa = (1 << f64::MANTISSA_SIZE) - 1;
-    let (min_exp, max_exp) = f64::exponent_limit();
-    for exp in min_exp..=max_exp {
-        let f = fast_path::<f64>(mantissa, exp);
-        assert!(f.is_some(), "should be valid {:?}.", (mantissa, exp));
-    }
-
-    // invalid exponents
-    let (min_exp, max_exp) = f64::exponent_limit();
-    let f = fast_path::<f64>(mantissa, min_exp - 1);
-    assert!(f.is_none(), "exponent under min_exp");
-
-    let f = fast_path::<f64>(mantissa, max_exp + 1);
-    assert!(f.is_none(), "exponent above max_exp");
-
-    assert_eq!(
-        Some(0.04628372940652459),
-        fast_path::<f64>(4628372940652459, -17)
-    );
-    assert_eq!(None, fast_path::<f64>(26383446160308229, -272));
-}
-
-#[test]
-fn moderate_path_test() {
-    let (f, valid) = moderate_path::<f64>(1234567890, -1, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.0);
-
-    let (f, valid) = moderate_path::<f64>(1234567891, -1, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.1);
-
-    let (f, valid) = moderate_path::<f64>(12345678912, -2, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.12);
-
-    let (f, valid) = moderate_path::<f64>(123456789123, -3, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.123);
-
-    let (f, valid) = moderate_path::<f64>(1234567891234, -4, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.1234);
-
-    let (f, valid) = moderate_path::<f64>(12345678912345, -5, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.12345);
-
-    let (f, valid) = moderate_path::<f64>(123456789123456, -6, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.123456);
-
-    let (f, valid) = moderate_path::<f64>(1234567891234567, -7, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.1234567);
-
-    let (f, valid) = moderate_path::<f64>(12345678912345679, -8, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 123456789.12345679);
-
-    let (f, valid) = moderate_path::<f64>(4628372940652459, -17, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 0.04628372940652459);
-
-    let (f, valid) = moderate_path::<f64>(26383446160308229, -272, false);
-    assert!(valid, "should be valid");
-    assert_eq!(f.into_float::<f64>(), 2.6383446160308229e-256);
-
-    let (_, valid) = moderate_path::<f64>(26383446160308230, -272, false);
-    assert!(!valid, "should be invalid");
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/exponent.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/exponent.rs
deleted file mode 100644
index c109ff0..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/exponent.rs
+++ /dev/null
@@ -1,36 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use crate::lexical::exponent::*;
-
-#[test]
-fn scientific_exponent_test() {
-    // 0 digits in the integer
-    assert_eq!(scientific_exponent(0, 0, 5), -6);
-    assert_eq!(scientific_exponent(10, 0, 5), 4);
-    assert_eq!(scientific_exponent(-10, 0, 5), -16);
-
-    // >0 digits in the integer
-    assert_eq!(scientific_exponent(0, 1, 5), 0);
-    assert_eq!(scientific_exponent(0, 2, 5), 1);
-    assert_eq!(scientific_exponent(0, 2, 20), 1);
-    assert_eq!(scientific_exponent(10, 2, 20), 11);
-    assert_eq!(scientific_exponent(-10, 2, 20), -9);
-
-    // Underflow
-    assert_eq!(scientific_exponent(i32::MIN, 0, 0), i32::MIN);
-    assert_eq!(scientific_exponent(i32::MIN, 0, 5), i32::MIN);
-
-    // Overflow
-    assert_eq!(scientific_exponent(i32::MAX, 0, 0), i32::MAX - 1);
-    assert_eq!(scientific_exponent(i32::MAX, 5, 0), i32::MAX);
-}
-
-#[test]
-fn mantissa_exponent_test() {
-    assert_eq!(mantissa_exponent(10, 5, 0), 5);
-    assert_eq!(mantissa_exponent(0, 5, 0), -5);
-    assert_eq!(mantissa_exponent(i32::MAX, 5, 0), i32::MAX - 5);
-    assert_eq!(mantissa_exponent(i32::MAX, 0, 5), i32::MAX);
-    assert_eq!(mantissa_exponent(i32::MIN, 5, 0), i32::MIN);
-    assert_eq!(mantissa_exponent(i32::MIN, 0, 5), i32::MIN + 5);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/float.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/float.rs
deleted file mode 100644
index c87e7e1..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/float.rs
+++ /dev/null
@@ -1,581 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use crate::lexical::float::ExtendedFloat;
-use crate::lexical::rounding::round_nearest_tie_even;
-use std::{f32, f64};
-
-// NORMALIZE
-
-fn check_normalize(mant: u64, exp: i32, shift: u32, r_mant: u64, r_exp: i32) {
-    let mut x = ExtendedFloat { mant, exp };
-    assert_eq!(x.normalize(), shift);
-    assert_eq!(
-        x,
-        ExtendedFloat {
-            mant: r_mant,
-            exp: r_exp
-        }
-    );
-}
-
-#[test]
-fn normalize_test() {
-    // F32
-    // 0
-    check_normalize(0, 0, 0, 0, 0);
-
-    // min value
-    check_normalize(1, -149, 63, 9223372036854775808, -212);
-
-    // 1.0e-40
-    check_normalize(71362, -149, 47, 10043308644012916736, -196);
-
-    // 1.0e-20
-    check_normalize(12379400, -90, 40, 13611294244890214400, -130);
-
-    // 1.0
-    check_normalize(8388608, -23, 40, 9223372036854775808, -63);
-
-    // 1e20
-    check_normalize(11368684, 43, 40, 12500000250510966784, 3);
-
-    // max value
-    check_normalize(16777213, 104, 40, 18446740775174668288, 64);
-
-    // F64
-
-    // min value
-    check_normalize(1, -1074, 63, 9223372036854775808, -1137);
-
-    // 1.0e-250
-    check_normalize(6448907850777164, -883, 11, 13207363278391631872, -894);
-
-    // 1.0e-150
-    check_normalize(7371020360979573, -551, 11, 15095849699286165504, -562);
-
-    // 1.0e-45
-    check_normalize(6427752177035961, -202, 11, 13164036458569648128, -213);
-
-    // 1.0e-40
-    check_normalize(4903985730770844, -185, 11, 10043362776618688512, -196);
-
-    // 1.0e-20
-    check_normalize(6646139978924579, -119, 11, 13611294676837537792, -130);
-
-    // 1.0
-    check_normalize(4503599627370496, -52, 11, 9223372036854775808, -63);
-
-    // 1e20
-    check_normalize(6103515625000000, 14, 11, 12500000000000000000, 3);
-
-    // 1e40
-    check_normalize(8271806125530277, 80, 11, 16940658945086007296, 69);
-
-    // 1e150
-    check_normalize(5503284107318959, 446, 11, 11270725851789228032, 435);
-
-    // 1e250
-    check_normalize(6290184345309700, 778, 11, 12882297539194265600, 767);
-
-    // max value
-    check_normalize(9007199254740991, 971, 11, 18446744073709549568, 960);
-}
-
-// ROUND
-
-fn check_round_to_f32(mant: u64, exp: i32, r_mant: u64, r_exp: i32) {
-    let mut x = ExtendedFloat { mant, exp };
-    x.round_to_native::<f32, _>(round_nearest_tie_even);
-    assert_eq!(
-        x,
-        ExtendedFloat {
-            mant: r_mant,
-            exp: r_exp
-        }
-    );
-}
-
-#[test]
-fn round_to_f32_test() {
-    // This is lossy, so some of these values are **slightly** rounded.
-
-    // underflow
-    check_round_to_f32(9223372036854775808, -213, 0, -149);
-
-    // min value
-    check_round_to_f32(9223372036854775808, -212, 1, -149);
-
-    // 1.0e-40
-    check_round_to_f32(10043308644012916736, -196, 71362, -149);
-
-    // 1.0e-20
-    check_round_to_f32(13611294244890214400, -130, 12379400, -90);
-
-    // 1.0
-    check_round_to_f32(9223372036854775808, -63, 8388608, -23);
-
-    // 1e20
-    check_round_to_f32(12500000250510966784, 3, 11368684, 43);
-
-    // max value
-    check_round_to_f32(18446740775174668288, 64, 16777213, 104);
-
-    // overflow
-    check_round_to_f32(18446740775174668288, 65, 16777213, 105);
-}
-
-fn check_round_to_f64(mant: u64, exp: i32, r_mant: u64, r_exp: i32) {
-    let mut x = ExtendedFloat { mant, exp };
-    x.round_to_native::<f64, _>(round_nearest_tie_even);
-    assert_eq!(
-        x,
-        ExtendedFloat {
-            mant: r_mant,
-            exp: r_exp
-        }
-    );
-}
-
-#[test]
-fn round_to_f64_test() {
-    // This is lossy, so some of these values are **slightly** rounded.
-
-    // underflow
-    check_round_to_f64(9223372036854775808, -1138, 0, -1074);
-
-    // min value
-    check_round_to_f64(9223372036854775808, -1137, 1, -1074);
-
-    // 1.0e-250
-    check_round_to_f64(15095849699286165504, -562, 7371020360979573, -551);
-
-    // 1.0e-150
-    check_round_to_f64(15095849699286165504, -562, 7371020360979573, -551);
-
-    // 1.0e-45
-    check_round_to_f64(13164036458569648128, -213, 6427752177035961, -202);
-
-    // 1.0e-40
-    check_round_to_f64(10043362776618688512, -196, 4903985730770844, -185);
-
-    // 1.0e-20
-    check_round_to_f64(13611294676837537792, -130, 6646139978924579, -119);
-
-    // 1.0
-    check_round_to_f64(9223372036854775808, -63, 4503599627370496, -52);
-
-    // 1e20
-    check_round_to_f64(12500000000000000000, 3, 6103515625000000, 14);
-
-    // 1e40
-    check_round_to_f64(16940658945086007296, 69, 8271806125530277, 80);
-
-    // 1e150
-    check_round_to_f64(11270725851789228032, 435, 5503284107318959, 446);
-
-    // 1e250
-    check_round_to_f64(12882297539194265600, 767, 6290184345309700, 778);
-
-    // max value
-    check_round_to_f64(18446744073709549568, 960, 9007199254740991, 971);
-
-    // Bug fixes
-    // 1.2345e-308
-    check_round_to_f64(10234494226754558294, -1086, 2498655817078750, -1074);
-}
-
-fn assert_normalized_eq(mut x: ExtendedFloat, mut y: ExtendedFloat) {
-    x.normalize();
-    y.normalize();
-    assert_eq!(x, y);
-}
-
-#[test]
-fn from_float() {
-    let values: [f32; 26] = [
-        1e-40, 2e-40, 1e-35, 2e-35, 1e-30, 2e-30, 1e-25, 2e-25, 1e-20, 2e-20, 1e-15, 2e-15, 1e-10,
-        2e-10, 1e-5, 2e-5, 1.0, 2.0, 1e5, 2e5, 1e10, 2e10, 1e15, 2e15, 1e20, 2e20,
-    ];
-    for value in &values {
-        assert_normalized_eq(
-            ExtendedFloat::from_float(*value),
-            ExtendedFloat::from_float(*value as f64),
-        );
-    }
-}
-
-// TO
-
-// Sample of interesting numbers to check during standard test builds.
-const INTEGERS: [u64; 32] = [
-    0,                    // 0x0
-    1,                    // 0x1
-    7,                    // 0x7
-    15,                   // 0xF
-    112,                  // 0x70
-    119,                  // 0x77
-    127,                  // 0x7F
-    240,                  // 0xF0
-    247,                  // 0xF7
-    255,                  // 0xFF
-    2032,                 // 0x7F0
-    2039,                 // 0x7F7
-    2047,                 // 0x7FF
-    4080,                 // 0xFF0
-    4087,                 // 0xFF7
-    4095,                 // 0xFFF
-    65520,                // 0xFFF0
-    65527,                // 0xFFF7
-    65535,                // 0xFFFF
-    1048560,              // 0xFFFF0
-    1048567,              // 0xFFFF7
-    1048575,              // 0xFFFFF
-    16777200,             // 0xFFFFF0
-    16777207,             // 0xFFFFF7
-    16777215,             // 0xFFFFFF
-    268435440,            // 0xFFFFFF0
-    268435447,            // 0xFFFFFF7
-    268435455,            // 0xFFFFFFF
-    4294967280,           // 0xFFFFFFF0
-    4294967287,           // 0xFFFFFFF7
-    4294967295,           // 0xFFFFFFFF
-    18446744073709551615, // 0xFFFFFFFFFFFFFFFF
-];
-
-#[test]
-fn to_f32_test() {
-    // underflow
-    let x = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -213,
-    };
-    assert_eq!(x.into_float::<f32>(), 0.0);
-
-    // min value
-    let x = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -212,
-    };
-    assert_eq!(x.into_float::<f32>(), 1e-45);
-
-    // 1.0e-40
-    let x = ExtendedFloat {
-        mant: 10043308644012916736,
-        exp: -196,
-    };
-    assert_eq!(x.into_float::<f32>(), 1e-40);
-
-    // 1.0e-20
-    let x = ExtendedFloat {
-        mant: 13611294244890214400,
-        exp: -130,
-    };
-    assert_eq!(x.into_float::<f32>(), 1e-20);
-
-    // 1.0
-    let x = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -63,
-    };
-    assert_eq!(x.into_float::<f32>(), 1.0);
-
-    // 1e20
-    let x = ExtendedFloat {
-        mant: 12500000250510966784,
-        exp: 3,
-    };
-    assert_eq!(x.into_float::<f32>(), 1e20);
-
-    // max value
-    let x = ExtendedFloat {
-        mant: 18446740775174668288,
-        exp: 64,
-    };
-    assert_eq!(x.into_float::<f32>(), 3.402823e38);
-
-    // almost max, high exp
-    let x = ExtendedFloat {
-        mant: 1048575,
-        exp: 108,
-    };
-    assert_eq!(x.into_float::<f32>(), 3.4028204e38);
-
-    // max value + 1
-    let x = ExtendedFloat {
-        mant: 16777216,
-        exp: 104,
-    };
-    assert_eq!(x.into_float::<f32>(), f32::INFINITY);
-
-    // max value + 1
-    let x = ExtendedFloat {
-        mant: 1048576,
-        exp: 108,
-    };
-    assert_eq!(x.into_float::<f32>(), f32::INFINITY);
-
-    // 1e40
-    let x = ExtendedFloat {
-        mant: 16940658945086007296,
-        exp: 69,
-    };
-    assert_eq!(x.into_float::<f32>(), f32::INFINITY);
-
-    // Integers.
-    for int in &INTEGERS {
-        let fp = ExtendedFloat { mant: *int, exp: 0 };
-        assert_eq!(fp.into_float::<f32>(), *int as f32, "{:?} as f32", *int);
-    }
-}
-
-#[test]
-fn to_f64_test() {
-    // underflow
-    let x = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -1138,
-    };
-    assert_eq!(x.into_float::<f64>(), 0.0);
-
-    // min value
-    let x = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -1137,
-    };
-    assert_eq!(x.into_float::<f64>(), 5e-324);
-
-    // 1.0e-250
-    let x = ExtendedFloat {
-        mant: 13207363278391631872,
-        exp: -894,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e-250);
-
-    // 1.0e-150
-    let x = ExtendedFloat {
-        mant: 15095849699286165504,
-        exp: -562,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e-150);
-
-    // 1.0e-45
-    let x = ExtendedFloat {
-        mant: 13164036458569648128,
-        exp: -213,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e-45);
-
-    // 1.0e-40
-    let x = ExtendedFloat {
-        mant: 10043362776618688512,
-        exp: -196,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e-40);
-
-    // 1.0e-20
-    let x = ExtendedFloat {
-        mant: 13611294676837537792,
-        exp: -130,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e-20);
-
-    // 1.0
-    let x = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -63,
-    };
-    assert_eq!(x.into_float::<f64>(), 1.0);
-
-    // 1e20
-    let x = ExtendedFloat {
-        mant: 12500000000000000000,
-        exp: 3,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e20);
-
-    // 1e40
-    let x = ExtendedFloat {
-        mant: 16940658945086007296,
-        exp: 69,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e40);
-
-    // 1e150
-    let x = ExtendedFloat {
-        mant: 11270725851789228032,
-        exp: 435,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e150);
-
-    // 1e250
-    let x = ExtendedFloat {
-        mant: 12882297539194265600,
-        exp: 767,
-    };
-    assert_eq!(x.into_float::<f64>(), 1e250);
-
-    // max value
-    let x = ExtendedFloat {
-        mant: 9007199254740991,
-        exp: 971,
-    };
-    assert_eq!(x.into_float::<f64>(), 1.7976931348623157e308);
-
-    // max value
-    let x = ExtendedFloat {
-        mant: 18446744073709549568,
-        exp: 960,
-    };
-    assert_eq!(x.into_float::<f64>(), 1.7976931348623157e308);
-
-    // overflow
-    let x = ExtendedFloat {
-        mant: 9007199254740992,
-        exp: 971,
-    };
-    assert_eq!(x.into_float::<f64>(), f64::INFINITY);
-
-    // overflow
-    let x = ExtendedFloat {
-        mant: 18446744073709549568,
-        exp: 961,
-    };
-    assert_eq!(x.into_float::<f64>(), f64::INFINITY);
-
-    // Underflow
-    // Adapted from failures in strtod.
-    let x = ExtendedFloat {
-        exp: -1139,
-        mant: 18446744073709550712,
-    };
-    assert_eq!(x.into_float::<f64>(), 0.0);
-
-    let x = ExtendedFloat {
-        exp: -1139,
-        mant: 18446744073709551460,
-    };
-    assert_eq!(x.into_float::<f64>(), 0.0);
-
-    let x = ExtendedFloat {
-        exp: -1138,
-        mant: 9223372036854776103,
-    };
-    assert_eq!(x.into_float::<f64>(), 5e-324);
-
-    // Integers.
-    for int in &INTEGERS {
-        let fp = ExtendedFloat { mant: *int, exp: 0 };
-        assert_eq!(fp.into_float::<f64>(), *int as f64, "{:?} as f64", *int);
-    }
-}
-
-// OPERATIONS
-
-fn check_mul(a: ExtendedFloat, b: ExtendedFloat, c: ExtendedFloat) {
-    let r = a.mul(&b);
-    assert_eq!(r, c);
-}
-
-#[test]
-fn mul_test() {
-    // Normalized (64-bit mantissa)
-    let a = ExtendedFloat {
-        mant: 13164036458569648128,
-        exp: -213,
-    };
-    let b = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -62,
-    };
-    let c = ExtendedFloat {
-        mant: 6582018229284824064,
-        exp: -211,
-    };
-    check_mul(a, b, c);
-
-    // Check with integers
-    // 64-bit mantissa
-    let mut a = ExtendedFloat { mant: 10, exp: 0 };
-    let mut b = ExtendedFloat { mant: 10, exp: 0 };
-    a.normalize();
-    b.normalize();
-    assert_eq!(a.mul(&b).into_float::<f64>(), 100.0);
-
-    // Check both values need high bits set.
-    let a = ExtendedFloat {
-        mant: 1 << 32,
-        exp: -31,
-    };
-    let b = ExtendedFloat {
-        mant: 1 << 32,
-        exp: -31,
-    };
-    assert_eq!(a.mul(&b).into_float::<f64>(), 4.0);
-
-    // Check both values need high bits set.
-    let a = ExtendedFloat {
-        mant: 10 << 31,
-        exp: -31,
-    };
-    let b = ExtendedFloat {
-        mant: 10 << 31,
-        exp: -31,
-    };
-    assert_eq!(a.mul(&b).into_float::<f64>(), 100.0);
-}
-
-fn check_imul(mut a: ExtendedFloat, b: ExtendedFloat, c: ExtendedFloat) {
-    a.imul(&b);
-    assert_eq!(a, c);
-}
-
-#[test]
-fn imul_test() {
-    // Normalized (64-bit mantissa)
-    let a = ExtendedFloat {
-        mant: 13164036458569648128,
-        exp: -213,
-    };
-    let b = ExtendedFloat {
-        mant: 9223372036854775808,
-        exp: -62,
-    };
-    let c = ExtendedFloat {
-        mant: 6582018229284824064,
-        exp: -211,
-    };
-    check_imul(a, b, c);
-
-    // Check with integers
-    // 64-bit mantissa
-    let mut a = ExtendedFloat { mant: 10, exp: 0 };
-    let mut b = ExtendedFloat { mant: 10, exp: 0 };
-    a.normalize();
-    b.normalize();
-    a.imul(&b);
-    assert_eq!(a.into_float::<f64>(), 100.0);
-
-    // Check both values need high bits set.
-    let mut a = ExtendedFloat {
-        mant: 1 << 32,
-        exp: -31,
-    };
-    let b = ExtendedFloat {
-        mant: 1 << 32,
-        exp: -31,
-    };
-    a.imul(&b);
-    assert_eq!(a.into_float::<f64>(), 4.0);
-
-    // Check both values need high bits set.
-    let mut a = ExtendedFloat {
-        mant: 10 << 31,
-        exp: -31,
-    };
-    let b = ExtendedFloat {
-        mant: 10 << 31,
-        exp: -31,
-    };
-    a.imul(&b);
-    assert_eq!(a.into_float::<f64>(), 100.0);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/math.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/math.rs
deleted file mode 100644
index 454eaa6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/math.rs
+++ /dev/null
@@ -1,211 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use crate::lexical::math::{Limb, Math};
-use std::cmp;
-
-#[derive(Clone, Default)]
-struct Bigint {
-    data: Vec<Limb>,
-}
-
-impl Math for Bigint {
-    fn data(&self) -> &Vec<Limb> {
-        &self.data
-    }
-
-    fn data_mut(&mut self) -> &mut Vec<Limb> {
-        &mut self.data
-    }
-}
-
-#[cfg(fast_arithmetic = "32")]
-pub(crate) fn from_u32(x: &[u32]) -> Vec<Limb> {
-    x.iter().cloned().collect()
-}
-
-#[cfg(fast_arithmetic = "64")]
-pub(crate) fn from_u32(x: &[u32]) -> Vec<Limb> {
-    let mut v = Vec::<Limb>::default();
-    for xi in x.chunks(2) {
-        match xi.len() {
-            1 => v.push(xi[0] as u64),
-            2 => v.push(((xi[1] as u64) << 32) | (xi[0] as u64)),
-            _ => unreachable!(),
-        }
-    }
-
-    v
-}
-
-#[test]
-fn compare_test() {
-    // Simple
-    let x = Bigint {
-        data: from_u32(&[1]),
-    };
-    let y = Bigint {
-        data: from_u32(&[2]),
-    };
-    assert_eq!(x.compare(&y), cmp::Ordering::Less);
-    assert_eq!(x.compare(&x), cmp::Ordering::Equal);
-    assert_eq!(y.compare(&x), cmp::Ordering::Greater);
-
-    // Check asymmetric
-    let x = Bigint {
-        data: from_u32(&[5, 1]),
-    };
-    let y = Bigint {
-        data: from_u32(&[2]),
-    };
-    assert_eq!(x.compare(&y), cmp::Ordering::Greater);
-    assert_eq!(x.compare(&x), cmp::Ordering::Equal);
-    assert_eq!(y.compare(&x), cmp::Ordering::Less);
-
-    // Check when we use reverse ordering properly.
-    let x = Bigint {
-        data: from_u32(&[5, 1, 9]),
-    };
-    let y = Bigint {
-        data: from_u32(&[6, 2, 8]),
-    };
-    assert_eq!(x.compare(&y), cmp::Ordering::Greater);
-    assert_eq!(x.compare(&x), cmp::Ordering::Equal);
-    assert_eq!(y.compare(&x), cmp::Ordering::Less);
-
-    // Complex scenario, check it properly uses reverse ordering.
-    let x = Bigint {
-        data: from_u32(&[0, 1, 9]),
-    };
-    let y = Bigint {
-        data: from_u32(&[4294967295, 0, 9]),
-    };
-    assert_eq!(x.compare(&y), cmp::Ordering::Greater);
-    assert_eq!(x.compare(&x), cmp::Ordering::Equal);
-    assert_eq!(y.compare(&x), cmp::Ordering::Less);
-}
-
-#[test]
-fn hi64_test() {
-    assert_eq!(Bigint::from_u64(0xA).hi64(), (0xA000000000000000, false));
-    assert_eq!(Bigint::from_u64(0xAB).hi64(), (0xAB00000000000000, false));
-    assert_eq!(
-        Bigint::from_u64(0xAB00000000).hi64(),
-        (0xAB00000000000000, false)
-    );
-    assert_eq!(
-        Bigint::from_u64(0xA23456789A).hi64(),
-        (0xA23456789A000000, false)
-    );
-}
-
-#[test]
-fn bit_length_test() {
-    let x = Bigint {
-        data: from_u32(&[0, 0, 0, 1]),
-    };
-    assert_eq!(x.bit_length(), 97);
-
-    let x = Bigint {
-        data: from_u32(&[0, 0, 0, 3]),
-    };
-    assert_eq!(x.bit_length(), 98);
-
-    let x = Bigint {
-        data: from_u32(&[1 << 31]),
-    };
-    assert_eq!(x.bit_length(), 32);
-}
-
-#[test]
-fn iadd_small_test() {
-    // Overflow check (single)
-    // This should set all the internal data values to 0, the top
-    // value to (1<<31), and the bottom value to (4>>1).
-    // This is because the max_value + 1 leads to all 0s, we set the
-    // topmost bit to 1.
-    let mut x = Bigint {
-        data: from_u32(&[4294967295]),
-    };
-    x.iadd_small(5);
-    assert_eq!(x.data, from_u32(&[4, 1]));
-
-    // No overflow, single value
-    let mut x = Bigint {
-        data: from_u32(&[5]),
-    };
-    x.iadd_small(7);
-    assert_eq!(x.data, from_u32(&[12]));
-
-    // Single carry, internal overflow
-    let mut x = Bigint::from_u64(0x80000000FFFFFFFF);
-    x.iadd_small(7);
-    assert_eq!(x.data, from_u32(&[6, 0x80000001]));
-
-    // Double carry, overflow
-    let mut x = Bigint::from_u64(0xFFFFFFFFFFFFFFFF);
-    x.iadd_small(7);
-    assert_eq!(x.data, from_u32(&[6, 0, 1]));
-}
-
-#[test]
-fn imul_small_test() {
-    // No overflow check, 1-int.
-    let mut x = Bigint {
-        data: from_u32(&[5]),
-    };
-    x.imul_small(7);
-    assert_eq!(x.data, from_u32(&[35]));
-
-    // No overflow check, 2-ints.
-    let mut x = Bigint::from_u64(0x4000000040000);
-    x.imul_small(5);
-    assert_eq!(x.data, from_u32(&[0x00140000, 0x140000]));
-
-    // Overflow, 1 carry.
-    let mut x = Bigint {
-        data: from_u32(&[0x33333334]),
-    };
-    x.imul_small(5);
-    assert_eq!(x.data, from_u32(&[4, 1]));
-
-    // Overflow, 1 carry, internal.
-    let mut x = Bigint::from_u64(0x133333334);
-    x.imul_small(5);
-    assert_eq!(x.data, from_u32(&[4, 6]));
-
-    // Overflow, 2 carries.
-    let mut x = Bigint::from_u64(0x3333333333333334);
-    x.imul_small(5);
-    assert_eq!(x.data, from_u32(&[4, 0, 1]));
-}
-
-#[test]
-fn shl_test() {
-    // Pattern generated via `''.join(["1" +"0"*i for i in range(20)])`
-    let mut big = Bigint {
-        data: from_u32(&[0xD2210408]),
-    };
-    big.ishl(5);
-    assert_eq!(big.data, from_u32(&[0x44208100, 0x1A]));
-    big.ishl(32);
-    assert_eq!(big.data, from_u32(&[0, 0x44208100, 0x1A]));
-    big.ishl(27);
-    assert_eq!(big.data, from_u32(&[0, 0, 0xD2210408]));
-
-    // 96-bits of previous pattern
-    let mut big = Bigint {
-        data: from_u32(&[0x20020010, 0x8040100, 0xD2210408]),
-    };
-    big.ishl(5);
-    assert_eq!(big.data, from_u32(&[0x400200, 0x802004, 0x44208101, 0x1A]));
-    big.ishl(32);
-    assert_eq!(
-        big.data,
-        from_u32(&[0, 0x400200, 0x802004, 0x44208101, 0x1A])
-    );
-    big.ishl(27);
-    assert_eq!(
-        big.data,
-        from_u32(&[0, 0, 0x20020010, 0x8040100, 0xD2210408])
-    );
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/num.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/num.rs
deleted file mode 100644
index e7d08652..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/num.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use crate::lexical::num::{AsPrimitive, Float, Integer, Number};
-
-fn check_as_primitive<T: AsPrimitive>(t: T) {
-    let _: u32 = t.as_u32();
-    let _: u64 = t.as_u64();
-    let _: u128 = t.as_u128();
-    let _: usize = t.as_usize();
-    let _: f32 = t.as_f32();
-    let _: f64 = t.as_f64();
-}
-
-#[test]
-fn as_primitive_test() {
-    check_as_primitive(1u32);
-    check_as_primitive(1u64);
-    check_as_primitive(1u128);
-    check_as_primitive(1usize);
-    check_as_primitive(1f32);
-    check_as_primitive(1f64);
-}
-
-fn check_number<T: Number>(x: T, y: T) {
-    // Copy, partialeq, partialord
-    let _ = x;
-    assert!(x < y);
-    assert!(x != y);
-
-    // Operations
-    let _ = y + x;
-
-    // Conversions already tested.
-}
-
-#[test]
-fn number_test() {
-    check_number(1u32, 5);
-    check_number(1u64, 5);
-    check_number(1u128, 5);
-    check_number(1usize, 5);
-    check_number(1f32, 5.0);
-    check_number(1f64, 5.0);
-}
-
-fn check_integer<T: Integer>(x: T) {
-    // Bitwise operations
-    let _ = x & T::ZERO;
-}
-
-#[test]
-fn integer_test() {
-    check_integer(65u32);
-    check_integer(65u64);
-    check_integer(65u128);
-    check_integer(65usize);
-}
-
-fn check_float<T: Float>(x: T) {
-    // Check functions
-    let _ = x.pow10(5);
-    let _ = x.to_bits();
-    assert!(T::from_bits(x.to_bits()) == x);
-
-    // Check properties
-    let _ = x.to_bits() & T::EXPONENT_MASK;
-    let _ = x.to_bits() & T::HIDDEN_BIT_MASK;
-    let _ = x.to_bits() & T::MANTISSA_MASK;
-}
-
-#[test]
-fn float_test() {
-    check_float(123f32);
-    check_float(123f64);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/parse.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/parse.rs
deleted file mode 100644
index 03ec1a9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/parse.rs
+++ /dev/null
@@ -1,204 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use crate::lexical::num::Float;
-use crate::lexical::{parse_concise_float, parse_truncated_float};
-use core::f64;
-use core::fmt::Debug;
-
-fn check_concise_float<F>(mantissa: u64, exponent: i32, expected: F)
-where
-    F: Float + Debug,
-{
-    assert_eq!(parse_concise_float::<F>(mantissa, exponent), expected);
-}
-
-fn check_truncated_float<F>(integer: &str, fraction: &str, exponent: i32, expected: F)
-where
-    F: Float + Debug,
-{
-    let integer = integer.as_bytes();
-    let fraction = fraction.as_bytes();
-    assert_eq!(
-        parse_truncated_float::<F>(integer, fraction, exponent),
-        expected,
-    );
-}
-
-#[test]
-fn parse_f32_test() {
-    check_concise_float(0, 0, 0.0_f32);
-    check_concise_float(12345, -4, 1.2345_f32);
-    check_concise_float(12345, -3, 12.345_f32);
-    check_concise_float(123456789, -4, 12345.6789_f32);
-    check_concise_float(12345, 6, 1.2345e10_f32);
-    check_concise_float(12345, -42, 1.2345e-38_f32);
-
-    // Check expected rounding, using borderline cases.
-    // Round-down, halfway
-    check_concise_float(16777216, 0, 16777216.0_f32);
-    check_concise_float(16777217, 0, 16777216.0_f32);
-    check_concise_float(16777218, 0, 16777218.0_f32);
-    check_concise_float(33554432, 0, 33554432.0_f32);
-    check_concise_float(33554434, 0, 33554432.0_f32);
-    check_concise_float(33554436, 0, 33554436.0_f32);
-    check_concise_float(17179869184, 0, 17179869184.0_f32);
-    check_concise_float(17179870208, 0, 17179869184.0_f32);
-    check_concise_float(17179871232, 0, 17179871232.0_f32);
-
-    // Round-up, halfway
-    check_concise_float(16777218, 0, 16777218.0_f32);
-    check_concise_float(16777219, 0, 16777220.0_f32);
-    check_concise_float(16777220, 0, 16777220.0_f32);
-
-    check_concise_float(33554436, 0, 33554436.0_f32);
-    check_concise_float(33554438, 0, 33554440.0_f32);
-    check_concise_float(33554440, 0, 33554440.0_f32);
-
-    check_concise_float(17179871232, 0, 17179871232.0_f32);
-    check_concise_float(17179872256, 0, 17179873280.0_f32);
-    check_concise_float(17179873280, 0, 17179873280.0_f32);
-
-    // Round-up, above halfway
-    check_concise_float(33554435, 0, 33554436.0_f32);
-    check_concise_float(17179870209, 0, 17179871232.0_f32);
-
-    // Check exactly halfway, round-up at halfway
-    check_truncated_float("1", "00000017881393432617187499", 0, 1.0000001_f32);
-    check_truncated_float("1", "000000178813934326171875", 0, 1.0000002_f32);
-    check_truncated_float("1", "00000017881393432617187501", 0, 1.0000002_f32);
-}
-
-#[test]
-fn parse_f64_test() {
-    check_concise_float(0, 0, 0.0_f64);
-    check_concise_float(12345, -4, 1.2345_f64);
-    check_concise_float(12345, -3, 12.345_f64);
-    check_concise_float(123456789, -4, 12345.6789_f64);
-    check_concise_float(12345, 6, 1.2345e10_f64);
-    check_concise_float(12345, -312, 1.2345e-308_f64);
-
-    // Check expected rounding, using borderline cases.
-    // Round-down, halfway
-    check_concise_float(9007199254740992, 0, 9007199254740992.0_f64);
-    check_concise_float(9007199254740993, 0, 9007199254740992.0_f64);
-    check_concise_float(9007199254740994, 0, 9007199254740994.0_f64);
-
-    check_concise_float(18014398509481984, 0, 18014398509481984.0_f64);
-    check_concise_float(18014398509481986, 0, 18014398509481984.0_f64);
-    check_concise_float(18014398509481988, 0, 18014398509481988.0_f64);
-
-    check_concise_float(9223372036854775808, 0, 9223372036854775808.0_f64);
-    check_concise_float(9223372036854776832, 0, 9223372036854775808.0_f64);
-    check_concise_float(9223372036854777856, 0, 9223372036854777856.0_f64);
-
-    check_truncated_float(
-        "11417981541647679048466287755595961091061972992",
-        "",
-        0,
-        11417981541647679048466287755595961091061972992.0_f64,
-    );
-    check_truncated_float(
-        "11417981541647680316116887983825362587765178368",
-        "",
-        0,
-        11417981541647679048466287755595961091061972992.0_f64,
-    );
-    check_truncated_float(
-        "11417981541647681583767488212054764084468383744",
-        "",
-        0,
-        11417981541647681583767488212054764084468383744.0_f64,
-    );
-
-    // Round-up, halfway
-    check_concise_float(9007199254740994, 0, 9007199254740994.0_f64);
-    check_concise_float(9007199254740995, 0, 9007199254740996.0_f64);
-    check_concise_float(9007199254740996, 0, 9007199254740996.0_f64);
-
-    check_concise_float(18014398509481988, 0, 18014398509481988.0_f64);
-    check_concise_float(18014398509481990, 0, 18014398509481992.0_f64);
-    check_concise_float(18014398509481992, 0, 18014398509481992.0_f64);
-
-    check_concise_float(9223372036854777856, 0, 9223372036854777856.0_f64);
-    check_concise_float(9223372036854778880, 0, 9223372036854779904.0_f64);
-    check_concise_float(9223372036854779904, 0, 9223372036854779904.0_f64);
-
-    check_truncated_float(
-        "11417981541647681583767488212054764084468383744",
-        "",
-        0,
-        11417981541647681583767488212054764084468383744.0_f64,
-    );
-    check_truncated_float(
-        "11417981541647682851418088440284165581171589120",
-        "",
-        0,
-        11417981541647684119068688668513567077874794496.0_f64,
-    );
-    check_truncated_float(
-        "11417981541647684119068688668513567077874794496",
-        "",
-        0,
-        11417981541647684119068688668513567077874794496.0_f64,
-    );
-
-    // Round-up, above halfway
-    check_concise_float(9223372036854776833, 0, 9223372036854777856.0_f64);
-    check_truncated_float(
-        "11417981541647680316116887983825362587765178369",
-        "",
-        0,
-        11417981541647681583767488212054764084468383744.0_f64,
-    );
-
-    // Rounding error
-    // Adapted from failures in strtod.
-    check_concise_float(22250738585072014, -324, 2.2250738585072014e-308_f64);
-    check_truncated_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187499", -308, 2.225073858507201e-308_f64);
-    check_truncated_float("2", "22507385850720113605740979670913197593481954635164564802342610972482222202107694551652952390813508791414915891303962110687008643869459464552765720740782062174337998814106326732925355228688137214901298112245145188984905722230728525513315575501591439747639798341180199932396254828901710708185069063066665599493827577257201576306269066333264756530000924588831643303777979186961204949739037782970490505108060994073026293712895895000358379996720725430436028407889577179615094551674824347103070260914462157228988025818254518032570701886087211312807951223342628836862232150377566662250398253433597456888442390026549819838548794829220689472168983109969836584681402285424333066033985088644580400103493397042756718644338377048603786162277173854562306587467901408672332763671875", -308, 2.2250738585072014e-308_f64);
-    check_truncated_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187501", -308, 2.2250738585072014e-308_f64);
-    check_truncated_float("179769313486231580793728971405303415079934132710037826936173778980444968292764750946649017977587207096330286416692887910946555547851940402630657488671505820681908902000708383676273854845817711531764475730270069855571366959622842914819860834936475292719074168444365510704342711559699508093042880177904174497791", "9999999999999999999999999999999999999999999999999999999999999999999999", 0, 1.7976931348623157e+308_f64);
-    check_truncated_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984374999", -324, 5.0e-324_f64);
-    check_truncated_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375", -324, 1.0e-323_f64);
-    check_truncated_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375001", -324, 1.0e-323_f64);
-    check_truncated_float("", "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328125", 0, 0.0_f64);
-
-    // Rounding error
-    // Adapted from:
-    //  https://www.exploringbinary.com/how-glibc-strtod-works/
-    check_truncated_float("", "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000022250738585072008890245868760858598876504231122409594654935248025624400092282356951787758888037591552642309780950434312085877387158357291821993020294379224223559819827501242041788969571311791082261043971979604000454897391938079198936081525613113376149842043271751033627391549782731594143828136275113838604094249464942286316695429105080201815926642134996606517803095075913058719846423906068637102005108723282784678843631944515866135041223479014792369585208321597621066375401613736583044193603714778355306682834535634005074073040135602968046375918583163124224521599262546494300836851861719422417646455137135420132217031370496583210154654068035397417906022589503023501937519773030945763173210852507299305089761582519159720757232455434770912461317493580281734466552734375", 0, 2.2250738585072011e-308_f64);
-
-    // Rounding error
-    // Adapted from test-parse-random failures.
-    check_concise_float(1009, -31, 1.009e-28_f64);
-    check_concise_float(18294, 304, f64::INFINITY);
-
-    // Rounding error
-    // Adapted from a @dangrabcad's issue #20.
-    check_concise_float(7689539722041643, 149, 7.689539722041643e164_f64);
-    check_truncated_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "", 0, 7.689539722041643e164_f64);
-    check_truncated_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 7.689539722041643e164_f64);
-
-    // Check other cases similar to @dangrabcad's issue #20.
-    check_truncated_float("9223372036854776833", "0", 0, 9223372036854777856.0_f64);
-    check_truncated_float(
-        "11417981541647680316116887983825362587765178369",
-        "0",
-        0,
-        11417981541647681583767488212054764084468383744.0_f64,
-    );
-    check_concise_float(90071992547409950, -1, 9007199254740996.0_f64);
-    check_concise_float(180143985094819900, -1, 18014398509481992.0_f64);
-    check_truncated_float("9223372036854778880", "0", 0, 9223372036854779904.0_f64);
-    check_truncated_float(
-        "11417981541647682851418088440284165581171589120",
-        "0",
-        0,
-        11417981541647684119068688668513567077874794496.0_f64,
-    );
-
-    // Check other cases ostensibly identified via proptest.
-    check_truncated_float("71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64);
-    check_truncated_float("126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64);
-    check_truncated_float("38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/rounding.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/rounding.rs
deleted file mode 100644
index 7ea1771..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/lexical/rounding.rs
+++ /dev/null
@@ -1,316 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use crate::lexical::float::ExtendedFloat;
-use crate::lexical::num::Float;
-use crate::lexical::rounding::*;
-
-// MASKS
-
-#[test]
-fn lower_n_mask_test() {
-    assert_eq!(lower_n_mask(0u64), 0b0);
-    assert_eq!(lower_n_mask(1u64), 0b1);
-    assert_eq!(lower_n_mask(2u64), 0b11);
-    assert_eq!(lower_n_mask(10u64), 0b1111111111);
-    assert_eq!(lower_n_mask(32u64), 0b11111111111111111111111111111111);
-}
-
-#[test]
-fn lower_n_halfway_test() {
-    assert_eq!(lower_n_halfway(0u64), 0b0);
-    assert_eq!(lower_n_halfway(1u64), 0b1);
-    assert_eq!(lower_n_halfway(2u64), 0b10);
-    assert_eq!(lower_n_halfway(10u64), 0b1000000000);
-    assert_eq!(lower_n_halfway(32u64), 0b10000000000000000000000000000000);
-}
-
-#[test]
-fn nth_bit_test() {
-    assert_eq!(nth_bit(0u64), 0b1);
-    assert_eq!(nth_bit(1u64), 0b10);
-    assert_eq!(nth_bit(2u64), 0b100);
-    assert_eq!(nth_bit(10u64), 0b10000000000);
-    assert_eq!(nth_bit(31u64), 0b10000000000000000000000000000000);
-}
-
-#[test]
-fn internal_n_mask_test() {
-    assert_eq!(internal_n_mask(1u64, 0u64), 0b0);
-    assert_eq!(internal_n_mask(1u64, 1u64), 0b1);
-    assert_eq!(internal_n_mask(2u64, 1u64), 0b10);
-    assert_eq!(internal_n_mask(4u64, 2u64), 0b1100);
-    assert_eq!(internal_n_mask(10u64, 2u64), 0b1100000000);
-    assert_eq!(internal_n_mask(10u64, 4u64), 0b1111000000);
-    assert_eq!(
-        internal_n_mask(32u64, 4u64),
-        0b11110000000000000000000000000000
-    );
-}
-
-// NEAREST ROUNDING
-
-#[test]
-fn round_nearest_test() {
-    // Check exactly halfway (b'1100000')
-    let mut fp = ExtendedFloat { mant: 0x60, exp: 0 };
-    let (above, halfway) = round_nearest(&mut fp, 6);
-    assert!(!above);
-    assert!(halfway);
-    assert_eq!(fp.mant, 1);
-
-    // Check above halfway (b'1100001')
-    let mut fp = ExtendedFloat { mant: 0x61, exp: 0 };
-    let (above, halfway) = round_nearest(&mut fp, 6);
-    assert!(above);
-    assert!(!halfway);
-    assert_eq!(fp.mant, 1);
-
-    // Check below halfway (b'1011111')
-    let mut fp = ExtendedFloat { mant: 0x5F, exp: 0 };
-    let (above, halfway) = round_nearest(&mut fp, 6);
-    assert!(!above);
-    assert!(!halfway);
-    assert_eq!(fp.mant, 1);
-}
-
-// DIRECTED ROUNDING
-
-#[test]
-fn round_downward_test() {
-    // b0000000
-    let mut fp = ExtendedFloat { mant: 0x00, exp: 0 };
-    round_downward(&mut fp, 6);
-    assert_eq!(fp.mant, 0);
-
-    // b1000000
-    let mut fp = ExtendedFloat { mant: 0x40, exp: 0 };
-    round_downward(&mut fp, 6);
-    assert_eq!(fp.mant, 1);
-
-    // b1100000
-    let mut fp = ExtendedFloat { mant: 0x60, exp: 0 };
-    round_downward(&mut fp, 6);
-    assert_eq!(fp.mant, 1);
-
-    // b1110000
-    let mut fp = ExtendedFloat { mant: 0x70, exp: 0 };
-    round_downward(&mut fp, 6);
-    assert_eq!(fp.mant, 1);
-}
-
-#[test]
-fn round_nearest_tie_even_test() {
-    // Check round-up, halfway
-    let mut fp = ExtendedFloat { mant: 0x60, exp: 0 };
-    round_nearest_tie_even(&mut fp, 6);
-    assert_eq!(fp.mant, 2);
-
-    // Check round-down, halfway
-    let mut fp = ExtendedFloat { mant: 0x20, exp: 0 };
-    round_nearest_tie_even(&mut fp, 6);
-    assert_eq!(fp.mant, 0);
-
-    // Check round-up, above halfway
-    let mut fp = ExtendedFloat { mant: 0x61, exp: 0 };
-    round_nearest_tie_even(&mut fp, 6);
-    assert_eq!(fp.mant, 2);
-
-    let mut fp = ExtendedFloat { mant: 0x21, exp: 0 };
-    round_nearest_tie_even(&mut fp, 6);
-    assert_eq!(fp.mant, 1);
-
-    // Check round-down, below halfway
-    let mut fp = ExtendedFloat { mant: 0x5F, exp: 0 };
-    round_nearest_tie_even(&mut fp, 6);
-    assert_eq!(fp.mant, 1);
-
-    let mut fp = ExtendedFloat { mant: 0x1F, exp: 0 };
-    round_nearest_tie_even(&mut fp, 6);
-    assert_eq!(fp.mant, 0);
-}
-
-// HIGH-LEVEL
-
-#[test]
-fn round_to_float_test() {
-    // Denormal
-    let mut fp = ExtendedFloat {
-        mant: 1 << 63,
-        exp: f64::DENORMAL_EXPONENT - 15,
-    };
-    round_to_float::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 1 << 48);
-    assert_eq!(fp.exp, f64::DENORMAL_EXPONENT);
-
-    // Halfway, round-down (b'1000000000000000000000000000000000000000000000000000010000000000')
-    let mut fp = ExtendedFloat {
-        mant: 0x8000000000000400,
-        exp: -63,
-    };
-    round_to_float::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 1 << 52);
-    assert_eq!(fp.exp, -52);
-
-    // Halfway, round-up (b'1000000000000000000000000000000000000000000000000000110000000000')
-    let mut fp = ExtendedFloat {
-        mant: 0x8000000000000C00,
-        exp: -63,
-    };
-    round_to_float::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 2);
-    assert_eq!(fp.exp, -52);
-
-    // Above halfway
-    let mut fp = ExtendedFloat {
-        mant: 0x8000000000000401,
-        exp: -63,
-    };
-    round_to_float::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 1);
-    assert_eq!(fp.exp, -52);
-
-    let mut fp = ExtendedFloat {
-        mant: 0x8000000000000C01,
-        exp: -63,
-    };
-    round_to_float::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 2);
-    assert_eq!(fp.exp, -52);
-
-    // Below halfway
-    let mut fp = ExtendedFloat {
-        mant: 0x80000000000003FF,
-        exp: -63,
-    };
-    round_to_float::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 1 << 52);
-    assert_eq!(fp.exp, -52);
-
-    let mut fp = ExtendedFloat {
-        mant: 0x8000000000000BFF,
-        exp: -63,
-    };
-    round_to_float::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 1);
-    assert_eq!(fp.exp, -52);
-}
-
-#[test]
-fn avoid_overflow_test() {
-    // Avoid overflow, fails by 1
-    let mut fp = ExtendedFloat {
-        mant: 0xFFFFFFFFFFFF,
-        exp: f64::MAX_EXPONENT + 5,
-    };
-    avoid_overflow::<f64>(&mut fp);
-    assert_eq!(fp.mant, 0xFFFFFFFFFFFF);
-    assert_eq!(fp.exp, f64::MAX_EXPONENT + 5);
-
-    // Avoid overflow, succeeds
-    let mut fp = ExtendedFloat {
-        mant: 0xFFFFFFFFFFFF,
-        exp: f64::MAX_EXPONENT + 4,
-    };
-    avoid_overflow::<f64>(&mut fp);
-    assert_eq!(fp.mant, 0x1FFFFFFFFFFFE0);
-    assert_eq!(fp.exp, f64::MAX_EXPONENT - 1);
-}
-
-#[test]
-fn round_to_native_test() {
-    // Overflow
-    let mut fp = ExtendedFloat {
-        mant: 0xFFFFFFFFFFFF,
-        exp: f64::MAX_EXPONENT + 4,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 0x1FFFFFFFFFFFE0);
-    assert_eq!(fp.exp, f64::MAX_EXPONENT - 1);
-
-    // Need denormal
-    let mut fp = ExtendedFloat {
-        mant: 1,
-        exp: f64::DENORMAL_EXPONENT + 48,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 1 << 48);
-    assert_eq!(fp.exp, f64::DENORMAL_EXPONENT);
-
-    // Halfway, round-down (b'10000000000000000000000000000000000000000000000000000100000')
-    let mut fp = ExtendedFloat {
-        mant: 0x400000000000020,
-        exp: -58,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 1 << 52);
-    assert_eq!(fp.exp, -52);
-
-    // Halfway, round-up (b'10000000000000000000000000000000000000000000000000001100000')
-    let mut fp = ExtendedFloat {
-        mant: 0x400000000000060,
-        exp: -58,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 2);
-    assert_eq!(fp.exp, -52);
-
-    // Above halfway
-    let mut fp = ExtendedFloat {
-        mant: 0x400000000000021,
-        exp: -58,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 1);
-    assert_eq!(fp.exp, -52);
-
-    let mut fp = ExtendedFloat {
-        mant: 0x400000000000061,
-        exp: -58,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 2);
-    assert_eq!(fp.exp, -52);
-
-    // Below halfway
-    let mut fp = ExtendedFloat {
-        mant: 0x40000000000001F,
-        exp: -58,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 1 << 52);
-    assert_eq!(fp.exp, -52);
-
-    let mut fp = ExtendedFloat {
-        mant: 0x40000000000005F,
-        exp: -58,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, (1 << 52) + 1);
-    assert_eq!(fp.exp, -52);
-
-    // Underflow
-    // Adapted from failures in strtod.
-    let mut fp = ExtendedFloat {
-        exp: -1139,
-        mant: 18446744073709550712,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 0);
-    assert_eq!(fp.exp, 0);
-
-    let mut fp = ExtendedFloat {
-        exp: -1139,
-        mant: 18446744073709551460,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 0);
-    assert_eq!(fp.exp, 0);
-
-    let mut fp = ExtendedFloat {
-        exp: -1138,
-        mant: 9223372036854776103,
-    };
-    round_to_native::<f64, _>(&mut fp, round_nearest_tie_even);
-    assert_eq!(fp.mant, 1);
-    assert_eq!(fp.exp, -1074);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/macros/mod.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/macros/mod.rs
deleted file mode 100644
index aaf820f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/macros/mod.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-#![allow(unused_macro_rules)]
-
-macro_rules! json_str {
-    ([]) => {
-        "[]"
-    };
-    ([ $e0:tt $(, $e:tt)* $(,)? ]) => {
-        concat!("[",
-            json_str!($e0),
-            $(",", json_str!($e),)*
-        "]")
-    };
-    ({}) => {
-        "{}"
-    };
-    ({ $k0:tt : $v0:tt $(, $k:tt : $v:tt)* $(,)? }) => {
-        concat!("{",
-            stringify!($k0), ":", json_str!($v0),
-            $(",", stringify!($k), ":", json_str!($v),)*
-        "}")
-    };
-    (($other:tt)) => {
-        $other
-    };
-    ($other:tt) => {
-        stringify!($other)
-    };
-}
-
-macro_rules! pretty_str {
-    ($json:tt) => {
-        pretty_str_impl!("", $json)
-    };
-}
-
-macro_rules! pretty_str_impl {
-    ($indent:expr, []) => {
-        "[]"
-    };
-    ($indent:expr, [ $e0:tt $(, $e:tt)* $(,)? ]) => {
-        concat!("[\n  ",
-            $indent, pretty_str_impl!(concat!("  ", $indent), $e0),
-            $(",\n  ", $indent, pretty_str_impl!(concat!("  ", $indent), $e),)*
-        "\n", $indent, "]")
-    };
-    ($indent:expr, {}) => {
-        "{}"
-    };
-    ($indent:expr, { $k0:tt : $v0:tt $(, $k:tt : $v:tt)* $(,)? }) => {
-        concat!("{\n  ",
-            $indent, stringify!($k0), ": ", pretty_str_impl!(concat!("  ", $indent), $v0),
-            $(",\n  ", $indent, stringify!($k), ": ", pretty_str_impl!(concat!("  ", $indent), $v),)*
-        "\n", $indent, "}")
-    };
-    ($indent:expr, ($other:tt)) => {
-        $other
-    };
-    ($indent:expr, $other:tt) => {
-        stringify!($other)
-    };
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/map.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/map.rs
deleted file mode 100644
index aa3cb25..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/map.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-use serde_json::{from_str, Map, Value};
-
-#[test]
-fn test_preserve_order() {
-    // Sorted order
-    #[cfg(not(feature = "preserve_order"))]
-    const EXPECTED: &[&str] = &["a", "b", "c"];
-
-    // Insertion order
-    #[cfg(feature = "preserve_order")]
-    const EXPECTED: &[&str] = &["b", "a", "c"];
-
-    let v: Value = from_str(r#"{"b":null,"a":null,"c":null}"#).unwrap();
-    let keys: Vec<_> = v.as_object().unwrap().keys().collect();
-    assert_eq!(keys, EXPECTED);
-}
-
-#[test]
-#[cfg(feature = "preserve_order")]
-fn test_shift_insert() {
-    let mut v: Value = from_str(r#"{"b":null,"a":null,"c":null}"#).unwrap();
-    let val = v.as_object_mut().unwrap();
-    val.shift_insert(0, "d".to_owned(), Value::Null);
-
-    let keys: Vec<_> = val.keys().collect();
-    assert_eq!(keys, &["d", "b", "a", "c"]);
-}
-
-#[test]
-fn test_append() {
-    // Sorted order
-    #[cfg(not(feature = "preserve_order"))]
-    const EXPECTED: &[&str] = &["a", "b", "c"];
-
-    // Insertion order
-    #[cfg(feature = "preserve_order")]
-    const EXPECTED: &[&str] = &["b", "a", "c"];
-
-    let mut v: Value = from_str(r#"{"b":null,"a":null,"c":null}"#).unwrap();
-    let val = v.as_object_mut().unwrap();
-    let mut m = Map::new();
-    m.append(val);
-    let keys: Vec<_> = m.keys().collect();
-
-    assert_eq!(keys, EXPECTED);
-    assert!(val.is_empty());
-}
-
-#[test]
-fn test_retain() {
-    let mut v: Value = from_str(r#"{"b":null,"a":null,"c":null}"#).unwrap();
-    let val = v.as_object_mut().unwrap();
-    val.retain(|k, _| k.as_str() != "b");
-
-    let keys: Vec<_> = val.keys().collect();
-    assert_eq!(keys, &["a", "c"]);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression.rs
deleted file mode 100644
index 315bb15..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#![allow(clippy::elidable_lifetime_names, clippy::needless_lifetimes)]
-
-mod regression {
-    automod::dir!("tests/regression");
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue1004.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue1004.rs
deleted file mode 100644
index c09fb96..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue1004.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-#![cfg(feature = "arbitrary_precision")]
-
-#[test]
-fn test() {
-    let float = 5.55f32;
-    let value = serde_json::to_value(float).unwrap();
-    let json = serde_json::to_string(&value).unwrap();
-
-    // If the f32 were cast to f64 by Value before serialization, then this
-    // would incorrectly serialize as 5.550000190734863.
-    assert_eq!(json, "5.55");
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue520.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue520.rs
deleted file mode 100644
index 730ecc6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue520.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-#![allow(clippy::float_cmp)]
-
-use serde_derive::{Deserialize, Serialize};
-
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(tag = "type", content = "data")]
-enum E {
-    Float(f32),
-}
-
-#[test]
-fn test() {
-    let e = E::Float(159.1);
-    let v = serde_json::to_value(e).unwrap();
-    let e = serde_json::from_value::<E>(v).unwrap();
-
-    match e {
-        E::Float(f) => assert_eq!(f, 159.1),
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue795.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue795.rs
deleted file mode 100644
index 411e8af..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue795.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-#![allow(clippy::assertions_on_result_states)]
-
-use serde::de::{
-    Deserialize, Deserializer, EnumAccess, IgnoredAny, MapAccess, VariantAccess, Visitor,
-};
-use serde_json::json;
-use std::fmt;
-
-#[derive(Debug)]
-pub enum Enum {
-    Variant {
-        #[allow(dead_code)]
-        x: u8,
-    },
-}
-
-impl<'de> Deserialize<'de> for Enum {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        struct EnumVisitor;
-
-        impl<'de> Visitor<'de> for EnumVisitor {
-            type Value = Enum;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("enum Enum")
-            }
-
-            fn visit_enum<A>(self, data: A) -> Result<Self::Value, A::Error>
-            where
-                A: EnumAccess<'de>,
-            {
-                let (IgnoredAny, variant) = data.variant()?;
-                variant.struct_variant(&["x"], self)
-            }
-
-            fn visit_map<A>(self, mut data: A) -> Result<Self::Value, A::Error>
-            where
-                A: MapAccess<'de>,
-            {
-                let mut x = 0;
-                if let Some((IgnoredAny, value)) = data.next_entry()? {
-                    x = value;
-                }
-                Ok(Enum::Variant { x })
-            }
-        }
-
-        deserializer.deserialize_enum("Enum", &["Variant"], EnumVisitor)
-    }
-}
-
-#[test]
-fn test() {
-    let s = r#" {"Variant":{"x":0,"y":0}} "#;
-    assert!(serde_json::from_str::<Enum>(s).is_err());
-
-    let j = json!({"Variant":{"x":0,"y":0}});
-    assert!(serde_json::from_value::<Enum>(j).is_err());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue845.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue845.rs
deleted file mode 100644
index 7b6564da..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue845.rs
+++ /dev/null
@@ -1,74 +0,0 @@
-#![allow(clippy::trait_duplication_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/8757
-
-use serde::{Deserialize, Deserializer};
-use std::fmt::{self, Display};
-use std::marker::PhantomData;
-use std::str::FromStr;
-
-pub struct NumberVisitor<T> {
-    marker: PhantomData<T>,
-}
-
-impl<'de, T> serde::de::Visitor<'de> for NumberVisitor<T>
-where
-    T: TryFrom<u64> + TryFrom<i64> + FromStr,
-    <T as TryFrom<u64>>::Error: Display,
-    <T as TryFrom<i64>>::Error: Display,
-    <T as FromStr>::Err: Display,
-{
-    type Value = T;
-
-    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str("an integer or string")
-    }
-
-    fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
-    where
-        E: serde::de::Error,
-    {
-        T::try_from(v).map_err(serde::de::Error::custom)
-    }
-
-    fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
-    where
-        E: serde::de::Error,
-    {
-        T::try_from(v).map_err(serde::de::Error::custom)
-    }
-
-    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
-    where
-        E: serde::de::Error,
-    {
-        v.parse().map_err(serde::de::Error::custom)
-    }
-}
-
-fn deserialize_integer_or_string<'de, D, T>(deserializer: D) -> Result<T, D::Error>
-where
-    D: Deserializer<'de>,
-    T: TryFrom<u64> + TryFrom<i64> + FromStr,
-    <T as TryFrom<u64>>::Error: Display,
-    <T as TryFrom<i64>>::Error: Display,
-    <T as FromStr>::Err: Display,
-{
-    deserializer.deserialize_any(NumberVisitor {
-        marker: PhantomData,
-    })
-}
-
-#[derive(Deserialize, Debug)]
-pub struct Struct {
-    #[serde(deserialize_with = "deserialize_integer_or_string")]
-    #[allow(dead_code)]
-    pub i: i64,
-}
-
-#[test]
-fn test() {
-    let j = r#" {"i":100} "#;
-    println!("{:?}", serde_json::from_str::<Struct>(j).unwrap());
-
-    let j = r#" {"i":"100"} "#;
-    println!("{:?}", serde_json::from_str::<Struct>(j).unwrap());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue953.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue953.rs
deleted file mode 100644
index 771aa52..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/regression/issue953.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-use serde_json::Value;
-
-#[test]
-fn test() {
-    let x1 = serde_json::from_str::<Value>("18446744073709551615.");
-    assert!(x1.is_err());
-    let x2 = serde_json::from_str::<Value>("18446744073709551616.");
-    assert!(x2.is_err());
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/stream.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/stream.rs
deleted file mode 100644
index fa52cede..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/stream.rs
+++ /dev/null
@@ -1,182 +0,0 @@
-#![allow(clippy::assertions_on_result_states)]
-
-use serde_json::{json, Deserializer, Value};
-
-// Rustfmt issue https://github.com/rust-lang-nursery/rustfmt/issues/2740
-#[rustfmt::skip]
-macro_rules! test_stream {
-    ($data:expr, $ty:ty, |$stream:ident| $test:block) => {
-        {
-            let de = Deserializer::from_str($data);
-            let mut $stream = de.into_iter::<$ty>();
-            assert_eq!($stream.byte_offset(), 0);
-            $test
-        }
-        {
-            let de = Deserializer::from_slice($data.as_bytes());
-            let mut $stream = de.into_iter::<$ty>();
-            assert_eq!($stream.byte_offset(), 0);
-            $test
-        }
-        {
-            let mut bytes = $data.as_bytes();
-            let de = Deserializer::from_reader(&mut bytes);
-            let mut $stream = de.into_iter::<$ty>();
-            assert_eq!($stream.byte_offset(), 0);
-            $test
-        }
-    };
-}
-
-#[test]
-fn test_json_stream_newlines() {
-    let data = "{\"x\":39} {\"x\":40}{\"x\":41}\n{\"x\":42}";
-
-    test_stream!(data, Value, |stream| {
-        assert_eq!(stream.next().unwrap().unwrap()["x"], 39);
-        assert_eq!(stream.byte_offset(), 8);
-
-        assert_eq!(stream.next().unwrap().unwrap()["x"], 40);
-        assert_eq!(stream.byte_offset(), 17);
-
-        assert_eq!(stream.next().unwrap().unwrap()["x"], 41);
-        assert_eq!(stream.byte_offset(), 25);
-
-        assert_eq!(stream.next().unwrap().unwrap()["x"], 42);
-        assert_eq!(stream.byte_offset(), 34);
-
-        assert!(stream.next().is_none());
-        assert_eq!(stream.byte_offset(), 34);
-    });
-}
-
-#[test]
-fn test_json_stream_trailing_whitespaces() {
-    let data = "{\"x\":42} \t\n";
-
-    test_stream!(data, Value, |stream| {
-        assert_eq!(stream.next().unwrap().unwrap()["x"], 42);
-        assert_eq!(stream.byte_offset(), 8);
-
-        assert!(stream.next().is_none());
-        assert_eq!(stream.byte_offset(), 11);
-    });
-}
-
-#[test]
-fn test_json_stream_truncated() {
-    let data = "{\"x\":40}\n{\"x\":";
-
-    test_stream!(data, Value, |stream| {
-        assert_eq!(stream.next().unwrap().unwrap()["x"], 40);
-        assert_eq!(stream.byte_offset(), 8);
-
-        assert!(stream.next().unwrap().unwrap_err().is_eof());
-        assert_eq!(stream.byte_offset(), 9);
-    });
-}
-
-#[test]
-fn test_json_stream_truncated_decimal() {
-    let data = "{\"x\":4.";
-
-    test_stream!(data, Value, |stream| {
-        assert!(stream.next().unwrap().unwrap_err().is_eof());
-        assert_eq!(stream.byte_offset(), 0);
-    });
-}
-
-#[test]
-fn test_json_stream_truncated_negative() {
-    let data = "{\"x\":-";
-
-    test_stream!(data, Value, |stream| {
-        assert!(stream.next().unwrap().unwrap_err().is_eof());
-        assert_eq!(stream.byte_offset(), 0);
-    });
-}
-
-#[test]
-fn test_json_stream_truncated_exponent() {
-    let data = "{\"x\":4e";
-
-    test_stream!(data, Value, |stream| {
-        assert!(stream.next().unwrap().unwrap_err().is_eof());
-        assert_eq!(stream.byte_offset(), 0);
-    });
-}
-
-#[test]
-fn test_json_stream_empty() {
-    let data = "";
-
-    test_stream!(data, Value, |stream| {
-        assert!(stream.next().is_none());
-        assert_eq!(stream.byte_offset(), 0);
-    });
-}
-
-#[test]
-fn test_json_stream_primitive() {
-    let data = "{} true{}1[]\nfalse\"hey\"2 ";
-
-    test_stream!(data, Value, |stream| {
-        assert_eq!(stream.next().unwrap().unwrap(), json!({}));
-        assert_eq!(stream.byte_offset(), 2);
-
-        assert_eq!(stream.next().unwrap().unwrap(), true);
-        assert_eq!(stream.byte_offset(), 7);
-
-        assert_eq!(stream.next().unwrap().unwrap(), json!({}));
-        assert_eq!(stream.byte_offset(), 9);
-
-        assert_eq!(stream.next().unwrap().unwrap(), 1);
-        assert_eq!(stream.byte_offset(), 10);
-
-        assert_eq!(stream.next().unwrap().unwrap(), json!([]));
-        assert_eq!(stream.byte_offset(), 12);
-
-        assert_eq!(stream.next().unwrap().unwrap(), false);
-        assert_eq!(stream.byte_offset(), 18);
-
-        assert_eq!(stream.next().unwrap().unwrap(), "hey");
-        assert_eq!(stream.byte_offset(), 23);
-
-        assert_eq!(stream.next().unwrap().unwrap(), 2);
-        assert_eq!(stream.byte_offset(), 24);
-
-        assert!(stream.next().is_none());
-        assert_eq!(stream.byte_offset(), 25);
-    });
-}
-
-#[test]
-fn test_json_stream_invalid_literal() {
-    let data = "truefalse";
-
-    test_stream!(data, Value, |stream| {
-        let second = stream.next().unwrap().unwrap_err();
-        assert_eq!(second.to_string(), "trailing characters at line 1 column 5");
-    });
-}
-
-#[test]
-fn test_json_stream_invalid_number() {
-    let data = "1true";
-
-    test_stream!(data, Value, |stream| {
-        let second = stream.next().unwrap().unwrap_err();
-        assert_eq!(second.to_string(), "trailing characters at line 1 column 2");
-    });
-}
-
-#[test]
-fn test_error() {
-    let data = "true wrong false";
-
-    test_stream!(data, Value, |stream| {
-        assert_eq!(stream.next().unwrap().unwrap(), true);
-        assert!(stream.next().unwrap().is_err());
-        assert!(stream.next().is_none());
-    });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/test.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/test.rs
deleted file mode 100644
index d41a233..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/test.rs
+++ /dev/null
@@ -1,2560 +0,0 @@
-#![allow(
-    clippy::assertions_on_result_states,
-    clippy::byte_char_slices,
-    clippy::cast_precision_loss,
-    clippy::derive_partial_eq_without_eq,
-    clippy::excessive_precision,
-    clippy::float_cmp,
-    clippy::incompatible_msrv, // https://github.com/rust-lang/rust-clippy/issues/12257
-    clippy::items_after_statements,
-    clippy::large_digit_groups,
-    clippy::let_underscore_untyped,
-    clippy::shadow_unrelated,
-    clippy::too_many_lines,
-    clippy::unreadable_literal,
-    clippy::unseparated_literal_suffix,
-    clippy::vec_init_then_push,
-    clippy::zero_sized_map_values
-)]
-
-#[macro_use]
-mod macros;
-
-#[cfg(feature = "raw_value")]
-use ref_cast::RefCast;
-use serde::de::{self, IgnoredAny, IntoDeserializer};
-use serde::ser::{self, SerializeMap, SerializeSeq, Serializer};
-use serde::{Deserialize, Serialize};
-use serde_bytes::{ByteBuf, Bytes};
-#[cfg(feature = "raw_value")]
-use serde_json::value::RawValue;
-use serde_json::{
-    from_reader, from_slice, from_str, from_value, json, to_string, to_string_pretty, to_value,
-    to_vec, Deserializer, Number, Value,
-};
-use std::collections::BTreeMap;
-#[cfg(feature = "raw_value")]
-use std::collections::HashMap;
-use std::fmt::{self, Debug};
-use std::hash::BuildHasher;
-#[cfg(feature = "raw_value")]
-use std::hash::{Hash, Hasher};
-use std::io;
-use std::iter;
-use std::marker::PhantomData;
-use std::mem;
-use std::str::FromStr;
-use std::{f32, f64};
-
-macro_rules! treemap {
-    () => {
-        BTreeMap::new()
-    };
-    ($($k:expr => $v:expr),+ $(,)?) => {
-        {
-            let mut m = BTreeMap::new();
-            $(
-                m.insert($k, $v);
-            )+
-            m
-        }
-    };
-}
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-#[serde(deny_unknown_fields)]
-enum Animal {
-    Dog,
-    Frog(String, Vec<isize>),
-    Cat { age: usize, name: String },
-    AntHive(Vec<String>),
-}
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-struct Inner {
-    a: (),
-    b: usize,
-    c: Vec<String>,
-}
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-struct Outer {
-    inner: Vec<Inner>,
-}
-
-fn test_encode_ok<T>(errors: &[(T, &str)])
-where
-    T: PartialEq + Debug + ser::Serialize,
-{
-    for &(ref value, out) in errors {
-        let out = out.to_string();
-
-        let s = to_string(value).unwrap();
-        assert_eq!(s, out);
-
-        let v = to_value(value).unwrap();
-        let s = to_string(&v).unwrap();
-        assert_eq!(s, out);
-    }
-}
-
-fn test_pretty_encode_ok<T>(errors: &[(T, &str)])
-where
-    T: PartialEq + Debug + ser::Serialize,
-{
-    for &(ref value, out) in errors {
-        let out = out.to_string();
-
-        let s = to_string_pretty(value).unwrap();
-        assert_eq!(s, out);
-
-        let v = to_value(value).unwrap();
-        let s = to_string_pretty(&v).unwrap();
-        assert_eq!(s, out);
-    }
-}
-
-#[test]
-fn test_write_null() {
-    let tests = &[((), "null")];
-    test_encode_ok(tests);
-    test_pretty_encode_ok(tests);
-}
-
-#[test]
-fn test_write_u64() {
-    let tests = &[(3u64, "3"), (u64::MAX, &u64::MAX.to_string())];
-    test_encode_ok(tests);
-    test_pretty_encode_ok(tests);
-}
-
-#[test]
-fn test_write_i64() {
-    let tests = &[
-        (3i64, "3"),
-        (-2i64, "-2"),
-        (-1234i64, "-1234"),
-        (i64::MIN, &i64::MIN.to_string()),
-    ];
-    test_encode_ok(tests);
-    test_pretty_encode_ok(tests);
-}
-
-#[test]
-fn test_write_f64() {
-    let tests = &[
-        (3.0, "3.0"),
-        (3.1, "3.1"),
-        (-1.5, "-1.5"),
-        (0.5, "0.5"),
-        (f64::MIN, "-1.7976931348623157e308"),
-        (f64::MAX, "1.7976931348623157e308"),
-        (f64::EPSILON, "2.220446049250313e-16"),
-    ];
-    test_encode_ok(tests);
-    test_pretty_encode_ok(tests);
-}
-
-#[test]
-fn test_encode_nonfinite_float_yields_null() {
-    let v = to_value(f64::NAN.copysign(1.0)).unwrap();
-    assert!(v.is_null());
-
-    let v = to_value(f64::NAN.copysign(-1.0)).unwrap();
-    assert!(v.is_null());
-
-    let v = to_value(f64::INFINITY).unwrap();
-    assert!(v.is_null());
-
-    let v = to_value(-f64::INFINITY).unwrap();
-    assert!(v.is_null());
-
-    let v = to_value(f32::NAN.copysign(1.0)).unwrap();
-    assert!(v.is_null());
-
-    let v = to_value(f32::NAN.copysign(-1.0)).unwrap();
-    assert!(v.is_null());
-
-    let v = to_value(f32::INFINITY).unwrap();
-    assert!(v.is_null());
-
-    let v = to_value(-f32::INFINITY).unwrap();
-    assert!(v.is_null());
-}
-
-#[test]
-fn test_write_str() {
-    let tests = &[("", "\"\""), ("foo", "\"foo\"")];
-    test_encode_ok(tests);
-    test_pretty_encode_ok(tests);
-}
-
-#[test]
-fn test_write_bool() {
-    let tests = &[(true, "true"), (false, "false")];
-    test_encode_ok(tests);
-    test_pretty_encode_ok(tests);
-}
-
-#[test]
-fn test_write_char() {
-    let tests = &[
-        ('n', "\"n\""),
-        ('"', "\"\\\"\""),
-        ('\\', "\"\\\\\""),
-        ('/', "\"/\""),
-        ('\x08', "\"\\b\""),
-        ('\x0C', "\"\\f\""),
-        ('\n', "\"\\n\""),
-        ('\r', "\"\\r\""),
-        ('\t', "\"\\t\""),
-        ('\x0B', "\"\\u000b\""),
-        ('\u{3A3}', "\"\u{3A3}\""),
-    ];
-    test_encode_ok(tests);
-    test_pretty_encode_ok(tests);
-}
-
-#[test]
-fn test_write_list() {
-    test_encode_ok(&[
-        (vec![], "[]"),
-        (vec![true], "[true]"),
-        (vec![true, false], "[true,false]"),
-    ]);
-
-    test_encode_ok(&[
-        (vec![vec![], vec![], vec![]], "[[],[],[]]"),
-        (vec![vec![1, 2, 3], vec![], vec![]], "[[1,2,3],[],[]]"),
-        (vec![vec![], vec![1, 2, 3], vec![]], "[[],[1,2,3],[]]"),
-        (vec![vec![], vec![], vec![1, 2, 3]], "[[],[],[1,2,3]]"),
-    ]);
-
-    test_pretty_encode_ok(&[
-        (vec![vec![], vec![], vec![]], pretty_str!([[], [], []])),
-        (
-            vec![vec![1, 2, 3], vec![], vec![]],
-            pretty_str!([[1, 2, 3], [], []]),
-        ),
-        (
-            vec![vec![], vec![1, 2, 3], vec![]],
-            pretty_str!([[], [1, 2, 3], []]),
-        ),
-        (
-            vec![vec![], vec![], vec![1, 2, 3]],
-            pretty_str!([[], [], [1, 2, 3]]),
-        ),
-    ]);
-
-    test_pretty_encode_ok(&[
-        (vec![], "[]"),
-        (vec![true], pretty_str!([true])),
-        (vec![true, false], pretty_str!([true, false])),
-    ]);
-
-    let long_test_list = json!([false, null, ["foo\nbar", 3.5]]);
-
-    test_encode_ok(&[(
-        long_test_list.clone(),
-        json_str!([false, null, ["foo\nbar", 3.5]]),
-    )]);
-
-    test_pretty_encode_ok(&[(
-        long_test_list,
-        pretty_str!([false, null, ["foo\nbar", 3.5]]),
-    )]);
-}
-
-#[test]
-fn test_write_object() {
-    test_encode_ok(&[
-        (treemap!(), "{}"),
-        (treemap!("a".to_owned() => true), "{\"a\":true}"),
-        (
-            treemap!(
-                "a".to_owned() => true,
-                "b".to_owned() => false,
-            ),
-            "{\"a\":true,\"b\":false}",
-        ),
-    ]);
-
-    test_encode_ok(&[
-        (
-            treemap![
-                "a".to_owned() => treemap![],
-                "b".to_owned() => treemap![],
-                "c".to_owned() => treemap![],
-            ],
-            "{\"a\":{},\"b\":{},\"c\":{}}",
-        ),
-        (
-            treemap![
-                "a".to_owned() => treemap![
-                    "a".to_owned() => treemap!["a" => vec![1,2,3]],
-                    "b".to_owned() => treemap![],
-                    "c".to_owned() => treemap![],
-                ],
-                "b".to_owned() => treemap![],
-                "c".to_owned() => treemap![],
-            ],
-            "{\"a\":{\"a\":{\"a\":[1,2,3]},\"b\":{},\"c\":{}},\"b\":{},\"c\":{}}",
-        ),
-        (
-            treemap![
-                "a".to_owned() => treemap![],
-                "b".to_owned() => treemap![
-                    "a".to_owned() => treemap!["a" => vec![1,2,3]],
-                    "b".to_owned() => treemap![],
-                    "c".to_owned() => treemap![],
-                ],
-                "c".to_owned() => treemap![],
-            ],
-            "{\"a\":{},\"b\":{\"a\":{\"a\":[1,2,3]},\"b\":{},\"c\":{}},\"c\":{}}",
-        ),
-        (
-            treemap![
-                "a".to_owned() => treemap![],
-                "b".to_owned() => treemap![],
-                "c".to_owned() => treemap![
-                    "a".to_owned() => treemap!["a" => vec![1,2,3]],
-                    "b".to_owned() => treemap![],
-                    "c".to_owned() => treemap![],
-                ],
-            ],
-            "{\"a\":{},\"b\":{},\"c\":{\"a\":{\"a\":[1,2,3]},\"b\":{},\"c\":{}}}",
-        ),
-    ]);
-
-    test_encode_ok(&[(treemap!['c' => ()], "{\"c\":null}")]);
-
-    test_pretty_encode_ok(&[
-        (
-            treemap![
-                "a".to_owned() => treemap![],
-                "b".to_owned() => treemap![],
-                "c".to_owned() => treemap![],
-            ],
-            pretty_str!({
-                "a": {},
-                "b": {},
-                "c": {}
-            }),
-        ),
-        (
-            treemap![
-                "a".to_owned() => treemap![
-                    "a".to_owned() => treemap!["a" => vec![1,2,3]],
-                    "b".to_owned() => treemap![],
-                    "c".to_owned() => treemap![],
-                ],
-                "b".to_owned() => treemap![],
-                "c".to_owned() => treemap![],
-            ],
-            pretty_str!({
-                "a": {
-                    "a": {
-                        "a": [
-                            1,
-                            2,
-                            3
-                        ]
-                    },
-                    "b": {},
-                    "c": {}
-                },
-                "b": {},
-                "c": {}
-            }),
-        ),
-        (
-            treemap![
-                "a".to_owned() => treemap![],
-                "b".to_owned() => treemap![
-                    "a".to_owned() => treemap!["a" => vec![1,2,3]],
-                    "b".to_owned() => treemap![],
-                    "c".to_owned() => treemap![],
-                ],
-                "c".to_owned() => treemap![],
-            ],
-            pretty_str!({
-                "a": {},
-                "b": {
-                    "a": {
-                        "a": [
-                            1,
-                            2,
-                            3
-                        ]
-                    },
-                    "b": {},
-                    "c": {}
-                },
-                "c": {}
-            }),
-        ),
-        (
-            treemap![
-                "a".to_owned() => treemap![],
-                "b".to_owned() => treemap![],
-                "c".to_owned() => treemap![
-                    "a".to_owned() => treemap!["a" => vec![1,2,3]],
-                    "b".to_owned() => treemap![],
-                    "c".to_owned() => treemap![],
-                ],
-            ],
-            pretty_str!({
-                "a": {},
-                "b": {},
-                "c": {
-                    "a": {
-                        "a": [
-                            1,
-                            2,
-                            3
-                        ]
-                    },
-                    "b": {},
-                    "c": {}
-                }
-            }),
-        ),
-    ]);
-
-    test_pretty_encode_ok(&[
-        (treemap!(), "{}"),
-        (
-            treemap!("a".to_owned() => true),
-            pretty_str!({
-                "a": true
-            }),
-        ),
-        (
-            treemap!(
-                "a".to_owned() => true,
-                "b".to_owned() => false,
-            ),
-            pretty_str!( {
-                "a": true,
-                "b": false
-            }),
-        ),
-    ]);
-
-    let complex_obj = json!({
-        "b": [
-            {"c": "\x0c\x1f\r"},
-            {"d": ""}
-        ]
-    });
-
-    test_encode_ok(&[(
-        complex_obj.clone(),
-        json_str!({
-            "b": [
-                {
-                    "c": (r#""\f\u001f\r""#)
-                },
-                {
-                    "d": ""
-                }
-            ]
-        }),
-    )]);
-
-    test_pretty_encode_ok(&[(
-        complex_obj,
-        pretty_str!({
-            "b": [
-                {
-                    "c": (r#""\f\u001f\r""#)
-                },
-                {
-                    "d": ""
-                }
-            ]
-        }),
-    )]);
-}
-
-#[test]
-fn test_write_tuple() {
-    test_encode_ok(&[((5,), "[5]")]);
-
-    test_pretty_encode_ok(&[((5,), pretty_str!([5]))]);
-
-    test_encode_ok(&[((5, (6, "abc")), "[5,[6,\"abc\"]]")]);
-
-    test_pretty_encode_ok(&[((5, (6, "abc")), pretty_str!([5, [6, "abc"]]))]);
-}
-
-#[test]
-fn test_write_enum() {
-    test_encode_ok(&[
-        (Animal::Dog, "\"Dog\""),
-        (
-            Animal::Frog("Henry".to_owned(), vec![]),
-            "{\"Frog\":[\"Henry\",[]]}",
-        ),
-        (
-            Animal::Frog("Henry".to_owned(), vec![349]),
-            "{\"Frog\":[\"Henry\",[349]]}",
-        ),
-        (
-            Animal::Frog("Henry".to_owned(), vec![349, 102]),
-            "{\"Frog\":[\"Henry\",[349,102]]}",
-        ),
-        (
-            Animal::Cat {
-                age: 5,
-                name: "Kate".to_owned(),
-            },
-            "{\"Cat\":{\"age\":5,\"name\":\"Kate\"}}",
-        ),
-        (
-            Animal::AntHive(vec!["Bob".to_owned(), "Stuart".to_owned()]),
-            "{\"AntHive\":[\"Bob\",\"Stuart\"]}",
-        ),
-    ]);
-
-    test_pretty_encode_ok(&[
-        (Animal::Dog, "\"Dog\""),
-        (
-            Animal::Frog("Henry".to_owned(), vec![]),
-            pretty_str!({
-                "Frog": [
-                    "Henry",
-                    []
-                ]
-            }),
-        ),
-        (
-            Animal::Frog("Henry".to_owned(), vec![349]),
-            pretty_str!({
-                "Frog": [
-                    "Henry",
-                    [
-                        349
-                    ]
-                ]
-            }),
-        ),
-        (
-            Animal::Frog("Henry".to_owned(), vec![349, 102]),
-            pretty_str!({
-                "Frog": [
-                    "Henry",
-                    [
-                      349,
-                      102
-                    ]
-                ]
-            }),
-        ),
-    ]);
-}
-
-#[test]
-fn test_write_option() {
-    test_encode_ok(&[(None, "null"), (Some("jodhpurs"), "\"jodhpurs\"")]);
-
-    test_encode_ok(&[
-        (None, "null"),
-        (Some(vec!["foo", "bar"]), "[\"foo\",\"bar\"]"),
-    ]);
-
-    test_pretty_encode_ok(&[(None, "null"), (Some("jodhpurs"), "\"jodhpurs\"")]);
-
-    test_pretty_encode_ok(&[
-        (None, "null"),
-        (Some(vec!["foo", "bar"]), pretty_str!(["foo", "bar"])),
-    ]);
-}
-
-#[test]
-fn test_write_newtype_struct() {
-    #[derive(Serialize, PartialEq, Debug)]
-    struct Newtype(BTreeMap<String, i32>);
-
-    let inner = Newtype(treemap!(String::from("inner") => 123));
-    let outer = treemap!(String::from("outer") => to_value(&inner).unwrap());
-
-    test_encode_ok(&[(inner, r#"{"inner":123}"#)]);
-
-    test_encode_ok(&[(outer, r#"{"outer":{"inner":123}}"#)]);
-}
-
-#[test]
-fn test_deserialize_number_to_untagged_enum() {
-    #[derive(Eq, PartialEq, Deserialize, Debug)]
-    #[serde(untagged)]
-    enum E {
-        N(i64),
-    }
-
-    assert_eq!(E::N(0), E::deserialize(Number::from(0)).unwrap());
-}
-
-fn test_parse_ok<T>(tests: Vec<(&str, T)>)
-where
-    T: Clone + Debug + PartialEq + ser::Serialize + de::DeserializeOwned,
-{
-    for (s, value) in tests {
-        let v: T = from_str(s).unwrap();
-        assert_eq!(v, value.clone());
-
-        let v: T = from_slice(s.as_bytes()).unwrap();
-        assert_eq!(v, value.clone());
-
-        // Make sure we can deserialize into a `Value`.
-        let json_value: Value = from_str(s).unwrap();
-        assert_eq!(json_value, to_value(&value).unwrap());
-
-        // Make sure we can deserialize from a `&Value`.
-        let v = T::deserialize(&json_value).unwrap();
-        assert_eq!(v, value);
-
-        // Make sure we can deserialize from a `Value`.
-        let v: T = from_value(json_value.clone()).unwrap();
-        assert_eq!(v, value);
-
-        // Make sure we can round trip back to `Value`.
-        let json_value2: Value = from_value(json_value.clone()).unwrap();
-        assert_eq!(json_value2, json_value);
-
-        // Make sure we can fully ignore.
-        let twoline = s.to_owned() + "\n3735928559";
-        let mut de = Deserializer::from_str(&twoline);
-        IgnoredAny::deserialize(&mut de).unwrap();
-        assert_eq!(0xDEAD_BEEF, u64::deserialize(&mut de).unwrap());
-
-        // Make sure every prefix is an EOF error, except that a prefix of a
-        // number may be a valid number.
-        if !json_value.is_number() {
-            for (i, _) in s.trim_end().char_indices() {
-                assert!(from_str::<Value>(&s[..i]).unwrap_err().is_eof());
-                assert!(from_str::<IgnoredAny>(&s[..i]).unwrap_err().is_eof());
-            }
-        }
-    }
-}
-
-// For testing representations that the deserializer accepts but the serializer
-// never generates. These do not survive a round-trip through Value.
-fn test_parse_unusual_ok<T>(tests: Vec<(&str, T)>)
-where
-    T: Clone + Debug + PartialEq + ser::Serialize + de::DeserializeOwned,
-{
-    for (s, value) in tests {
-        let v: T = from_str(s).unwrap();
-        assert_eq!(v, value.clone());
-
-        let v: T = from_slice(s.as_bytes()).unwrap();
-        assert_eq!(v, value.clone());
-    }
-}
-
-macro_rules! test_parse_err {
-    ($name:ident::<$($ty:ty),*>($arg:expr) => $expected:expr) => {
-        let actual = $name::<$($ty),*>($arg).unwrap_err().to_string();
-        assert_eq!(actual, $expected, "unexpected {} error", stringify!($name));
-    };
-}
-
-fn test_parse_err<T>(errors: &[(&str, &'static str)])
-where
-    T: Debug + PartialEq + de::DeserializeOwned,
-{
-    for &(s, err) in errors {
-        test_parse_err!(from_str::<T>(s) => err);
-        test_parse_err!(from_slice::<T>(s.as_bytes()) => err);
-    }
-}
-
-fn test_parse_slice_err<T>(errors: &[(&[u8], &'static str)])
-where
-    T: Debug + PartialEq + de::DeserializeOwned,
-{
-    for &(s, err) in errors {
-        test_parse_err!(from_slice::<T>(s) => err);
-    }
-}
-
-fn test_fromstr_parse_err<T>(errors: &[(&str, &'static str)])
-where
-    T: Debug + PartialEq + FromStr,
-    <T as FromStr>::Err: ToString,
-{
-    for &(s, err) in errors {
-        let actual = s.parse::<T>().unwrap_err().to_string();
-        assert_eq!(actual, err, "unexpected parsing error");
-    }
-}
-
-#[test]
-fn test_parse_null() {
-    test_parse_err::<()>(&[
-        ("n", "EOF while parsing a value at line 1 column 1"),
-        ("nul", "EOF while parsing a value at line 1 column 3"),
-        ("nulla", "trailing characters at line 1 column 5"),
-    ]);
-
-    test_parse_ok(vec![("null", ())]);
-}
-
-#[test]
-fn test_parse_bool() {
-    test_parse_err::<bool>(&[
-        ("t", "EOF while parsing a value at line 1 column 1"),
-        ("truz", "expected ident at line 1 column 4"),
-        ("f", "EOF while parsing a value at line 1 column 1"),
-        ("faz", "expected ident at line 1 column 3"),
-        ("truea", "trailing characters at line 1 column 5"),
-        ("falsea", "trailing characters at line 1 column 6"),
-    ]);
-
-    test_parse_ok(vec![
-        ("true", true),
-        (" true ", true),
-        ("false", false),
-        (" false ", false),
-    ]);
-}
-
-#[test]
-fn test_parse_char() {
-    test_parse_err::<char>(&[
-        (
-            "\"ab\"",
-            "invalid value: string \"ab\", expected a character at line 1 column 4",
-        ),
-        (
-            "10",
-            "invalid type: integer `10`, expected a character at line 1 column 2",
-        ),
-    ]);
-
-    test_parse_ok(vec![
-        ("\"n\"", 'n'),
-        ("\"\\\"\"", '"'),
-        ("\"\\\\\"", '\\'),
-        ("\"/\"", '/'),
-        ("\"\\b\"", '\x08'),
-        ("\"\\f\"", '\x0C'),
-        ("\"\\n\"", '\n'),
-        ("\"\\r\"", '\r'),
-        ("\"\\t\"", '\t'),
-        ("\"\\u000b\"", '\x0B'),
-        ("\"\\u000B\"", '\x0B'),
-        ("\"\u{3A3}\"", '\u{3A3}'),
-    ]);
-}
-
-#[test]
-fn test_parse_number_errors() {
-    test_parse_err::<f64>(&[
-        ("+", "expected value at line 1 column 1"),
-        (".", "expected value at line 1 column 1"),
-        ("-", "EOF while parsing a value at line 1 column 1"),
-        ("00", "invalid number at line 1 column 2"),
-        ("0x80", "trailing characters at line 1 column 2"),
-        ("\\0", "expected value at line 1 column 1"),
-        (".0", "expected value at line 1 column 1"),
-        ("0.", "EOF while parsing a value at line 1 column 2"),
-        ("1.", "EOF while parsing a value at line 1 column 2"),
-        ("1.a", "invalid number at line 1 column 3"),
-        ("1.e1", "invalid number at line 1 column 3"),
-        ("1e", "EOF while parsing a value at line 1 column 2"),
-        ("1e+", "EOF while parsing a value at line 1 column 3"),
-        ("1a", "trailing characters at line 1 column 2"),
-        (
-            "100e777777777777777777777777777",
-            "number out of range at line 1 column 14",
-        ),
-        (
-            "-100e777777777777777777777777777",
-            "number out of range at line 1 column 15",
-        ),
-        (
-            "1000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000", // 1e309
-            "number out of range at line 1 column 310",
-        ),
-        (
-            "1000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             .0e9", // 1e309
-            "number out of range at line 1 column 305",
-        ),
-        (
-            "1000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             e9", // 1e309
-            "number out of range at line 1 column 303",
-        ),
-    ]);
-}
-
-#[test]
-fn test_parse_i64() {
-    test_parse_ok(vec![
-        ("-2", -2),
-        ("-1234", -1234),
-        (" -1234 ", -1234),
-        (&i64::MIN.to_string(), i64::MIN),
-        (&i64::MAX.to_string(), i64::MAX),
-    ]);
-}
-
-#[test]
-fn test_parse_u64() {
-    test_parse_ok(vec![
-        ("0", 0u64),
-        ("3", 3u64),
-        ("1234", 1234),
-        (&u64::MAX.to_string(), u64::MAX),
-    ]);
-}
-
-#[test]
-fn test_parse_negative_zero() {
-    for negative_zero in &[
-        "-0",
-        "-0.0",
-        "-0e2",
-        "-0.0e2",
-        "-1e-400",
-        "-1e-4000000000000000000000000000000000000000000000000",
-    ] {
-        assert!(
-            from_str::<f32>(negative_zero).unwrap().is_sign_negative(),
-            "should have been negative: {:?}",
-            negative_zero,
-        );
-        assert!(
-            from_str::<f64>(negative_zero).unwrap().is_sign_negative(),
-            "should have been negative: {:?}",
-            negative_zero,
-        );
-    }
-}
-
-#[test]
-fn test_parse_f64() {
-    test_parse_ok(vec![
-        ("0.0", 0.0f64),
-        ("3.0", 3.0f64),
-        ("3.1", 3.1),
-        ("-1.2", -1.2),
-        ("0.4", 0.4),
-        // Edge case from:
-        // https://github.com/serde-rs/json/issues/536#issuecomment-583714900
-        ("2.638344616030823e-256", 2.638344616030823e-256),
-    ]);
-
-    #[cfg(not(feature = "arbitrary_precision"))]
-    test_parse_ok(vec![
-        // With arbitrary-precision enabled, this parses as Number{"3.00"}
-        // but the float is Number{"3.0"}
-        ("3.00", 3.0f64),
-        ("0.4e5", 0.4e5),
-        ("0.4e+5", 0.4e5),
-        ("0.4e15", 0.4e15),
-        ("0.4e+15", 0.4e15),
-        ("0.4e-01", 0.4e-1),
-        (" 0.4e-01 ", 0.4e-1),
-        ("0.4e-001", 0.4e-1),
-        ("0.4e-0", 0.4e0),
-        ("0.00e00", 0.0),
-        ("0.00e+00", 0.0),
-        ("0.00e-00", 0.0),
-        ("3.5E-2147483647", 0.0),
-        ("0.0100000000000000000001", 0.01),
-        (
-            &format!("{}", (i64::MIN as f64) - 1.0),
-            (i64::MIN as f64) - 1.0,
-        ),
-        (
-            &format!("{}", (u64::MAX as f64) + 1.0),
-            (u64::MAX as f64) + 1.0,
-        ),
-        (&format!("{}", f64::EPSILON), f64::EPSILON),
-        (
-            "0.0000000000000000000000000000000000000000000000000123e50",
-            1.23,
-        ),
-        ("100e-777777777777777777777777777", 0.0),
-        (
-            "1010101010101010101010101010101010101010",
-            10101010101010101010e20,
-        ),
-        (
-            "0.1010101010101010101010101010101010101010",
-            0.1010101010101010101,
-        ),
-        ("0e1000000000000000000000000000000000000000000000", 0.0),
-        (
-            "1000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             00000000",
-            1e308,
-        ),
-        (
-            "1000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             .0e8",
-            1e308,
-        ),
-        (
-            "1000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             e8",
-            1e308,
-        ),
-        (
-            "1000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000000000000000000000000000000000000000000000\
-             000000000000000000e-10",
-            1e308,
-        ),
-    ]);
-}
-
-#[test]
-fn test_value_as_f64() {
-    let v = serde_json::from_str::<Value>("1e1000");
-
-    #[cfg(not(feature = "arbitrary_precision"))]
-    assert!(v.is_err());
-
-    #[cfg(feature = "arbitrary_precision")]
-    assert_eq!(v.unwrap().as_f64(), None);
-}
-
-// Test roundtrip with some values that were not perfectly roundtripped by the
-// old f64 deserializer.
-#[cfg(feature = "float_roundtrip")]
-#[test]
-fn test_roundtrip_f64() {
-    for &float in &[
-        // Samples from quickcheck-ing roundtrip with `input: f64`. Comments
-        // indicate the value returned by the old deserializer.
-        51.24817837550540_4,  // 51.2481783755054_1
-        -93.3113703768803_3,  // -93.3113703768803_2
-        -36.5739948427534_36, // -36.5739948427534_4
-        52.31400820410624_4,  // 52.31400820410624_
-        97.4536532003468_5,   // 97.4536532003468_4
-        // Samples from `rng.next_u64` + `f64::from_bits` + `is_finite` filter.
-        2.0030397744267762e-253,
-        7.101215824554616e260,
-        1.769268377902049e74,
-        -1.6727517818542075e58,
-        3.9287532173373315e299,
-    ] {
-        let json = serde_json::to_string(&float).unwrap();
-        let output: f64 = serde_json::from_str(&json).unwrap();
-        assert_eq!(float, output);
-    }
-}
-
-#[test]
-fn test_roundtrip_f32() {
-    // This number has 1 ULP error if parsed via f64 and converted to f32.
-    // https://github.com/serde-rs/json/pull/671#issuecomment-628534468
-    let float = 7.038531e-26;
-    let json = serde_json::to_string(&float).unwrap();
-    let output: f32 = serde_json::from_str(&json).unwrap();
-    assert_eq!(float, output);
-}
-
-#[test]
-fn test_serialize_char() {
-    let value = json!(
-        ({
-            let mut map = BTreeMap::new();
-            map.insert('c', ());
-            map
-        })
-    );
-    assert_eq!(&Value::Null, value.get("c").unwrap());
-}
-
-#[cfg(feature = "arbitrary_precision")]
-#[test]
-fn test_malicious_number() {
-    #[derive(Serialize)]
-    #[serde(rename = "$serde_json::private::Number")]
-    struct S {
-        #[serde(rename = "$serde_json::private::Number")]
-        f: &'static str,
-    }
-
-    let actual = serde_json::to_value(&S { f: "not a number" })
-        .unwrap_err()
-        .to_string();
-    assert_eq!(actual, "invalid number at line 1 column 1");
-}
-
-#[test]
-fn test_parse_number() {
-    test_parse_ok(vec![
-        ("0.0", Number::from_f64(0.0f64).unwrap()),
-        ("3.0", Number::from_f64(3.0f64).unwrap()),
-        ("3.1", Number::from_f64(3.1).unwrap()),
-        ("-1.2", Number::from_f64(-1.2).unwrap()),
-        ("0.4", Number::from_f64(0.4).unwrap()),
-    ]);
-
-    test_fromstr_parse_err::<Number>(&[
-        (" 1.0", "invalid number at line 1 column 1"),
-        ("1.0 ", "invalid number at line 1 column 4"),
-        ("\t1.0", "invalid number at line 1 column 1"),
-        ("1.0\t", "invalid number at line 1 column 4"),
-    ]);
-
-    #[cfg(feature = "arbitrary_precision")]
-    test_parse_ok(vec![
-        ("1e999", Number::from_string_unchecked("1e999".to_owned())),
-        ("1e+999", Number::from_string_unchecked("1e+999".to_owned())),
-        ("-1e999", Number::from_string_unchecked("-1e999".to_owned())),
-        ("1e-999", Number::from_string_unchecked("1e-999".to_owned())),
-        ("1E999", Number::from_string_unchecked("1E999".to_owned())),
-        ("1E+999", Number::from_string_unchecked("1E+999".to_owned())),
-        ("-1E999", Number::from_string_unchecked("-1E999".to_owned())),
-        ("1E-999", Number::from_string_unchecked("1E-999".to_owned())),
-        ("1E+000", Number::from_string_unchecked("1E+000".to_owned())),
-        (
-            "2.3e999",
-            Number::from_string_unchecked("2.3e999".to_owned()),
-        ),
-        (
-            "-2.3e999",
-            Number::from_string_unchecked("-2.3e999".to_owned()),
-        ),
-    ]);
-}
-
-#[test]
-fn test_parse_string() {
-    test_parse_err::<String>(&[
-        ("\"", "EOF while parsing a string at line 1 column 1"),
-        ("\"lol", "EOF while parsing a string at line 1 column 4"),
-        ("\"lol\"a", "trailing characters at line 1 column 6"),
-        (
-            "\"\\uD83C\\uFFFF\"",
-            "lone leading surrogate in hex escape at line 1 column 13",
-        ),
-        (
-            "\"\n\"",
-            "control character (\\u0000-\\u001F) found while parsing a string at line 2 column 0",
-        ),
-        (
-            "\"\x1F\"",
-            "control character (\\u0000-\\u001F) found while parsing a string at line 1 column 2",
-        ),
-    ]);
-
-    test_parse_slice_err::<String>(&[
-        (
-            &[b'"', 159, 146, 150, b'"'],
-            "invalid unicode code point at line 1 column 5",
-        ),
-        (
-            &[b'"', b'\\', b'n', 159, 146, 150, b'"'],
-            "invalid unicode code point at line 1 column 7",
-        ),
-        (
-            &[b'"', b'\\', b'u', 48, 48, 51],
-            "EOF while parsing a string at line 1 column 6",
-        ),
-        (
-            &[b'"', b'\\', b'u', 250, 48, 51, 48, b'"'],
-            "invalid escape at line 1 column 7",
-        ),
-        (
-            &[b'"', b'\\', b'u', 48, 250, 51, 48, b'"'],
-            "invalid escape at line 1 column 7",
-        ),
-        (
-            &[b'"', b'\\', b'u', 48, 48, 250, 48, b'"'],
-            "invalid escape at line 1 column 7",
-        ),
-        (
-            &[b'"', b'\\', b'u', 48, 48, 51, 250, b'"'],
-            "invalid escape at line 1 column 7",
-        ),
-        (
-            &[b'"', b'\n', b'"'],
-            "control character (\\u0000-\\u001F) found while parsing a string at line 2 column 0",
-        ),
-        (
-            &[b'"', b'\x1F', b'"'],
-            "control character (\\u0000-\\u001F) found while parsing a string at line 1 column 2",
-        ),
-    ]);
-
-    test_parse_ok(vec![
-        ("\"\"", String::new()),
-        ("\"foo\"", "foo".to_owned()),
-        (" \"foo\" ", "foo".to_owned()),
-        ("\"\\\"\"", "\"".to_owned()),
-        ("\"\\b\"", "\x08".to_owned()),
-        ("\"\\n\"", "\n".to_owned()),
-        ("\"\\r\"", "\r".to_owned()),
-        ("\"\\t\"", "\t".to_owned()),
-        ("\"\\u12ab\"", "\u{12ab}".to_owned()),
-        ("\"\\uAB12\"", "\u{AB12}".to_owned()),
-        ("\"\\uD83C\\uDF95\"", "\u{1F395}".to_owned()),
-    ]);
-}
-
-#[test]
-fn test_parse_list() {
-    test_parse_err::<Vec<f64>>(&[
-        ("[", "EOF while parsing a list at line 1 column 1"),
-        ("[ ", "EOF while parsing a list at line 1 column 2"),
-        ("[1", "EOF while parsing a list at line 1 column 2"),
-        ("[1,", "EOF while parsing a value at line 1 column 3"),
-        ("[1,]", "trailing comma at line 1 column 4"),
-        ("[1 2]", "expected `,` or `]` at line 1 column 4"),
-        ("[]a", "trailing characters at line 1 column 3"),
-    ]);
-
-    test_parse_ok(vec![
-        ("[]", vec![]),
-        ("[ ]", vec![]),
-        ("[null]", vec![()]),
-        (" [ null ] ", vec![()]),
-    ]);
-
-    test_parse_ok(vec![("[true]", vec![true])]);
-
-    test_parse_ok(vec![("[3,1]", vec![3u64, 1]), (" [ 3 , 1 ] ", vec![3, 1])]);
-
-    test_parse_ok(vec![("[[3], [1, 2]]", vec![vec![3u64], vec![1, 2]])]);
-
-    test_parse_ok(vec![("[1]", (1u64,))]);
-
-    test_parse_ok(vec![("[1, 2]", (1u64, 2u64))]);
-
-    test_parse_ok(vec![("[1, 2, 3]", (1u64, 2u64, 3u64))]);
-
-    test_parse_ok(vec![("[1, [2, 3]]", (1u64, (2u64, 3u64)))]);
-}
-
-#[test]
-fn test_parse_object() {
-    test_parse_err::<BTreeMap<String, u32>>(&[
-        ("{", "EOF while parsing an object at line 1 column 1"),
-        ("{ ", "EOF while parsing an object at line 1 column 2"),
-        ("{1", "key must be a string at line 1 column 2"),
-        ("{ \"a\"", "EOF while parsing an object at line 1 column 5"),
-        ("{\"a\"", "EOF while parsing an object at line 1 column 4"),
-        ("{\"a\" ", "EOF while parsing an object at line 1 column 5"),
-        ("{\"a\" 1", "expected `:` at line 1 column 6"),
-        ("{\"a\":", "EOF while parsing a value at line 1 column 5"),
-        ("{\"a\":1", "EOF while parsing an object at line 1 column 6"),
-        ("{\"a\":1 1", "expected `,` or `}` at line 1 column 8"),
-        ("{\"a\":1,", "EOF while parsing a value at line 1 column 7"),
-        ("{}a", "trailing characters at line 1 column 3"),
-    ]);
-
-    test_parse_ok(vec![
-        ("{}", treemap!()),
-        ("{ }", treemap!()),
-        ("{\"a\":3}", treemap!("a".to_owned() => 3u64)),
-        ("{ \"a\" : 3 }", treemap!("a".to_owned() => 3)),
-        (
-            "{\"a\":3,\"b\":4}",
-            treemap!("a".to_owned() => 3, "b".to_owned() => 4),
-        ),
-        (
-            " { \"a\" : 3 , \"b\" : 4 } ",
-            treemap!("a".to_owned() => 3, "b".to_owned() => 4),
-        ),
-    ]);
-
-    test_parse_ok(vec![(
-        "{\"a\": {\"b\": 3, \"c\": 4}}",
-        treemap!(
-            "a".to_owned() => treemap!(
-                "b".to_owned() => 3u64,
-                "c".to_owned() => 4,
-            ),
-        ),
-    )]);
-
-    test_parse_ok(vec![("{\"c\":null}", treemap!('c' => ()))]);
-}
-
-#[test]
-fn test_parse_struct() {
-    test_parse_err::<Outer>(&[
-        (
-            "5",
-            "invalid type: integer `5`, expected struct Outer at line 1 column 1",
-        ),
-        (
-            "\"hello\"",
-            "invalid type: string \"hello\", expected struct Outer at line 1 column 7",
-        ),
-        (
-            "{\"inner\": true}",
-            "invalid type: boolean `true`, expected a sequence at line 1 column 14",
-        ),
-        ("{}", "missing field `inner` at line 1 column 2"),
-        (
-            r#"{"inner": [{"b": 42, "c": []}]}"#,
-            "missing field `a` at line 1 column 29",
-        ),
-    ]);
-
-    test_parse_ok(vec![
-        (
-            "{
-                \"inner\": []
-            }",
-            Outer { inner: vec![] },
-        ),
-        (
-            "{
-                \"inner\": [
-                    { \"a\": null, \"b\": 2, \"c\": [\"abc\", \"xyz\"] }
-                ]
-            }",
-            Outer {
-                inner: vec![Inner {
-                    a: (),
-                    b: 2,
-                    c: vec!["abc".to_owned(), "xyz".to_owned()],
-                }],
-            },
-        ),
-    ]);
-
-    let v: Outer = from_str(
-        "[
-            [
-                [ null, 2, [\"abc\", \"xyz\"] ]
-            ]
-        ]",
-    )
-    .unwrap();
-
-    assert_eq!(
-        v,
-        Outer {
-            inner: vec![Inner {
-                a: (),
-                b: 2,
-                c: vec!["abc".to_owned(), "xyz".to_owned()],
-            }],
-        }
-    );
-
-    let j = json!([null, 2, []]);
-    Inner::deserialize(&j).unwrap();
-    Inner::deserialize(j).unwrap();
-}
-
-#[test]
-fn test_parse_option() {
-    test_parse_ok(vec![
-        ("null", None::<String>),
-        ("\"jodhpurs\"", Some("jodhpurs".to_owned())),
-    ]);
-
-    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-    struct Foo {
-        x: Option<isize>,
-    }
-
-    let value: Foo = from_str("{}").unwrap();
-    assert_eq!(value, Foo { x: None });
-
-    test_parse_ok(vec![
-        ("{\"x\": null}", Foo { x: None }),
-        ("{\"x\": 5}", Foo { x: Some(5) }),
-    ]);
-}
-
-#[test]
-fn test_parse_enum_errors() {
-    test_parse_err::<Animal>(
-        &[
-            ("{}", "expected value at line 1 column 2"),
-            ("[]", "expected value at line 1 column 1"),
-            ("\"unknown\"",
-             "unknown variant `unknown`, expected one of `Dog`, `Frog`, `Cat`, `AntHive` at line 1 column 9"),
-            ("{\"unknown\":null}",
-             "unknown variant `unknown`, expected one of `Dog`, `Frog`, `Cat`, `AntHive` at line 1 column 10"),
-            ("{\"Dog\":", "EOF while parsing a value at line 1 column 7"),
-            ("{\"Dog\":}", "expected value at line 1 column 8"),
-            ("{\"Dog\":{}}", "invalid type: map, expected unit at line 1 column 7"),
-            ("\"Frog\"", "invalid type: unit variant, expected tuple variant"),
-            ("\"Frog\" 0 ", "invalid type: unit variant, expected tuple variant"),
-            ("{\"Frog\":{}}",
-             "invalid type: map, expected tuple variant Animal::Frog at line 1 column 8"),
-            ("{\"Cat\":[]}", "invalid length 0, expected struct variant Animal::Cat with 2 elements at line 1 column 9"),
-            ("{\"Cat\":[0]}", "invalid length 1, expected struct variant Animal::Cat with 2 elements at line 1 column 10"),
-            ("{\"Cat\":[0, \"\", 2]}", "trailing characters at line 1 column 16"),
-            ("{\"Cat\":{\"age\": 5, \"name\": \"Kate\", \"foo\":\"bar\"}",
-             "unknown field `foo`, expected `age` or `name` at line 1 column 39"),
-
-            // JSON does not allow trailing commas in data structures
-            ("{\"Cat\":[0, \"Kate\",]}", "trailing comma at line 1 column 19"),
-            ("{\"Cat\":{\"age\": 2, \"name\": \"Kate\",}}",
-             "trailing comma at line 1 column 34"),
-        ],
-    );
-}
-
-#[test]
-fn test_parse_enum() {
-    test_parse_ok(vec![
-        ("\"Dog\"", Animal::Dog),
-        (" \"Dog\" ", Animal::Dog),
-        (
-            "{\"Frog\":[\"Henry\",[]]}",
-            Animal::Frog("Henry".to_owned(), vec![]),
-        ),
-        (
-            " { \"Frog\": [ \"Henry\" , [ 349, 102 ] ] } ",
-            Animal::Frog("Henry".to_owned(), vec![349, 102]),
-        ),
-        (
-            "{\"Cat\": {\"age\": 5, \"name\": \"Kate\"}}",
-            Animal::Cat {
-                age: 5,
-                name: "Kate".to_owned(),
-            },
-        ),
-        (
-            " { \"Cat\" : { \"age\" : 5 , \"name\" : \"Kate\" } } ",
-            Animal::Cat {
-                age: 5,
-                name: "Kate".to_owned(),
-            },
-        ),
-        (
-            " { \"AntHive\" : [\"Bob\", \"Stuart\"] } ",
-            Animal::AntHive(vec!["Bob".to_owned(), "Stuart".to_owned()]),
-        ),
-    ]);
-
-    test_parse_unusual_ok(vec![
-        ("{\"Dog\":null}", Animal::Dog),
-        (" { \"Dog\" : null } ", Animal::Dog),
-    ]);
-
-    test_parse_ok(vec![(
-        concat!(
-            "{",
-            "  \"a\": \"Dog\",",
-            "  \"b\": {\"Frog\":[\"Henry\", []]}",
-            "}"
-        ),
-        treemap!(
-            "a".to_owned() => Animal::Dog,
-            "b".to_owned() => Animal::Frog("Henry".to_owned(), vec![]),
-        ),
-    )]);
-}
-
-#[test]
-fn test_parse_trailing_whitespace() {
-    test_parse_ok(vec![
-        ("[1, 2] ", vec![1u64, 2]),
-        ("[1, 2]\n", vec![1, 2]),
-        ("[1, 2]\t", vec![1, 2]),
-        ("[1, 2]\t \n", vec![1, 2]),
-    ]);
-}
-
-#[test]
-fn test_multiline_errors() {
-    test_parse_err::<BTreeMap<String, String>>(&[(
-        "{\n  \"foo\":\n \"bar\"",
-        "EOF while parsing an object at line 3 column 6",
-    )]);
-}
-
-#[test]
-fn test_missing_option_field() {
-    #[derive(Debug, PartialEq, Deserialize)]
-    struct Foo {
-        x: Option<u32>,
-    }
-
-    let value: Foo = from_str("{}").unwrap();
-    assert_eq!(value, Foo { x: None });
-
-    let value: Foo = from_str("{\"x\": 5}").unwrap();
-    assert_eq!(value, Foo { x: Some(5) });
-
-    let value: Foo = from_value(json!({})).unwrap();
-    assert_eq!(value, Foo { x: None });
-
-    let value: Foo = from_value(json!({"x": 5})).unwrap();
-    assert_eq!(value, Foo { x: Some(5) });
-}
-
-#[test]
-fn test_missing_nonoption_field() {
-    #[derive(Debug, PartialEq, Deserialize)]
-    struct Foo {
-        x: u32,
-    }
-
-    test_parse_err::<Foo>(&[("{}", "missing field `x` at line 1 column 2")]);
-}
-
-#[test]
-fn test_missing_renamed_field() {
-    #[derive(Debug, PartialEq, Deserialize)]
-    struct Foo {
-        #[serde(rename = "y")]
-        x: Option<u32>,
-    }
-
-    let value: Foo = from_str("{}").unwrap();
-    assert_eq!(value, Foo { x: None });
-
-    let value: Foo = from_str("{\"y\": 5}").unwrap();
-    assert_eq!(value, Foo { x: Some(5) });
-
-    let value: Foo = from_value(json!({})).unwrap();
-    assert_eq!(value, Foo { x: None });
-
-    let value: Foo = from_value(json!({"y": 5})).unwrap();
-    assert_eq!(value, Foo { x: Some(5) });
-}
-
-#[test]
-fn test_serialize_seq_with_no_len() {
-    #[derive(Clone, Debug, PartialEq)]
-    struct MyVec<T>(Vec<T>);
-
-    impl<T> ser::Serialize for MyVec<T>
-    where
-        T: ser::Serialize,
-    {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: ser::Serializer,
-        {
-            let mut seq = serializer.serialize_seq(None)?;
-            for elem in &self.0 {
-                seq.serialize_element(elem)?;
-            }
-            seq.end()
-        }
-    }
-
-    struct Visitor<T> {
-        marker: PhantomData<MyVec<T>>,
-    }
-
-    impl<'de, T> de::Visitor<'de> for Visitor<T>
-    where
-        T: de::Deserialize<'de>,
-    {
-        type Value = MyVec<T>;
-
-        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-            formatter.write_str("array")
-        }
-
-        fn visit_unit<E>(self) -> Result<MyVec<T>, E>
-        where
-            E: de::Error,
-        {
-            Ok(MyVec(Vec::new()))
-        }
-
-        fn visit_seq<V>(self, mut visitor: V) -> Result<MyVec<T>, V::Error>
-        where
-            V: de::SeqAccess<'de>,
-        {
-            let mut values = Vec::new();
-
-            while let Some(value) = visitor.next_element()? {
-                values.push(value);
-            }
-
-            Ok(MyVec(values))
-        }
-    }
-
-    impl<'de, T> de::Deserialize<'de> for MyVec<T>
-    where
-        T: de::Deserialize<'de>,
-    {
-        fn deserialize<D>(deserializer: D) -> Result<MyVec<T>, D::Error>
-        where
-            D: de::Deserializer<'de>,
-        {
-            deserializer.deserialize_map(Visitor {
-                marker: PhantomData,
-            })
-        }
-    }
-
-    let mut vec = Vec::new();
-    vec.push(MyVec(Vec::new()));
-    vec.push(MyVec(Vec::new()));
-    let vec: MyVec<MyVec<u32>> = MyVec(vec);
-
-    test_encode_ok(&[(vec.clone(), "[[],[]]")]);
-
-    let s = to_string_pretty(&vec).unwrap();
-    let expected = pretty_str!([[], []]);
-    assert_eq!(s, expected);
-}
-
-#[test]
-fn test_serialize_map_with_no_len() {
-    #[derive(Clone, Debug, PartialEq)]
-    struct MyMap<K, V>(BTreeMap<K, V>);
-
-    impl<K, V> ser::Serialize for MyMap<K, V>
-    where
-        K: ser::Serialize + Ord,
-        V: ser::Serialize,
-    {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: ser::Serializer,
-        {
-            let mut map = serializer.serialize_map(None)?;
-            for (k, v) in &self.0 {
-                map.serialize_entry(k, v)?;
-            }
-            map.end()
-        }
-    }
-
-    struct Visitor<K, V> {
-        marker: PhantomData<MyMap<K, V>>,
-    }
-
-    impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
-    where
-        K: de::Deserialize<'de> + Eq + Ord,
-        V: de::Deserialize<'de>,
-    {
-        type Value = MyMap<K, V>;
-
-        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-            formatter.write_str("map")
-        }
-
-        fn visit_unit<E>(self) -> Result<MyMap<K, V>, E>
-        where
-            E: de::Error,
-        {
-            Ok(MyMap(BTreeMap::new()))
-        }
-
-        fn visit_map<Visitor>(self, mut visitor: Visitor) -> Result<MyMap<K, V>, Visitor::Error>
-        where
-            Visitor: de::MapAccess<'de>,
-        {
-            let mut values = BTreeMap::new();
-
-            while let Some((key, value)) = visitor.next_entry()? {
-                values.insert(key, value);
-            }
-
-            Ok(MyMap(values))
-        }
-    }
-
-    impl<'de, K, V> de::Deserialize<'de> for MyMap<K, V>
-    where
-        K: de::Deserialize<'de> + Eq + Ord,
-        V: de::Deserialize<'de>,
-    {
-        fn deserialize<D>(deserializer: D) -> Result<MyMap<K, V>, D::Error>
-        where
-            D: de::Deserializer<'de>,
-        {
-            deserializer.deserialize_map(Visitor {
-                marker: PhantomData,
-            })
-        }
-    }
-
-    let mut map = BTreeMap::new();
-    map.insert("a", MyMap(BTreeMap::new()));
-    map.insert("b", MyMap(BTreeMap::new()));
-    let map: MyMap<_, MyMap<u32, u32>> = MyMap(map);
-
-    test_encode_ok(&[(map.clone(), "{\"a\":{},\"b\":{}}")]);
-
-    let s = to_string_pretty(&map).unwrap();
-    let expected = pretty_str!({
-        "a": {},
-        "b": {}
-    });
-    assert_eq!(s, expected);
-}
-
-#[cfg(not(miri))]
-#[test]
-fn test_deserialize_from_stream() {
-    use serde_json::to_writer;
-    use std::net::{TcpListener, TcpStream};
-    use std::thread;
-
-    #[derive(Debug, PartialEq, Serialize, Deserialize)]
-    struct Message {
-        message: String,
-    }
-
-    let l = TcpListener::bind("localhost:20000").unwrap();
-
-    thread::spawn(|| {
-        let l = l;
-        for stream in l.incoming() {
-            let mut stream = stream.unwrap();
-            let read_stream = stream.try_clone().unwrap();
-
-            let mut de = Deserializer::from_reader(read_stream);
-            let request = Message::deserialize(&mut de).unwrap();
-            let response = Message {
-                message: request.message,
-            };
-            to_writer(&mut stream, &response).unwrap();
-        }
-    });
-
-    let mut stream = TcpStream::connect("localhost:20000").unwrap();
-    let request = Message {
-        message: "hi there".to_owned(),
-    };
-    to_writer(&mut stream, &request).unwrap();
-
-    let mut de = Deserializer::from_reader(stream);
-    let response = Message::deserialize(&mut de).unwrap();
-
-    assert_eq!(request, response);
-}
-
-#[test]
-fn test_serialize_rejects_adt_keys() {
-    let map = treemap!(
-        Some("a") => 2,
-        Some("b") => 4,
-        None => 6,
-    );
-
-    let err = to_vec(&map).unwrap_err();
-    assert_eq!(err.to_string(), "key must be a string");
-}
-
-#[test]
-fn test_bytes_ser() {
-    let buf = vec![];
-    let bytes = Bytes::new(&buf);
-    assert_eq!(to_string(&bytes).unwrap(), "[]".to_owned());
-
-    let buf = vec![1, 2, 3];
-    let bytes = Bytes::new(&buf);
-    assert_eq!(to_string(&bytes).unwrap(), "[1,2,3]".to_owned());
-}
-
-#[test]
-fn test_byte_buf_ser() {
-    let bytes = ByteBuf::new();
-    assert_eq!(to_string(&bytes).unwrap(), "[]".to_owned());
-
-    let bytes = ByteBuf::from(vec![1, 2, 3]);
-    assert_eq!(to_string(&bytes).unwrap(), "[1,2,3]".to_owned());
-}
-
-#[test]
-fn test_byte_buf_de() {
-    let bytes = ByteBuf::new();
-    let v: ByteBuf = from_str("[]").unwrap();
-    assert_eq!(v, bytes);
-
-    let bytes = ByteBuf::from(vec![1, 2, 3]);
-    let v: ByteBuf = from_str("[1, 2, 3]").unwrap();
-    assert_eq!(v, bytes);
-}
-
-#[test]
-fn test_byte_buf_de_invalid_surrogates() {
-    let bytes = ByteBuf::from(vec![237, 160, 188]);
-    let v: ByteBuf = from_str(r#""\ud83c""#).unwrap();
-    assert_eq!(v, bytes);
-
-    let bytes = ByteBuf::from(vec![237, 160, 188, 10]);
-    let v: ByteBuf = from_str(r#""\ud83c\n""#).unwrap();
-    assert_eq!(v, bytes);
-
-    let bytes = ByteBuf::from(vec![237, 160, 188, 32]);
-    let v: ByteBuf = from_str(r#""\ud83c ""#).unwrap();
-    assert_eq!(v, bytes);
-
-    let res = from_str::<ByteBuf>(r#""\ud83c\!""#);
-    assert!(res.is_err());
-
-    let res = from_str::<ByteBuf>(r#""\ud83c\u""#);
-    assert!(res.is_err());
-
-    // lone trailing surrogate
-    let bytes = ByteBuf::from(vec![237, 176, 129]);
-    let v: ByteBuf = from_str(r#""\udc01""#).unwrap();
-    assert_eq!(v, bytes);
-
-    // leading surrogate followed by other leading surrogate
-    let bytes = ByteBuf::from(vec![237, 160, 188, 237, 160, 188]);
-    let v: ByteBuf = from_str(r#""\ud83c\ud83c""#).unwrap();
-    assert_eq!(v, bytes);
-
-    // leading surrogate followed by "a" (U+0061) in \u encoding
-    let bytes = ByteBuf::from(vec![237, 160, 188, 97]);
-    let v: ByteBuf = from_str(r#""\ud83c\u0061""#).unwrap();
-    assert_eq!(v, bytes);
-
-    // leading surrogate followed by U+0080
-    let bytes = ByteBuf::from(vec![237, 160, 188, 194, 128]);
-    let v: ByteBuf = from_str(r#""\ud83c\u0080""#).unwrap();
-    assert_eq!(v, bytes);
-
-    // leading surrogate followed by U+FFFF
-    let bytes = ByteBuf::from(vec![237, 160, 188, 239, 191, 191]);
-    let v: ByteBuf = from_str(r#""\ud83c\uffff""#).unwrap();
-    assert_eq!(v, bytes);
-}
-
-#[test]
-fn test_byte_buf_de_surrogate_pair() {
-    // leading surrogate followed by trailing surrogate
-    let bytes = ByteBuf::from(vec![240, 159, 128, 128]);
-    let v: ByteBuf = from_str(r#""\ud83c\udc00""#).unwrap();
-    assert_eq!(v, bytes);
-
-    // leading surrogate followed by a surrogate pair
-    let bytes = ByteBuf::from(vec![237, 160, 188, 240, 159, 128, 128]);
-    let v: ByteBuf = from_str(r#""\ud83c\ud83c\udc00""#).unwrap();
-    assert_eq!(v, bytes);
-}
-
-#[cfg(feature = "raw_value")]
-#[test]
-fn test_raw_de_invalid_surrogates() {
-    use serde_json::value::RawValue;
-
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c""#).is_ok());
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c\n""#).is_ok());
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c ""#).is_ok());
-    assert!(from_str::<Box<RawValue>>(r#""\udc01 ""#).is_ok());
-    assert!(from_str::<Box<RawValue>>(r#""\udc01\!""#).is_err());
-    assert!(from_str::<Box<RawValue>>(r#""\udc01\u""#).is_err());
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c\ud83c""#).is_ok());
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c\u0061""#).is_ok());
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c\u0080""#).is_ok());
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c\uffff""#).is_ok());
-}
-
-#[cfg(feature = "raw_value")]
-#[test]
-fn test_raw_de_surrogate_pair() {
-    use serde_json::value::RawValue;
-
-    assert!(from_str::<Box<RawValue>>(r#""\ud83c\udc00""#).is_ok());
-}
-
-#[test]
-fn test_byte_buf_de_multiple() {
-    let s: Vec<ByteBuf> = from_str(r#"["ab\nc", "cd\ne"]"#).unwrap();
-    let a = ByteBuf::from(b"ab\nc".to_vec());
-    let b = ByteBuf::from(b"cd\ne".to_vec());
-    assert_eq!(vec![a, b], s);
-}
-
-#[test]
-fn test_json_pointer() {
-    // Test case taken from https://tools.ietf.org/html/rfc6901#page-5
-    let data: Value = from_str(
-        r#"{
-        "foo": ["bar", "baz"],
-        "": 0,
-        "a/b": 1,
-        "c%d": 2,
-        "e^f": 3,
-        "g|h": 4,
-        "i\\j": 5,
-        "k\"l": 6,
-        " ": 7,
-        "m~n": 8
-    }"#,
-    )
-    .unwrap();
-    assert_eq!(data.pointer("").unwrap(), &data);
-    assert_eq!(data.pointer("/foo").unwrap(), &json!(["bar", "baz"]));
-    assert_eq!(data.pointer("/foo/0").unwrap(), &json!("bar"));
-    assert_eq!(data.pointer("/").unwrap(), &json!(0));
-    assert_eq!(data.pointer("/a~1b").unwrap(), &json!(1));
-    assert_eq!(data.pointer("/c%d").unwrap(), &json!(2));
-    assert_eq!(data.pointer("/e^f").unwrap(), &json!(3));
-    assert_eq!(data.pointer("/g|h").unwrap(), &json!(4));
-    assert_eq!(data.pointer("/i\\j").unwrap(), &json!(5));
-    assert_eq!(data.pointer("/k\"l").unwrap(), &json!(6));
-    assert_eq!(data.pointer("/ ").unwrap(), &json!(7));
-    assert_eq!(data.pointer("/m~0n").unwrap(), &json!(8));
-    // Invalid pointers
-    assert!(data.pointer("/unknown").is_none());
-    assert!(data.pointer("/e^f/ertz").is_none());
-    assert!(data.pointer("/foo/00").is_none());
-    assert!(data.pointer("/foo/01").is_none());
-}
-
-#[test]
-fn test_json_pointer_mut() {
-    // Test case taken from https://tools.ietf.org/html/rfc6901#page-5
-    let mut data: Value = from_str(
-        r#"{
-        "foo": ["bar", "baz"],
-        "": 0,
-        "a/b": 1,
-        "c%d": 2,
-        "e^f": 3,
-        "g|h": 4,
-        "i\\j": 5,
-        "k\"l": 6,
-        " ": 7,
-        "m~n": 8
-    }"#,
-    )
-    .unwrap();
-
-    // Basic pointer checks
-    assert_eq!(data.pointer_mut("/foo").unwrap(), &json!(["bar", "baz"]));
-    assert_eq!(data.pointer_mut("/foo/0").unwrap(), &json!("bar"));
-    assert_eq!(data.pointer_mut("/").unwrap(), 0);
-    assert_eq!(data.pointer_mut("/a~1b").unwrap(), 1);
-    assert_eq!(data.pointer_mut("/c%d").unwrap(), 2);
-    assert_eq!(data.pointer_mut("/e^f").unwrap(), 3);
-    assert_eq!(data.pointer_mut("/g|h").unwrap(), 4);
-    assert_eq!(data.pointer_mut("/i\\j").unwrap(), 5);
-    assert_eq!(data.pointer_mut("/k\"l").unwrap(), 6);
-    assert_eq!(data.pointer_mut("/ ").unwrap(), 7);
-    assert_eq!(data.pointer_mut("/m~0n").unwrap(), 8);
-
-    // Invalid pointers
-    assert!(data.pointer_mut("/unknown").is_none());
-    assert!(data.pointer_mut("/e^f/ertz").is_none());
-    assert!(data.pointer_mut("/foo/00").is_none());
-    assert!(data.pointer_mut("/foo/01").is_none());
-
-    // Mutable pointer checks
-    *data.pointer_mut("/").unwrap() = 100.into();
-    assert_eq!(data.pointer("/").unwrap(), 100);
-    *data.pointer_mut("/foo/0").unwrap() = json!("buzz");
-    assert_eq!(data.pointer("/foo/0").unwrap(), &json!("buzz"));
-
-    // Example of ownership stealing
-    assert_eq!(
-        data.pointer_mut("/a~1b")
-            .map(|m| mem::replace(m, json!(null)))
-            .unwrap(),
-        1
-    );
-    assert_eq!(data.pointer("/a~1b").unwrap(), &json!(null));
-
-    // Need to compare against a clone so we don't anger the borrow checker
-    // by taking out two references to a mutable value
-    let mut d2 = data.clone();
-    assert_eq!(data.pointer_mut("").unwrap(), &mut d2);
-}
-
-#[test]
-fn test_stack_overflow() {
-    let brackets: String = iter::repeat('[')
-        .take(127)
-        .chain(iter::repeat(']').take(127))
-        .collect();
-    let _: Value = from_str(&brackets).unwrap();
-
-    let brackets = "[".repeat(129);
-    test_parse_err::<Value>(&[(&brackets, "recursion limit exceeded at line 1 column 128")]);
-}
-
-#[test]
-#[cfg(feature = "unbounded_depth")]
-fn test_disable_recursion_limit() {
-    let brackets: String = iter::repeat('[')
-        .take(140)
-        .chain(iter::repeat(']').take(140))
-        .collect();
-
-    let mut deserializer = Deserializer::from_str(&brackets);
-    deserializer.disable_recursion_limit();
-    Value::deserialize(&mut deserializer).unwrap();
-}
-
-#[test]
-fn test_integer_key() {
-    // map with integer keys
-    let map = treemap!(
-        1 => 2,
-        -1 => 6,
-    );
-    let j = r#"{"-1":6,"1":2}"#;
-    test_encode_ok(&[(&map, j)]);
-    test_parse_ok(vec![(j, map)]);
-
-    test_parse_err::<BTreeMap<i32, ()>>(&[
-        (
-            r#"{"x":null}"#,
-            "invalid value: expected key to be a number in quotes at line 1 column 2",
-        ),
-        (
-            r#"{" 123":null}"#,
-            "invalid value: expected key to be a number in quotes at line 1 column 2",
-        ),
-        (r#"{"123 ":null}"#, "expected `\"` at line 1 column 6"),
-    ]);
-
-    let err = from_value::<BTreeMap<i32, ()>>(json!({" 123":null})).unwrap_err();
-    assert_eq!(
-        err.to_string(),
-        "invalid value: expected key to be a number in quotes",
-    );
-
-    let err = from_value::<BTreeMap<i32, ()>>(json!({"123 ":null})).unwrap_err();
-    assert_eq!(
-        err.to_string(),
-        "invalid value: expected key to be a number in quotes",
-    );
-}
-
-#[test]
-fn test_integer128_key() {
-    let map = treemap! {
-        100000000000000000000000000000000000000u128 => (),
-    };
-    let j = r#"{"100000000000000000000000000000000000000":null}"#;
-    assert_eq!(to_string(&map).unwrap(), j);
-    assert_eq!(from_str::<BTreeMap<u128, ()>>(j).unwrap(), map);
-}
-
-#[test]
-fn test_float_key() {
-    #[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone)]
-    struct Float;
-    impl Serialize for Float {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: Serializer,
-        {
-            serializer.serialize_f32(1.23)
-        }
-    }
-    impl<'de> Deserialize<'de> for Float {
-        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-        where
-            D: de::Deserializer<'de>,
-        {
-            f32::deserialize(deserializer).map(|_| Float)
-        }
-    }
-
-    // map with float key
-    let map = treemap!(Float => "x".to_owned());
-    let j = r#"{"1.23":"x"}"#;
-
-    test_encode_ok(&[(&map, j)]);
-    test_parse_ok(vec![(j, map)]);
-
-    let j = r#"{"x": null}"#;
-    test_parse_err::<BTreeMap<Float, ()>>(&[(
-        j,
-        "invalid value: expected key to be a number in quotes at line 1 column 2",
-    )]);
-}
-
-#[test]
-fn test_deny_non_finite_f32_key() {
-    // We store float bits so that we can derive Ord, and other traits. In a
-    // real context the code might involve a crate like ordered-float.
-
-    #[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone)]
-    struct F32Bits(u32);
-    impl Serialize for F32Bits {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: Serializer,
-        {
-            serializer.serialize_f32(f32::from_bits(self.0))
-        }
-    }
-
-    let map = treemap!(F32Bits(f32::INFINITY.to_bits()) => "x".to_owned());
-    assert!(serde_json::to_string(&map).is_err());
-    assert!(serde_json::to_value(map).is_err());
-
-    let map = treemap!(F32Bits(f32::NEG_INFINITY.to_bits()) => "x".to_owned());
-    assert!(serde_json::to_string(&map).is_err());
-    assert!(serde_json::to_value(map).is_err());
-
-    let map = treemap!(F32Bits(f32::NAN.to_bits()) => "x".to_owned());
-    assert!(serde_json::to_string(&map).is_err());
-    assert!(serde_json::to_value(map).is_err());
-}
-
-#[test]
-fn test_deny_non_finite_f64_key() {
-    // We store float bits so that we can derive Ord, and other traits. In a
-    // real context the code might involve a crate like ordered-float.
-
-    #[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone)]
-    struct F64Bits(u64);
-    impl Serialize for F64Bits {
-        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: Serializer,
-        {
-            serializer.serialize_f64(f64::from_bits(self.0))
-        }
-    }
-
-    let map = treemap!(F64Bits(f64::INFINITY.to_bits()) => "x".to_owned());
-    assert!(serde_json::to_string(&map).is_err());
-    assert!(serde_json::to_value(map).is_err());
-
-    let map = treemap!(F64Bits(f64::NEG_INFINITY.to_bits()) => "x".to_owned());
-    assert!(serde_json::to_string(&map).is_err());
-    assert!(serde_json::to_value(map).is_err());
-
-    let map = treemap!(F64Bits(f64::NAN.to_bits()) => "x".to_owned());
-    assert!(serde_json::to_string(&map).is_err());
-    assert!(serde_json::to_value(map).is_err());
-}
-
-#[test]
-fn test_boolean_key() {
-    let map = treemap!(false => 0, true => 1);
-    let j = r#"{"false":0,"true":1}"#;
-    test_encode_ok(&[(&map, j)]);
-    test_parse_ok(vec![(j, map)]);
-}
-
-#[test]
-fn test_borrowed_key() {
-    let map: BTreeMap<&str, ()> = from_str("{\"borrowed\":null}").unwrap();
-    let expected = treemap! { "borrowed" => () };
-    assert_eq!(map, expected);
-
-    #[derive(Deserialize, Debug, Ord, PartialOrd, Eq, PartialEq)]
-    struct NewtypeStr<'a>(&'a str);
-
-    let map: BTreeMap<NewtypeStr, ()> = from_str("{\"borrowed\":null}").unwrap();
-    let expected = treemap! { NewtypeStr("borrowed") => () };
-    assert_eq!(map, expected);
-}
-
-#[test]
-fn test_effectively_string_keys() {
-    #[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Serialize, Deserialize)]
-    enum Enum {
-        One,
-        Two,
-    }
-    let map = treemap! {
-        Enum::One => 1,
-        Enum::Two => 2,
-    };
-    let expected = r#"{"One":1,"Two":2}"#;
-    test_encode_ok(&[(&map, expected)]);
-    test_parse_ok(vec![(expected, map)]);
-
-    #[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Serialize, Deserialize)]
-    struct Wrapper(String);
-    let map = treemap! {
-        Wrapper("zero".to_owned()) => 0,
-        Wrapper("one".to_owned()) => 1,
-    };
-    let expected = r#"{"one":1,"zero":0}"#;
-    test_encode_ok(&[(&map, expected)]);
-    test_parse_ok(vec![(expected, map)]);
-}
-
-#[test]
-fn test_json_macro() {
-    // This is tricky because the <...> is not a single TT and the comma inside
-    // looks like an array element separator.
-    let _ = json!([
-        <Result<(), ()> as Clone>::clone(&Ok(())),
-        <Result<(), ()> as Clone>::clone(&Err(()))
-    ]);
-
-    // Same thing but in the map values.
-    let _ = json!({
-        "ok": <Result<(), ()> as Clone>::clone(&Ok(())),
-        "err": <Result<(), ()> as Clone>::clone(&Err(()))
-    });
-
-    // It works in map keys but only if they are parenthesized.
-    let _ = json!({
-        (<Result<&str, ()> as Clone>::clone(&Ok("")).unwrap()): "ok",
-        (<Result<(), &str> as Clone>::clone(&Err("")).unwrap_err()): "err"
-    });
-
-    #[deny(unused_results)]
-    let _ = json!({ "architecture": [true, null] });
-}
-
-#[test]
-fn issue_220() {
-    #[derive(Debug, PartialEq, Eq, Deserialize)]
-    enum E {
-        V(u8),
-    }
-
-    assert!(from_str::<E>(r#" "V"0 "#).is_err());
-
-    assert_eq!(from_str::<E>(r#"{"V": 0}"#).unwrap(), E::V(0));
-}
-
-#[test]
-fn test_partialeq_number() {
-    macro_rules! number_partialeq_ok {
-        ($($n:expr)*) => {
-            $(
-                let value = to_value($n).unwrap();
-                let s = $n.to_string();
-                assert_eq!(value, $n);
-                assert_eq!($n, value);
-                assert_ne!(value, s);
-            )*
-        };
-    }
-
-    number_partialeq_ok!(0 1 100
-        i8::MIN i8::MAX i16::MIN i16::MAX i32::MIN i32::MAX i64::MIN i64::MAX
-        u8::MIN u8::MAX u16::MIN u16::MAX u32::MIN u32::MAX u64::MIN u64::MAX
-        f32::MIN f32::MAX f32::MIN_EXP f32::MAX_EXP f32::MIN_POSITIVE
-        f64::MIN f64::MAX f64::MIN_EXP f64::MAX_EXP f64::MIN_POSITIVE
-        f32::consts::E f32::consts::PI f32::consts::LN_2 f32::consts::LOG2_E
-        f64::consts::E f64::consts::PI f64::consts::LN_2 f64::consts::LOG2_E
-    );
-}
-
-#[test]
-fn test_partialeq_string() {
-    let v = to_value("42").unwrap();
-    assert_eq!(v, "42");
-    assert_eq!("42", v);
-    assert_ne!(v, 42);
-    assert_eq!(v, String::from("42"));
-    assert_eq!(String::from("42"), v);
-}
-
-#[test]
-fn test_partialeq_bool() {
-    let v = to_value(true).unwrap();
-    assert_eq!(v, true);
-    assert_eq!(true, v);
-    assert_ne!(v, false);
-    assert_ne!(v, "true");
-    assert_ne!(v, 1);
-    assert_ne!(v, 0);
-}
-
-struct FailReader(io::ErrorKind);
-
-impl io::Read for FailReader {
-    fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
-        Err(io::Error::new(self.0, "oh no!"))
-    }
-}
-
-#[test]
-fn test_category() {
-    assert!(from_str::<String>("123").unwrap_err().is_data());
-
-    assert!(from_str::<String>("]").unwrap_err().is_syntax());
-
-    assert!(from_str::<String>("").unwrap_err().is_eof());
-    assert!(from_str::<String>("\"").unwrap_err().is_eof());
-    assert!(from_str::<String>("\"\\").unwrap_err().is_eof());
-    assert!(from_str::<String>("\"\\u").unwrap_err().is_eof());
-    assert!(from_str::<String>("\"\\u0").unwrap_err().is_eof());
-    assert!(from_str::<String>("\"\\u00").unwrap_err().is_eof());
-    assert!(from_str::<String>("\"\\u000").unwrap_err().is_eof());
-
-    assert!(from_str::<Vec<usize>>("[").unwrap_err().is_eof());
-    assert!(from_str::<Vec<usize>>("[0").unwrap_err().is_eof());
-    assert!(from_str::<Vec<usize>>("[0,").unwrap_err().is_eof());
-
-    assert!(from_str::<BTreeMap<String, usize>>("{")
-        .unwrap_err()
-        .is_eof());
-    assert!(from_str::<BTreeMap<String, usize>>("{\"k\"")
-        .unwrap_err()
-        .is_eof());
-    assert!(from_str::<BTreeMap<String, usize>>("{\"k\":")
-        .unwrap_err()
-        .is_eof());
-    assert!(from_str::<BTreeMap<String, usize>>("{\"k\":0")
-        .unwrap_err()
-        .is_eof());
-    assert!(from_str::<BTreeMap<String, usize>>("{\"k\":0,")
-        .unwrap_err()
-        .is_eof());
-
-    let fail = FailReader(io::ErrorKind::NotConnected);
-    assert!(from_reader::<_, String>(fail).unwrap_err().is_io());
-}
-
-#[test]
-// Clippy false positive: https://github.com/Manishearth/rust-clippy/issues/292
-#[allow(clippy::needless_lifetimes)]
-fn test_into_io_error() {
-    fn io_error<'de, T: Deserialize<'de> + Debug>(j: &'static str) -> io::Error {
-        from_str::<T>(j).unwrap_err().into()
-    }
-
-    assert_eq!(
-        io_error::<String>("\"\\u").kind(),
-        io::ErrorKind::UnexpectedEof
-    );
-    assert_eq!(io_error::<String>("0").kind(), io::ErrorKind::InvalidData);
-    assert_eq!(io_error::<String>("]").kind(), io::ErrorKind::InvalidData);
-
-    let fail = FailReader(io::ErrorKind::NotConnected);
-    let io_err: io::Error = from_reader::<_, u8>(fail).unwrap_err().into();
-    assert_eq!(io_err.kind(), io::ErrorKind::NotConnected);
-}
-
-#[test]
-fn test_borrow() {
-    let s: &str = from_str("\"borrowed\"").unwrap();
-    assert_eq!("borrowed", s);
-
-    let s: &str = from_slice(b"\"borrowed\"").unwrap();
-    assert_eq!("borrowed", s);
-}
-
-#[test]
-fn null_invalid_type() {
-    let err = serde_json::from_str::<String>("null").unwrap_err();
-    assert_eq!(
-        format!("{}", err),
-        String::from("invalid type: null, expected a string at line 1 column 4")
-    );
-}
-
-#[test]
-fn test_integer128() {
-    let signed = &[i128::MIN, -1, 0, 1, i128::MAX];
-    let unsigned = &[0, 1, u128::MAX];
-
-    for integer128 in signed {
-        let expected = integer128.to_string();
-        assert_eq!(to_string(integer128).unwrap(), expected);
-        assert_eq!(from_str::<i128>(&expected).unwrap(), *integer128);
-    }
-
-    for integer128 in unsigned {
-        let expected = integer128.to_string();
-        assert_eq!(to_string(integer128).unwrap(), expected);
-        assert_eq!(from_str::<u128>(&expected).unwrap(), *integer128);
-    }
-
-    test_parse_err::<i128>(&[
-        (
-            "-170141183460469231731687303715884105729",
-            "number out of range at line 1 column 40",
-        ),
-        (
-            "170141183460469231731687303715884105728",
-            "number out of range at line 1 column 39",
-        ),
-    ]);
-
-    test_parse_err::<u128>(&[
-        ("-1", "number out of range at line 1 column 1"),
-        (
-            "340282366920938463463374607431768211456",
-            "number out of range at line 1 column 39",
-        ),
-    ]);
-}
-
-#[test]
-fn test_integer128_to_value() {
-    let signed = &[i128::from(i64::MIN), i128::from(u64::MAX)];
-    let unsigned = &[0, u128::from(u64::MAX)];
-
-    for integer128 in signed {
-        let expected = integer128.to_string();
-        assert_eq!(to_value(integer128).unwrap().to_string(), expected);
-    }
-
-    for integer128 in unsigned {
-        let expected = integer128.to_string();
-        assert_eq!(to_value(integer128).unwrap().to_string(), expected);
-    }
-
-    if !cfg!(feature = "arbitrary_precision") {
-        let err = to_value(u128::from(u64::MAX) + 1).unwrap_err();
-        assert_eq!(err.to_string(), "number out of range");
-    }
-}
-
-#[cfg(feature = "raw_value")]
-#[test]
-fn test_borrowed_raw_value() {
-    #[derive(Serialize, Deserialize)]
-    struct Wrapper<'a> {
-        a: i8,
-        #[serde(borrow)]
-        b: &'a RawValue,
-        c: i8,
-    }
-
-    let wrapper_from_str: Wrapper =
-        serde_json::from_str(r#"{"a": 1, "b": {"foo": 2}, "c": 3}"#).unwrap();
-    assert_eq!(r#"{"foo": 2}"#, wrapper_from_str.b.get());
-
-    let wrapper_to_string = serde_json::to_string(&wrapper_from_str).unwrap();
-    assert_eq!(r#"{"a":1,"b":{"foo": 2},"c":3}"#, wrapper_to_string);
-
-    let wrapper_to_value = serde_json::to_value(&wrapper_from_str).unwrap();
-    assert_eq!(json!({"a": 1, "b": {"foo": 2}, "c": 3}), wrapper_to_value);
-
-    let array_from_str: Vec<&RawValue> =
-        serde_json::from_str(r#"["a", 42, {"foo": "bar"}, null]"#).unwrap();
-    assert_eq!(r#""a""#, array_from_str[0].get());
-    assert_eq!("42", array_from_str[1].get());
-    assert_eq!(r#"{"foo": "bar"}"#, array_from_str[2].get());
-    assert_eq!("null", array_from_str[3].get());
-
-    let array_to_string = serde_json::to_string(&array_from_str).unwrap();
-    assert_eq!(r#"["a",42,{"foo": "bar"},null]"#, array_to_string);
-}
-
-#[cfg(feature = "raw_value")]
-#[test]
-fn test_raw_value_in_map_key() {
-    #[derive(RefCast)]
-    #[repr(transparent)]
-    struct RawMapKey(RawValue);
-
-    #[allow(unknown_lints)]
-    #[allow(non_local_definitions)] // false positive: https://github.com/rust-lang/rust/issues/121621
-    impl<'de> Deserialize<'de> for &'de RawMapKey {
-        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-        where
-            D: serde::Deserializer<'de>,
-        {
-            let raw_value = <&RawValue>::deserialize(deserializer)?;
-            Ok(RawMapKey::ref_cast(raw_value))
-        }
-    }
-
-    impl PartialEq for RawMapKey {
-        fn eq(&self, other: &Self) -> bool {
-            self.0.get() == other.0.get()
-        }
-    }
-
-    impl Eq for RawMapKey {}
-
-    impl Hash for RawMapKey {
-        fn hash<H: Hasher>(&self, hasher: &mut H) {
-            self.0.get().hash(hasher);
-        }
-    }
-
-    let map_from_str: HashMap<&RawMapKey, &RawValue> =
-        serde_json::from_str(r#" {"\\k":"\\v"} "#).unwrap();
-    let (map_k, map_v) = map_from_str.into_iter().next().unwrap();
-    assert_eq!("\"\\\\k\"", map_k.0.get());
-    assert_eq!("\"\\\\v\"", map_v.get());
-}
-
-#[cfg(feature = "raw_value")]
-#[test]
-fn test_boxed_raw_value() {
-    #[derive(Serialize, Deserialize)]
-    struct Wrapper {
-        a: i8,
-        b: Box<RawValue>,
-        c: i8,
-    }
-
-    let wrapper_from_str: Wrapper =
-        serde_json::from_str(r#"{"a": 1, "b": {"foo": 2}, "c": 3}"#).unwrap();
-    assert_eq!(r#"{"foo": 2}"#, wrapper_from_str.b.get());
-
-    let wrapper_from_reader: Wrapper =
-        serde_json::from_reader(br#"{"a": 1, "b": {"foo": 2}, "c": 3}"#.as_ref()).unwrap();
-    assert_eq!(r#"{"foo": 2}"#, wrapper_from_reader.b.get());
-
-    let wrapper_from_value: Wrapper =
-        serde_json::from_value(json!({"a": 1, "b": {"foo": 2}, "c": 3})).unwrap();
-    assert_eq!(r#"{"foo":2}"#, wrapper_from_value.b.get());
-
-    let wrapper_to_string = serde_json::to_string(&wrapper_from_str).unwrap();
-    assert_eq!(r#"{"a":1,"b":{"foo": 2},"c":3}"#, wrapper_to_string);
-
-    let wrapper_to_value = serde_json::to_value(&wrapper_from_str).unwrap();
-    assert_eq!(json!({"a": 1, "b": {"foo": 2}, "c": 3}), wrapper_to_value);
-
-    let array_from_str: Vec<Box<RawValue>> =
-        serde_json::from_str(r#"["a", 42, {"foo": "bar"}, null]"#).unwrap();
-    assert_eq!(r#""a""#, array_from_str[0].get());
-    assert_eq!("42", array_from_str[1].get());
-    assert_eq!(r#"{"foo": "bar"}"#, array_from_str[2].get());
-    assert_eq!("null", array_from_str[3].get());
-
-    let array_from_reader: Vec<Box<RawValue>> =
-        serde_json::from_reader(br#"["a", 42, {"foo": "bar"}, null]"#.as_ref()).unwrap();
-    assert_eq!(r#""a""#, array_from_reader[0].get());
-    assert_eq!("42", array_from_reader[1].get());
-    assert_eq!(r#"{"foo": "bar"}"#, array_from_reader[2].get());
-    assert_eq!("null", array_from_reader[3].get());
-
-    let array_to_string = serde_json::to_string(&array_from_str).unwrap();
-    assert_eq!(r#"["a",42,{"foo": "bar"},null]"#, array_to_string);
-}
-
-#[cfg(feature = "raw_value")]
-#[test]
-fn test_raw_invalid_utf8() {
-    let j = &[b'"', b'\xCE', b'\xF8', b'"'];
-    let value_err = serde_json::from_slice::<Value>(j).unwrap_err();
-    let raw_value_err = serde_json::from_slice::<Box<RawValue>>(j).unwrap_err();
-
-    assert_eq!(
-        value_err.to_string(),
-        "invalid unicode code point at line 1 column 4",
-    );
-    assert_eq!(
-        raw_value_err.to_string(),
-        "invalid unicode code point at line 1 column 4",
-    );
-}
-
-#[cfg(feature = "raw_value")]
-#[test]
-fn test_serialize_unsized_value_to_raw_value() {
-    assert_eq!(
-        serde_json::value::to_raw_value("foobar").unwrap().get(),
-        r#""foobar""#,
-    );
-}
-
-#[test]
-fn test_borrow_in_map_key() {
-    #[derive(Deserialize, Debug)]
-    struct Outer {
-        #[allow(dead_code)]
-        map: BTreeMap<MyMapKey, ()>,
-    }
-
-    #[derive(Ord, PartialOrd, Eq, PartialEq, Debug)]
-    struct MyMapKey(usize);
-
-    impl<'de> Deserialize<'de> for MyMapKey {
-        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-        where
-            D: de::Deserializer<'de>,
-        {
-            let s = <&str>::deserialize(deserializer)?;
-            let n = s.parse().map_err(de::Error::custom)?;
-            Ok(MyMapKey(n))
-        }
-    }
-
-    let value = json!({ "map": { "1": null } });
-    Outer::deserialize(&value).unwrap();
-}
-
-#[test]
-fn test_value_into_deserializer() {
-    #[derive(Deserialize)]
-    struct Outer {
-        inner: Inner,
-    }
-
-    #[derive(Deserialize)]
-    struct Inner {
-        string: String,
-    }
-
-    let mut map = BTreeMap::new();
-    map.insert("inner", json!({ "string": "Hello World" }));
-
-    let outer = Outer::deserialize(serde::de::value::MapDeserializer::new(
-        map.iter().map(|(k, v)| (*k, v)),
-    ))
-    .unwrap();
-    assert_eq!(outer.inner.string, "Hello World");
-
-    let outer = Outer::deserialize(map.into_deserializer()).unwrap();
-    assert_eq!(outer.inner.string, "Hello World");
-}
-
-#[test]
-fn hash_positive_and_negative_zero() {
-    let rand = std::hash::RandomState::new();
-
-    let k1 = serde_json::from_str::<Number>("0.0").unwrap();
-    let k2 = serde_json::from_str::<Number>("-0.0").unwrap();
-    if cfg!(feature = "arbitrary_precision") {
-        assert_ne!(k1, k2);
-        assert_ne!(rand.hash_one(k1), rand.hash_one(k2));
-    } else {
-        assert_eq!(k1, k2);
-        assert_eq!(rand.hash_one(k1), rand.hash_one(k2));
-    }
-}
-
-#[test]
-fn test_control_character_search() {
-    // Different space circumstances
-    for n in 0..16 {
-        for m in 0..16 {
-            test_parse_err::<String>(&[(
-                &format!("\"{}\n{}\"", " ".repeat(n), " ".repeat(m)),
-                "control character (\\u0000-\\u001F) found while parsing a string at line 2 column 0",
-            )]);
-        }
-    }
-
-    // Multiple occurrences
-    test_parse_err::<String>(&[(
-        "\"\t\n\r\"",
-        "control character (\\u0000-\\u001F) found while parsing a string at line 1 column 2",
-    )]);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_colon.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_colon.rs
deleted file mode 100644
index d93b7b9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_colon.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "a" });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_colon.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_colon.stderr
deleted file mode 100644
index d5f6466..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_colon.stderr
+++ /dev/null
@@ -1,12 +0,0 @@
-error: unexpected end of macro invocation
- --> tests/ui/missing_colon.rs:4:5
-  |
-4 |     json!({ "a" });
-  |     ^^^^^^^^^^^^^^ missing tokens in macro arguments
-  |
-note: while trying to match `@`
- --> src/macros.rs
-  |
-  |     (@array [$($elems:expr,)*]) => {
-  |      ^
-  = note: this error originates in the macro `$crate::json_internal` which comes from the expansion of the macro `json` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_comma.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_comma.rs
deleted file mode 100644
index 8818c3e6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_comma.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "1": "" "2": "" });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_comma.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_comma.stderr
deleted file mode 100644
index b0f0e4b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_comma.stderr
+++ /dev/null
@@ -1,13 +0,0 @@
-error: no rules expected `"2"`
- --> tests/ui/missing_comma.rs:4:21
-  |
-4 |     json!({ "1": "" "2": "" });
-  |                    -^^^ no rules expected this token in macro call
-  |                    |
-  |                    help: missing comma here
-  |
-note: while trying to match `,`
- --> src/macros.rs
-  |
-  |     ($e:expr , $($tt:tt)*) => {};
-  |              ^
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_value.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_value.rs
deleted file mode 100644
index 0ba14e2..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_value.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "a" : });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_value.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_value.stderr
deleted file mode 100644
index 69f6ca3..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/missing_value.stderr
+++ /dev/null
@@ -1,12 +0,0 @@
-error: unexpected end of macro invocation
- --> tests/ui/missing_value.rs:4:5
-  |
-4 |     json!({ "a" : });
-  |     ^^^^^^^^^^^^^^^^ missing tokens in macro arguments
-  |
-note: while trying to match `@`
- --> src/macros.rs
-  |
-  |     (@array [$($elems:expr,)*]) => {
-  |      ^
-  = note: this error originates in the macro `$crate::json_internal` which comes from the expansion of the macro `json` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/not_found.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/not_found.rs
deleted file mode 100644
index 2df6870..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/not_found.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "a" : x });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/not_found.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/not_found.stderr
deleted file mode 100644
index 6fec1804..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/not_found.stderr
+++ /dev/null
@@ -1,5 +0,0 @@
-error[E0425]: cannot find value `x` in this scope
- --> tests/ui/not_found.rs:4:19
-  |
-4 |     json!({ "a" : x });
-  |                   ^ not found in this scope
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_expr.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_expr.rs
deleted file mode 100644
index e7f1805..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_expr.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "a" : ~ });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_expr.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_expr.stderr
deleted file mode 100644
index 70cd7416..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_expr.stderr
+++ /dev/null
@@ -1,11 +0,0 @@
-error: no rules expected `~`
- --> tests/ui/parse_expr.rs:4:19
-  |
-4 |     json!({ "a" : ~ });
-  |                   ^ no rules expected this token in macro call
-  |
-note: while trying to match meta-variable `$e:expr`
- --> src/macros.rs
-  |
-  |     ($e:expr , $($tt:tt)*) => {};
-  |      ^^^^^^^
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_key.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_key.rs
deleted file mode 100644
index 858bd71..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_key.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "".s : true });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_key.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_key.stderr
deleted file mode 100644
index 15662dc5..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/parse_key.stderr
+++ /dev/null
@@ -1,5 +0,0 @@
-error[E0609]: no field `s` on type `&'static str`
- --> tests/ui/parse_key.rs:4:16
-  |
-4 |     json!({ "".s : true });
-  |                ^ unknown field
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_array_element.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_array_element.rs
deleted file mode 100644
index 226c58c..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_array_element.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!([ true => ]);
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_array_element.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_array_element.stderr
deleted file mode 100644
index b848e4db..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_array_element.stderr
+++ /dev/null
@@ -1,7 +0,0 @@
-error: no rules expected `=>`
- --> tests/ui/unexpected_after_array_element.rs:4:18
-  |
-4 |     json!([ true => ]);
-  |                  ^^ no rules expected this token in macro call
-  |
-  = note: while trying to match end of macro
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_map_entry.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_map_entry.rs
deleted file mode 100644
index 0dfb731..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_map_entry.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "k": true => });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_map_entry.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_map_entry.stderr
deleted file mode 100644
index 9f77c07..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_after_map_entry.stderr
+++ /dev/null
@@ -1,7 +0,0 @@
-error: no rules expected `=>`
- --> tests/ui/unexpected_after_map_entry.rs:4:23
-  |
-4 |     json!({ "k": true => });
-  |                       ^^ no rules expected this token in macro call
-  |
-  = note: while trying to match end of macro
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_colon.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_colon.rs
deleted file mode 100644
index e767ea6..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_colon.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ : true });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_colon.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_colon.stderr
deleted file mode 100644
index d47e8816..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_colon.stderr
+++ /dev/null
@@ -1,7 +0,0 @@
-error: no rules expected `:`
- --> tests/ui/unexpected_colon.rs:4:13
-  |
-4 |     json!({ : true });
-  |             ^ no rules expected this token in macro call
-  |
-  = note: while trying to match end of macro
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_comma.rs b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_comma.rs
deleted file mode 100644
index 338874e..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_comma.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-use serde_json::json;
-
-fn main() {
-    json!({ "a" , "b": true });
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_comma.stderr b/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_comma.stderr
deleted file mode 100644
index e3082745..0000000
--- a/third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/tests/ui/unexpected_comma.stderr
+++ /dev/null
@@ -1,7 +0,0 @@
-error: no rules expected `,`
- --> tests/ui/unexpected_comma.rs:4:17
-  |
-4 |     json!({ "a" , "b": true });
-  |                 ^ no rules expected this token in macro call
-  |
-  = note: while trying to match end of macro
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/.cargo-checksum.json
deleted file mode 100644
index 697c9ce..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{}}
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/Cargo.toml
deleted file mode 100644
index 857c779..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/Cargo.toml
+++ /dev/null
@@ -1,32 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.56"
-name = "unicode-linebreak"
-version = "0.1.5"
-authors = ["Axel Forsman <axelsfor@gmail.com>"]
-include = [
-    "src/**/*",
-    "LICENSE",
-]
-description = "Implementation of the Unicode Line Breaking Algorithm"
-homepage = "https://github.com/axelf4/unicode-linebreak"
-readme = "README.md"
-keywords = [
-    "unicode",
-    "text",
-    "layout",
-]
-categories = ["internationalization"]
-license = "Apache-2.0"
-repository = "https://github.com/axelf4/unicode-linebreak"
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/Cargo.toml.orig
deleted file mode 100644
index acf1d2cc..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/Cargo.toml.orig
+++ /dev/null
@@ -1,14 +0,0 @@
-[package]
-name = "unicode-linebreak"
-version = "0.1.5"
-authors = ["Axel Forsman <axelsfor@gmail.com>"]
-description = "Implementation of the Unicode Line Breaking Algorithm"
-homepage = "https://github.com/axelf4/unicode-linebreak"
-repository = "https://github.com/axelf4/unicode-linebreak"
-readme = "README.md"
-keywords = ["unicode", "text", "layout"]
-categories = ["internationalization"]
-license = "Apache-2.0"
-include = ["src/**/*", "LICENSE"]
-edition = "2021"
-rust-version = "1.56"
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/LICENSE b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/README.md b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/README.md
deleted file mode 100644
index d3f1de59..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# unicode-linebreak
-
-Implementation of the Line Breaking Algorithm described in [Unicode Standard Annex #14][UAX14].
-
-![test](https://github.com/axelf4/unicode-linebreak/workflows/test/badge.svg)
-[![Documentation](https://docs.rs/unicode-linebreak/badge.svg)](https://docs.rs/unicode-linebreak)
-
-Given an input text, locates "line break opportunities", or positions appropriate for wrapping
-lines when displaying text.
-
-## Example
-
-```rust
-use unicode_linebreak::{linebreaks, BreakOpportunity::{Mandatory, Allowed}};
-
-let text = "a b \nc";
-assert!(linebreaks(text).eq([
-	(2, Allowed),   // May break after first space
-	(5, Mandatory), // Must break after line feed
-	(6, Mandatory)  // Must break at end of text, so that there always is at least one LB
-]));
-```
-
-## Development
-
-After cloning the repository or modifying `LineBreak.txt` the tables
-have to be (re-)generated:
-
-```sh
-# Generate src/tables.rs
-(cd gen-tables && cargo run)
-# Run tests to make sure it was successful
-cargo test
-```
-
-[UAX14]: https://www.unicode.org/reports/tr14/
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/lib.rs
deleted file mode 100644
index ca473d8..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/lib.rs
+++ /dev/null
@@ -1,160 +0,0 @@
-//! Implementation of the Line Breaking Algorithm described in [Unicode Standard Annex #14][UAX14].
-//!
-//! Given an input text, locates "line break opportunities", or positions appropriate for wrapping
-//! lines when displaying text.
-//!
-//! # Example
-//!
-//! ```
-//! use unicode_linebreak::{linebreaks, BreakOpportunity::{Mandatory, Allowed}};
-//!
-//! let text = "a b \nc";
-//! assert!(linebreaks(text).eq([
-//!     (2, Allowed),   // May break after first space
-//!     (5, Mandatory), // Must break after line feed
-//!     (6, Mandatory)  // Must break at end of text, so that there always is at least one LB
-//! ]));
-//! ```
-//!
-//! [UAX14]: https://www.unicode.org/reports/tr14/
-
-#![no_std]
-#![deny(missing_docs, missing_debug_implementations)]
-
-use core::iter::once;
-
-/// The [Unicode version](https://www.unicode.org/versions/) conformed to.
-pub const UNICODE_VERSION: (u8, u8, u8) = (15, 0, 0);
-
-include!("shared.rs");
-include!("tables.rs");
-
-/// Returns the line break property of the specified code point.
-///
-/// # Examples
-///
-/// ```
-/// use unicode_linebreak::{BreakClass, break_property};
-/// assert_eq!(break_property(0x2CF3), BreakClass::Alphabetic);
-/// ```
-#[inline(always)]
-pub fn break_property(codepoint: u32) -> BreakClass {
-    const BMP_INDEX_LENGTH: u32 = BMP_LIMIT >> BMP_SHIFT;
-    const OMITTED_BMP_INDEX_1_LENGTH: u32 = BMP_LIMIT >> SHIFT_1;
-
-    let data_pos = if codepoint < BMP_LIMIT {
-        let i = codepoint >> BMP_SHIFT;
-        BREAK_PROP_TRIE_INDEX[i as usize] + (codepoint & (BMP_DATA_BLOCK_LENGTH - 1)) as u16
-    } else if codepoint < BREAK_PROP_TRIE_HIGH_START {
-        let i1 = codepoint >> SHIFT_1;
-        let i2 = BREAK_PROP_TRIE_INDEX
-            [(i1 + BMP_INDEX_LENGTH - OMITTED_BMP_INDEX_1_LENGTH) as usize]
-            + ((codepoint >> SHIFT_2) & (INDEX_2_BLOCK_LENGTH - 1)) as u16;
-        let i3_block = BREAK_PROP_TRIE_INDEX[i2 as usize];
-        let i3_pos = ((codepoint >> SHIFT_3) & (INDEX_3_BLOCK_LENGTH - 1)) as u16;
-
-        debug_assert!(i3_block & 0x8000 == 0, "18-bit indices are unexpected");
-        let data_block = BREAK_PROP_TRIE_INDEX[(i3_block + i3_pos) as usize];
-        data_block + (codepoint & (SMALL_DATA_BLOCK_LENGTH - 1)) as u16
-    } else {
-        return XX;
-    };
-    BREAK_PROP_TRIE_DATA[data_pos as usize]
-}
-
-/// Break opportunity type.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum BreakOpportunity {
-    /// A line must break at this spot.
-    Mandatory,
-    /// A line is allowed to end at this spot.
-    Allowed,
-}
-
-/// Returns an iterator over line break opportunities in the specified string.
-///
-/// Break opportunities are given as tuples of the byte index of the character succeeding the break
-/// and the type.
-///
-/// Uses the default Line Breaking Algorithm with the tailoring that Complex-Context Dependent
-/// (SA) characters get resolved to Ordinary Alphabetic and Symbol Characters (AL) regardless of
-/// General_Category.
-///
-/// # Examples
-///
-/// ```
-/// use unicode_linebreak::{linebreaks, BreakOpportunity::{Mandatory, Allowed}};
-/// assert!(linebreaks("Hello world!").eq(vec![(6, Allowed), (12, Mandatory)]));
-/// ```
-pub fn linebreaks(s: &str) -> impl Iterator<Item = (usize, BreakOpportunity)> + Clone + '_ {
-    use BreakOpportunity::{Allowed, Mandatory};
-
-    s.char_indices()
-        .map(|(i, c)| (i, break_property(c as u32) as u8))
-        .chain(once((s.len(), eot)))
-        .scan((sot, false), |state, (i, cls)| {
-            // ZWJ is handled outside the table to reduce its size
-            let val = PAIR_TABLE[state.0 as usize][cls as usize];
-            let is_mandatory = val & MANDATORY_BREAK_BIT != 0;
-            let is_break = val & ALLOWED_BREAK_BIT != 0 && (!state.1 || is_mandatory);
-            *state = (
-                val & !(ALLOWED_BREAK_BIT | MANDATORY_BREAK_BIT),
-                cls == BreakClass::ZeroWidthJoiner as u8,
-            );
-
-            Some((i, is_break, is_mandatory))
-        })
-        .filter_map(|(i, is_break, is_mandatory)| {
-            if is_break {
-                Some((i, if is_mandatory { Mandatory } else { Allowed }))
-            } else {
-                None
-            }
-        })
-}
-
-/// Divides the string at the last index where further breaks do not depend on prior context.
-///
-/// The trivial index at `eot` is excluded.
-///
-/// A common optimization is to determine only the nearest line break opportunity before the first
-/// character that would cause the line to become overfull, requiring backward traversal, of which
-/// there are two approaches:
-///
-/// * Cache breaks from forward traversals
-/// * Step backward and with `split_at_safe` find a pos to safely search forward from, repeatedly
-///
-/// # Examples
-///
-/// ```
-/// use unicode_linebreak::{linebreaks, split_at_safe};
-/// let s = "Not allowed to break within em dashes: — —";
-/// let (prev, safe) = split_at_safe(s);
-/// let n = prev.len();
-/// assert!(linebreaks(safe).eq(linebreaks(s).filter_map(|(i, x)| i.checked_sub(n).map(|i| (i, x)))));
-/// ```
-pub fn split_at_safe(s: &str) -> (&str, &str) {
-    let mut chars = s.char_indices().rev().scan(None, |state, (i, c)| {
-        let cls = break_property(c as u32);
-        let is_safe_pair = state
-            .replace(cls)
-            .map_or(false, |prev| is_safe_pair(cls, prev)); // Reversed since iterating backwards
-        Some((i, is_safe_pair))
-    });
-    chars.find(|&(_, is_safe_pair)| is_safe_pair);
-    // Include preceding char for `linebreaks` to pick up break before match (disallowed after sot)
-    s.split_at(chars.next().map_or(0, |(i, _)| i))
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn it_works() {
-        assert_eq!(break_property(0xA), BreakClass::LineFeed);
-        assert_eq!(break_property(0xDB80), BreakClass::Surrogate);
-        assert_eq!(break_property(0xe01ef), BreakClass::CombiningMark);
-        assert_eq!(break_property(0x10ffff), BreakClass::Unknown);
-    }
-}
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/shared.rs b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/shared.rs
deleted file mode 100644
index c73819f..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/shared.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-/// Unicode line breaking class.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-#[repr(u8)]
-pub enum BreakClass {
-    // Non-tailorable
-    /// Cause a line break (after)
-    Mandatory,
-    /// Cause a line break (after), except between CR and LF
-    CarriageReturn,
-    /// Cause a line break (after)
-    LineFeed,
-    /// Prohibit a line break between the character and the preceding character
-    CombiningMark,
-    /// Cause a line break (after)
-    NextLine,
-    /// Do not occur in well-formed text
-    Surrogate,
-    /// Prohibit line breaks before and after
-    WordJoiner,
-    /// Provide a break opportunity
-    ZeroWidthSpace,
-    /// Prohibit line breaks before and after
-    NonBreakingGlue,
-    /// Enable indirect line breaks
-    Space,
-    /// Prohibit line breaks within joiner sequences
-    ZeroWidthJoiner,
-    // Break opportunities
-    /// Provide a line break opportunity before and after the character
-    BeforeAndAfter,
-    /// Generally provide a line break opportunity after the character
-    After,
-    /// Generally provide a line break opportunity before the character
-    Before,
-    /// Provide a line break opportunity after the character, except in numeric context
-    Hyphen,
-    /// Provide a line break opportunity contingent on additional information
-    Contingent,
-    // Characters prohibiting certain breaks
-    /// Prohibit line breaks before
-    ClosePunctuation,
-    /// Prohibit line breaks before
-    CloseParenthesis,
-    /// Prohibit line breaks before
-    Exclamation,
-    /// Allow only indirect line breaks between pairs
-    Inseparable,
-    /// Allow only indirect line breaks before
-    NonStarter,
-    /// Prohibit line breaks after
-    OpenPunctuation,
-    /// Act like they are both opening and closing
-    Quotation,
-    // Numeric context
-    /// Prevent breaks after any and before numeric
-    InfixSeparator,
-    /// Form numeric expressions for line breaking purposes
-    Numeric,
-    /// Do not break following a numeric expression
-    Postfix,
-    /// Do not break in front of a numeric expression
-    Prefix,
-    /// Prevent a break before, and allow a break after
-    Symbol,
-    // Other characters
-    /// Act like AL when the resolved EAW is N; otherwise, act as ID
-    Ambiguous,
-    /// Are alphabetic characters or symbols that are used with alphabetic characters
-    Alphabetic,
-    /// Treat as NS or ID for strict or normal breaking.
-    ConditionalJapaneseStarter,
-    /// Do not break from following Emoji Modifier
-    EmojiBase,
-    /// Do not break from preceding Emoji Base
-    EmojiModifier,
-    /// Form Korean syllable blocks
-    HangulLvSyllable,
-    /// Form Korean syllable blocks
-    HangulLvtSyllable,
-    /// Do not break around a following hyphen; otherwise act as Alphabetic
-    HebrewLetter,
-    /// Break before or after, except in some numeric context
-    Ideographic,
-    /// Form Korean syllable blocks
-    HangulLJamo,
-    /// Form Korean syllable blocks
-    HangulVJamo,
-    /// Form Korean syllable blocks
-    HangulTJamo,
-    /// Keep pairs together. For pairs, break before and after other classes
-    RegionalIndicator,
-    /// Provide a line break opportunity contingent on additional, language-specific context analysis
-    ComplexContext,
-    /// Have as yet unknown line breaking behavior or unassigned code positions
-    Unknown,
-}
-
-use BreakClass::{
-    After as BA, Alphabetic as AL, Ambiguous as AI, Before as BB, BeforeAndAfter as B2,
-    CarriageReturn as CR, CloseParenthesis as CP, ClosePunctuation as CL, CombiningMark as CM,
-    ComplexContext as SA, ConditionalJapaneseStarter as CJ, Contingent as CB, EmojiBase as EB,
-    EmojiModifier as EM, Exclamation as EX, HangulLJamo as JL, HangulLvSyllable as H2,
-    HangulLvtSyllable as H3, HangulTJamo as JT, HangulVJamo as JV, HebrewLetter as HL,
-    Hyphen as HY, Ideographic as ID, InfixSeparator as IS, Inseparable as IN, LineFeed as LF,
-    Mandatory as BK, NextLine as NL, NonBreakingGlue as GL, NonStarter as NS, Numeric as NU,
-    OpenPunctuation as OP, Postfix as PO, Prefix as PR, Quotation as QU, RegionalIndicator as RI,
-    Space as SP, Surrogate as SG, Symbol as SY, Unknown as XX, WordJoiner as WJ,
-    ZeroWidthJoiner as ZWJ, ZeroWidthSpace as ZW,
-};
-
-/// Ceiling for code points in the Basic Multilingual Place (BMP).
-const BMP_LIMIT: u32 = 0x10000;
-
-/// Shift size for getting index-3 table offset.
-const SHIFT_3: u32 = 4;
-/// Shift size for getting index-2 table offset.
-const SHIFT_2: u32 = 5 + SHIFT_3;
-/// Shift size for getting index-1 table offset.
-const SHIFT_1: u32 = 5 + SHIFT_2;
-/// Shift size for getting BMP block start.
-const BMP_SHIFT: u32 = 6;
-
-const INDEX_2_BLOCK_LENGTH: u32 = 1 << (SHIFT_1 - SHIFT_2);
-const INDEX_3_BLOCK_LENGTH: u32 = 1 << (SHIFT_2 - SHIFT_3);
-const SMALL_DATA_BLOCK_LENGTH: u32 = 1 << SHIFT_3;
-const BMP_DATA_BLOCK_LENGTH: u32 = 1 << BMP_SHIFT;
-
-const ALLOWED_BREAK_BIT: u8 = 0x80;
-const MANDATORY_BREAK_BIT: u8 = 0x40;
-
-#[allow(non_upper_case_globals)]
-const eot: u8 = 43;
-#[allow(non_upper_case_globals)]
-const sot: u8 = 44;
diff --git a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/tables.rs b/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/tables.rs
deleted file mode 100644
index 1a5d16b..0000000
--- a/third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/tables.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-const BREAK_PROP_TRIE_HIGH_START: u32 = 918016;
-static BREAK_PROP_TRIE_INDEX: [u16; 2844] = [0, 64, 127, 191, 247, 247, 247, 247, 247, 247, 247, 304, 368, 417, 481, 247, 247, 247, 542, 247, 558, 607, 662, 726, 790, 843, 247, 892, 950, 1003, 1029, 1093, 1157, 1221, 1270, 1324, 1384, 1446, 1509, 1571, 1634, 1696, 1759, 1821, 1885, 1947, 2009, 2071, 2135, 2197, 2260, 2322, 2386, 2448, 2512, 2576, 2639, 2703, 2766, 2830, 2894, 2958, 3016, 3080, 3144, 3208, 3256, 3314, 3378, 3410, 3442, 3482, 247, 3546, 3601, 3663, 3710, 3747, 3782, 3814, 3878, 247, 247, 247, 247, 247, 247, 247, 247, 247, 3942, 3974, 4038, 4102, 3144, 4166, 4230, 4262, 4326, 4374, 4438, 4502, 4566, 4620, 4661, 4694, 4758, 4807, 4871, 4930, 4994, 5052, 5112, 5176, 5240, 5301, 247, 247, 247, 5365, 247, 247, 247, 247, 5429, 5487, 553, 5551, 5615, 5677, 5741, 5803, 5867, 5911, 5969, 6015, 6079, 6141, 6203, 6267, 6323, 247, 247, 6366, 6418, 6482, 6514, 6515, 6514, 6566, 6630, 6690, 6754, 6818, 6882, 6943, 7004, 7045, 7099, 7158, 247, 247, 247, 247, 247, 247, 7219, 7259, 247, 247, 247, 247, 247, 7321, 7375, 247, 247, 247, 247, 7398, 7462, 7510, 7574, 7606, 7670, 7734, 7798, 7825, 7889, 7889, 7889, 7931, 7995, 8059, 8120, 8181, 8245, 7889, 7809, 8294, 8262, 8358, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 247, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 
7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 8401, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 8452, 8509, 247, 247, 247, 247, 8573, 8637, 8699, 8731, 247, 247, 247, 
8795, 8857, 8921, 8985, 9043, 9107, 9164, 9228, 9291, 9355, 9419, 3144, 9480, 9543, 9591, 247, 9639, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9723, 9703, 9711, 9719, 9727, 9707, 9715, 9779, 9836, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9900, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 
10028, 10092, 247, 10153, 247, 247, 247, 247, 10172, 247, 10236, 10292, 10356, 10416, 247, 10470, 10534, 10596, 10645, 10708, 2656, 2686, 2715, 2746, 2778, 2778, 2778, 2779, 2778, 2778, 2778, 2779, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2811, 2843, 503, 247, 508, 10772, 616, 616, 9964, 9964, 247, 247, 247, 247, 247, 247, 247, 1253, 10788, 247, 247, 2531, 247, 247, 247, 247, 500, 2522, 1837, 9964, 9964, 247, 247, 10795, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 2522, 247, 247, 247, 1837, 551, 2055, 247, 247, 7593, 247, 1253, 247, 247, 10811, 247, 10827, 247, 247, 9631, 10842, 9964, 9964, 247, 247, 247, 247, 247, 247, 247, 247, 247, 616, 2235, 247, 247, 9631, 247, 2055, 247, 247, 1995, 247, 247, 247, 10844, 504, 504, 10859, 513, 10873, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 4421, 247, 4422, 1995, 9964, 509, 247, 247, 10889, 9964, 9964, 9964, 9964, 10905, 247, 247, 10915, 247, 10930, 247, 247, 247, 500, 783, 9964, 9964, 9964, 247, 10943, 247, 10954, 247, 1254, 9964, 9964, 9964, 9964, 247, 247, 247, 9627, 247, 630, 247, 247, 10970, 1769, 247, 10986, 4022, 11002, 247, 247, 247, 247, 9964, 9964, 247, 247, 11018, 11034, 247, 247, 247, 11050, 247, 624, 247, 1261, 247, 11066, 781, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 4022, 9964, 9964, 9964, 247, 247, 247, 6454, 247, 247, 247, 4028, 247, 247, 4052, 2235, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 500, 247, 247, 11082, 6455, 9964, 9964, 9964, 4858, 247, 247, 1995, 247, 362, 11098, 9964, 247, 11114, 9964, 9964, 247, 2055, 9964, 247, 4421, 549, 247, 247, 360, 11130, 630, 2920, 11146, 549, 247, 247, 11161, 11175, 247, 4022, 2235, 549, 247, 361, 11191, 11207, 247, 247, 11223, 549, 247, 247, 365, 11239, 
11255, 494, 6452, 247, 513, 356, 11271, 11286, 9964, 9964, 9964, 11302, 501, 11317, 247, 247, 353, 4811, 2235, 11333, 629, 506, 11348, 1947, 11364, 11378, 4817, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 363, 11394, 11410, 6455, 9964, 247, 247, 247, 368, 11426, 2235, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 353, 11442, 11457, 11466, 9964, 9964, 247, 247, 247, 368, 11482, 2235, 11498, 9964, 247, 247, 357, 11514, 2235, 9964, 9964, 9964, 3144, 2817, 2686, 11530, 9476, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 356, 1067, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 940, 10032, 11546, 11558, 247, 11574, 11588, 2235, 9964, 9964, 9964, 9964, 622, 247, 247, 11604, 11619, 9964, 8688, 247, 247, 11635, 11651, 11667, 247, 247, 358, 11683, 11698, 247, 247, 247, 247, 4022, 11714, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 506, 247, 353, 3033, 11730, 940, 2522, 11746, 247, 3005, 3032, 4815, 9964, 9964, 9964, 9964, 1801, 247, 247, 11761, 11776, 2235, 11792, 247, 4674, 11808, 2235, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 11824, 11840, 493, 247, 11852, 11866, 2235, 9964, 9964, 9964, 9964, 9964, 1837, 247, 11882, 11897, 11911, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 3798, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 247, 247, 500, 11926, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 6453, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 247, 247, 6454, 247, 247, 247, 247, 247, 11942, 247, 247, 11956, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 11966, 247, 247, 247, 247, 247, 247, 247, 247, 11982, 11998, 4816, 
9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 3955, 247, 247, 247, 247, 4421, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 4022, 247, 500, 12014, 247, 247, 247, 247, 500, 2235, 247, 616, 12030, 247, 247, 247, 12046, 12058, 12074, 513, 1256, 247, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 247, 12085, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 2056, 367, 368, 368, 12101, 549, 9964, 9964, 9964, 9964, 12117, 4820, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7869, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 4422, 9964, 9964, 7868, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 1671, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7950, 12131, 9964, 12147, 12159, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7865, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 247, 247, 1253, 2522, 4022, 12175, 4818, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 368, 368, 1000, 368, 4815, 247, 247, 247, 247, 247, 247, 247, 6453, 
9964, 9964, 9964, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 4422, 247, 247, 623, 247, 247, 247, 12191, 368, 12204, 247, 12216, 247, 247, 247, 1253, 9964, 247, 247, 247, 247, 12230, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 6453, 247, 6453, 247, 247, 247, 247, 247, 4421, 247, 4022, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 247, 247, 247, 510, 247, 247, 247, 502, 12244, 12258, 511, 247, 247, 247, 3549, 1670, 247, 3600, 12271, 493, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 624, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 12281, 12295, 12295, 12295, 368, 368, 368, 11672, 368, 368, 452, 12311, 12323, 4860, 678, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 500, 12335, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 3033, 12351, 12365, 247, 247, 247, 616, 9964, 4856, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 247, 2522, 12381, 9307, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 12397, 9964, 247, 247, 356, 12413, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 247, 356, 2235, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 12429, 500, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 625, 4815, 9964, 9964, 247, 247, 247, 247, 12445, 12461, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 494, 247, 247, 10340, 12477, 9964, 9964, 9964, 9964, 494, 247, 247, 616, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 511, 247, 12492, 12505, 12519, 12535, 12549, 12557, 505, 2055, 12572, 2055, 9964, 9964, 9964, 6455, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 7889, 
7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 12588, 6514, 6564, 6514, 6514, 6514, 12604, 6514, 6514, 6514, 12588, 7889, 7889, 7889, 12617, 12623, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 12639, 12645, 7889, 12652, 12666, 7889, 7889, 12679, 7889, 7889, 7889, 7889, 12695, 12710, 12720, 12727, 12742, 12756, 12772, 12786, 7889, 7889, 7889, 7889, 6936, 11875, 12802, 6416, 6933, 7889, 7889, 12814, 7889, 12830, 7889, 7889, 7889, 12842, 7889, 12854, 7889, 7889, 7889, 7889, 12865, 247, 247, 12881, 7889, 7889, 12641, 12897, 12903, 7889, 7889, 7889, 247, 247, 247, 247, 247, 247, 247, 12919, 247, 247, 247, 247, 247, 12802, 7889, 7889, 6402, 247, 247, 247, 6935, 6933, 247, 247, 6935, 247, 6400, 7889, 7889, 7889, 7889, 7889, 12935, 12718, 12751, 12950, 7889, 7889, 7889, 12750, 7889, 7889, 7889, 12965, 12713, 12980, 7889, 7889, 247, 247, 247, 247, 247, 12919, 7889, 7889, 7889, 7889, 7889, 7889, 12898, 7889, 7889, 12702, 247, 247, 247, 247, 247, 247, 247, 247, 247, 512, 247, 247, 1253, 9964, 9964, 2235, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7889, 7863, 2093, 9964, 368, 368, 368, 368, 368, 368, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 9964, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 9964, 1077, 1109, 1141, 1173, 1205, 1237, 1269, 1295, 1327, 1359, 1391, 1423, 1455, 1487, 1519, 1546, 1578, 1585, 1617, 896, 896, 896, 896, 1638, 1578, 1670, 1699, 896, 896, 896, 896, 896, 1731, 1760, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 1578, 1792, 896, 1820, 202, 202, 202, 202, 202, 202, 202, 202, 1852, 202, 1884, 1903, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 1920, 1952, 1975, 896, 896, 896, 896, 2007, 
896, 896, 896, 896, 896, 896, 896, 2023, 2055, 2087, 2119, 2141, 1578, 2173, 896, 2189, 2221, 2244, 2263, 2279, 2311, 896, 2336, 2368, 2400, 2432, 2464, 2496, 2528, 2560, 202, 2592, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 2592, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 896, 2624];
-static BREAK_PROP_TRIE_DATA: [BreakClass; 12996] = [
-CM,CM,CM,CM,CM,CM,CM,CM,CM,BA,LF,BK,BK,CR,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,SP,EX,QU,AL,PR,PO,AL,QU,OP,CP,AL,PR,IS,HY,IS,SY,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,IS,IS,AL,AL,AL,EX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,PR,CP,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,BA,CL,AL,CM,CM,CM,CM,CM,NL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,GL,OP,PO,PR,PR,PR,AL,AI,AI,AL,AI,QU,AL,BA,AL,AL,PO,PR,AI,AI,BB,AL,AI,AI,AI,AI,AI,QU,AI,AI,AI,OP,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,BB,AI,AI,AI,BB,AI,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AI,AI,AI,AI,AL,AI,AL,BB,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,GL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,GL,GL,GL,GL,GL,GL,GL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,IS,AL,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,A
L,AL,AL,AL,AL,IS,BA,XX,XX,AL,AL,PR,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,BA,CM,AL,CM,CM,AL,CM,CM,EX,CM,XX,XX,XX,XX,XX,XX,XX,XX,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,XX,XX,XX,XX,HL,HL,HL,HL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,PO,PO,PO,IS,IS,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,EX,CM,EX,EX,EX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,PO,NU,NU,AL,AL,AL,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,EX,AL,CM,CM,CM,CM,CM,CM,CM,AL,AL,CM,CM,CM,CM,CM,CM,AL,AL,CM,CM,AL,CM,CM,CM,CM,AL,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,IS,EX,AL,XX,XX,CM,PR,PR,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,CM,CM,CM,AL,CM,CM,CM,CM,CM,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,XX,XX,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,
AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,BA,BA,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,XX,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,XX,XX,AL,AL,AL,AL,XX,XX,CM,AL,CM,CM,CM,CM,CM,XX,XX,CM,CM,XX,XX,CM,CM,CM,AL,XX,XX,XX,XX,XX,XX,XX,XX,CM,XX,XX,XX,XX,AL,AL,XX,AL,AL,AL,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,PO,PO,AL,AL,AL,AL,AL,PO,AL,PR,AL,AL,CM,XX,CM,CM,CM,XX,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,AL,AL,XX,AL,AL,XX,XX,CM,XX,CM,CM,CM,XX,XX,XX,XX,CM,CM,XX,XX,CM,CM,CM,XX,XX,XX,CM,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,XX,AL,XX,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,CM,CM,AL,AL,AL,CM,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,AL,AL,AL,AL,AL,XX,XX,CM,AL,CM,CM,CM,CM,CM,CM,XX,CM,CM,CM,XX,CM,CM,CM,XX,XX,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,PR,XX,XX,XX,XX,XX,XX,XX,AL,CM,CM,CM,CM,CM,CM,XX,CM,CM,CM,XX,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,AL,AL,AL,AL,AL,XX,XX,CM,AL,CM,CM,CM,CM,CM,XX,XX,CM,CM,XX,XX,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,XX,XX,XX,XX,AL,AL,XX,AL,AL,AL,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL
,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,CM,AL,XX,AL,AL,AL,AL,AL,AL,XX,XX,XX,AL,AL,AL,XX,AL,AL,AL,AL,XX,XX,XX,AL,AL,XX,AL,XX,AL,AL,XX,XX,XX,AL,AL,XX,XX,XX,AL,AL,AL,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,CM,CM,CM,XX,XX,XX,CM,CM,CM,XX,CM,CM,CM,CM,XX,XX,AL,XX,XX,XX,XX,XX,XX,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,PR,AL,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,CM,AL,CM,CM,CM,CM,CM,XX,CM,CM,CM,XX,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,CM,CM,XX,AL,AL,AL,XX,XX,AL,XX,XX,AL,AL,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,XX,XX,BB,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,BB,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,XX,XX,CM,AL,CM,CM,CM,CM,CM,XX,CM,CM,CM,XX,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,CM,CM,XX,XX,XX,XX,XX,XX,AL,AL,XX,AL,AL,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,AL,AL,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,AL,CM,CM,CM,CM,CM,XX,CM,CM,CM,XX,CM,CM,CM,CM,AL,AL,XX,XX,XX,XX,AL,AL,AL,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,PO,AL,AL,AL,AL,AL,AL,XX,CM,CM,CM,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,CM,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,XX,CM,XX,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,CM,CM,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,S
A,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,XX,XX,PR,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,BA,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,SA,SA,XX,SA,XX,SA,SA,SA,SA,SA,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,SA,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,SA,SA,SA,SA,SA,XX,SA,XX,SA,SA,SA,SA,SA,SA,SA,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,SA,SA,SA,SA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,BB,BB,BB,BB,AL,BB,BB,GL,BB,BB,BA,GL,EX,EX,EX,EX,EX,GL,AL,EX,AL,AL,AL,CM,CM,AL,AL,AL,AL,AL,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,CM,AL,CM,AL,CM,OP,CL,OP,CL,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,BA,CM,CM,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,BA,BA,AL,AL,AL,AL,AL,AL,CM,AL,AL,AL,AL,AL,AL,XX,AL,AL,BB,BB,BA,BB,AL,AL,AL,AL,AL,GL,GL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,BA,BA,AL,AL,AL,AL,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,SA,SA,SA,SA,SA,SA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,XX,XX,XX,XX,AL,XX,XX,AL,AL,AL,
AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,CM,CM,CM,AL,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL
,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,BA,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,BA,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,XX,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,BA,BA,NS,SA,BA,AL,BA,PR,SA,SA,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,AL,AL,EX,EX,BA,BA,BB,AL,EX,EX,AL,CM,CM,CM,GL,CM,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,AL,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,AL,XX,XX,XX,EX,EX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,SA,SA,SA,SA,SA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,XX,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,SA,XX,XX,XX,SA,SA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,A
L,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,XX,XX,AL,AL,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,CM,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,XX,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,BA,BA,AL,BA,BA,BA,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,BA,XX,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,BA,BA,BA,BA,BA,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,AL,AL,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,
CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,CM,AL,AL,AL,AL,AL,AL,CM,AL,AL,CM,CM,CM,AL,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,GL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,GL,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,AL,XX,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,BB,AL,XX,BA,BA,BA,BA,BA,BA,BA,GL,BA,BA,BA,ZW,CM,ZWJ,CM,CM,BA,GL,BA,BA,B2,AI,AI,AL,QU,QU,OP,QU,QU,QU,OP,QU,AI,AI,AL,AL,IN,IN,IN,BA,BK,BK,CM,CM,CM,CM,CM,GL,PO,PO,PO,PO,PO,PO,PO,PO,AL,QU,QU,AI,NS,NS,AL,AL,AL,AL,IS,OP,CL,NS,NS,NS,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,PO,BA,BA,BA,BA,AL,BA,BA,BA,WJ,AL,AL,AL,AL,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,XX,XX,AI,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,AI,AL,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,PR,PR,PR,PR,PR,PR,PR,PO,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PO,PR,PR,PR,PR,PO,PR,PR,PO,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,PR,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,PO,AL,AI,AL,AL,AL,PO,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,PR,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AL,AL,AL,AL,AI,AL,AL,AI,AL,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AL,AL,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,XX,XX,XX,XX,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AL,AL,A
L,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AI,AI,AL,AL,AL,AI,AI,AL,AL,AI,AL,AL,AL,AI,AL,AI,PR,PR,AL,AI,AL,AL,AL,AL,AI,AL,AL,AI,AI,AI,AI,AL,AL,AI,AL,AI,AL,AI,AI,AI,AI,AI,AI,AL,AI,AL,AL,AL,AL,AL,AI,AI,AI,AI,AL,AL,AL,AL,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AI,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AL,AI,AI,AI,AI,AL,AL,AI,AI,AL,AL,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AL,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,IN,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,OP,CL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,ID,ID,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,ID,ID,ID,ID,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AL,AL,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AI,
AI,AI,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AL,AI,AI,AL,AL,AL,AL,AI,AI,AL,AL,AL,AL,AI,AI,AI,AL,AL,AI,AL,AL,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,ID,ID,ID,ID,AL,AI,AI,AL,AL,AI,AL,AL,AL,AL,AI,AI,AL,AL,AL,AL,ID,ID,AI,AI,ID,AL,ID,ID,ID,EB,ID,ID,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,ID,ID,ID,AL,AL,AL,AL,AI,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AI,AI,AI,AL,AI,ID,AI,AI,AL,AI,AI,AL,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,ID,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,ID,ID,ID,ID,ID,ID,ID,ID,ID,AI,AI,AI,AI,ID,AL,ID,ID,ID,AI,ID,ID,AI,AI,AI,ID,ID,AI,AI,ID,AI,AI,ID,ID,ID,AL,AI,AL,AL,AL,AL,AI,AI,ID,AI,AI,AI,AI,AI,AI,ID,ID,ID,ID,ID,AI,ID,ID,EB,ID,AI,AI,ID,ID,ID,ID,ID,AL,AL,AL,ID,ID,EB,EB,EB,EB,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AL,AL,AL,QU,QU,QU,QU,QU,QU,AL,EX,EX,ID,AL,AL,AL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,OP,CL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL
,AL,AL,AL,AL,AL,AL,AL,AL,AL,AI,AI,AI,AI,AI,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,AL,AL,XX,XX,XX,XX,XX,EX,BA,BA,BA,AL,EX,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,XX,XX,XX,XX,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,AL,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,QU,QU,QU,QU,QU,QU,QU,QU,QU,QU,QU,QU,QU,QU,BA,BA,BA,BA,BA,BA,BA,BA,AL,BA,OP,BA,AL,AL,QU,QU,AL,AL,QU,QU,OP,CL,OP,CL,OP,CL,OP,CL,BA,BA,BA,BA,EX,AL,BA,BA,AL,BA,BA,AL,AL,AL,AL,AL,B2,B2,BA,BA,BA,AL,BA,BA,OP,BA,BA,BA,BA,BA,BA,BA,BA,AL,BA,AL,BA,BA,AL,AL,AL,EX,EX,OP,CL,OP,CL,OP,CL,OP,CL,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,XX,XX,XX,BA,CL,CL,ID,I
D,NS,ID,ID,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,ID,ID,OP,CL,OP,CL,OP,CL,OP,CL,NS,OP,CL,CL,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,CM,CM,CM,CM,CM,CM,ID,ID,ID,ID,ID,CM,ID,ID,ID,ID,ID,NS,NS,ID,ID,ID,XX,CJ,ID,CJ,ID,CJ,ID,CJ,ID,CJ,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,CJ,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,CJ,ID,CJ,ID,CJ,ID,ID,ID,ID,ID,ID,CJ,ID,ID,ID,ID,ID,ID,CJ,CJ,XX,XX,CM,CM,NS,NS,NS,NS,ID,NS,CJ,ID,CJ,ID,CJ,ID,CJ,ID,CJ,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,CJ,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,CJ,ID,CJ,ID,CJ,ID,ID,ID,ID,ID,ID,CJ,ID,ID,ID,ID,ID,ID,CJ,CJ,ID,ID,ID,ID,NS,CJ,NS,NS,ID,XX,XX,XX,XX,XX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,ID,ID,ID,ID,ID,ID,ID,ID,AI,AI,AI,AI,AI,AI,AI,AI,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,NS,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,XX,XX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,EX,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,
AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,AL,BA,BA,BA,BA,BA,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,AL,AL,XX,AL,XX,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,AL,AL,AL,CM,AL,AL,AL,AL,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,AL,AL,AL,AL,CM,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,PO,AL,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BB,BB,EX,EX,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,BA,BA,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,BB,AL,AL,CM,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,BA,BA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,JL,XX,XX,XX,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,BA,BA,BA,AL,AL,AL,AL,XX,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,AL,AL,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA
,SA,SA,SA,SA,SA,SA,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,SA,SA,SA,SA,SA,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,CM,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,AL,BA,BA,BA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,SA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,SA,SA,SA,SA,SA,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,BA,BA,AL,AL,AL,CM,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,BA,CM,CM,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,XX,H2,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H2,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H2,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H2,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,H3,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,JV,XX,XX,XX,XX,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,JT,XX,XX,XX,XX,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,SG,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,X
X,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,HL,CM,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,AL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,XX,HL,HL,HL,HL,HL,XX,HL,XX,HL,HL,XX,HL,HL,XX,HL,HL,HL,HL,HL,HL,HL,HL,HL,HL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CL,OP,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,PO,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,IS,CL,CL,IS,IS,EX,EX,OP,CL,IN,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,ID,ID,ID,ID,ID,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,OP,CL,ID,ID,OP,CL,ID,ID,ID,ID,ID,ID,ID,CL,ID,CL,XX,NS,NS,EX,EX,ID,OP,CL,OP,CL,OP,CL,ID,ID,ID,ID,ID,ID,ID,ID,XX,ID,PR,PO,ID,XX,XX,XX,XX,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,WJ,XX,EX,ID,ID,PR,PO,ID,ID,OP,CL,ID,ID,CL,ID,CL,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,NS,NS,ID,ID,ID,EX,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,OP,ID,CL,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,OP,ID,CL,ID,OP,CL,CL,OP,CL,CL,NS,ID,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,CJ,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,
ID,ID,ID,ID,ID,ID,ID,ID,ID,NS,NS,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,XX,XX,ID,ID,ID,ID,ID,ID,XX,XX,ID,ID,ID,ID,ID,ID,XX,XX,ID,ID,ID,ID,ID,ID,XX,XX,ID,ID,ID,XX,XX,XX,PO,PR,ID,ID,ID,PR,PR,XX,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,CB,AI,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,AL,BA,BA,BA,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,XX,XX,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,BA,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,XX,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,XX,XX,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,AL,XX,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,XX,XX,AL,XX,XX,AL,AL,AL,AL,AL,AL,XX,BA,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,BA,AL,CM,CM,CM,XX,CM,CM,XX,XX,XX,XX,XX,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,XX,XX,CM,CM,CM,XX,XX,XX,XX,CM,BA,BA,BA,BA,BA,BA,BA,BA,AL,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,CM,CM,XX,XX,XX,XX,AL,AL,AL,AL,AL,BA,BA,BA,BA,BA,BA,IN,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,XX,BA,BA,BA,BA,BA,BA,BA,AL,AL,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,CM,CM,BA,XX,XX,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,AL,AL,CM,CM,CM,CM,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,BA,BA,AL,AL,AL,AL,AL,XX,XX,CM,AL,AL,CM,CM,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,BA,BA,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,XX,XX,CM,CM,CM,CM,CM,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,BA,BA,BA,BA,AL,CM,CM,AL,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,CM,AL,BB,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,AL,AL,AL,AL,BA,BA,AL,BA,CM,CM,CM,CM,AL,CM,CM,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,BB,AL,BA,BA,BA,CM,CM,CM,CM,CM,CM,CM,CM,BA,BA,AL,BA,BA,AL,CM,AL,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,BA,XX,XX,XX,XX,XX,XX
,CM,CM,CM,CM,XX,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,XX,AL,AL,XX,AL,AL,AL,AL,AL,XX,CM,CM,AL,CM,CM,AL,XX,XX,XX,XX,XX,XX,CM,XX,XX,XX,XX,XX,AL,AL,AL,CM,CM,XX,XX,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,BA,BA,BA,BA,AL,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,BA,BA,XX,AL,CM,AL,CM,CM,CM,CM,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,BB,BA,BA,EX,EX,AL,AL,AL,BA,BA,BA,BA,BA,BA,BA,BA,AL,AL,AL,AL,CM,CM,XX,XX,CM,BA,BA,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,BB,BB,BB,BB,BB,BB,BB,BB,BB,BB,BB,BB,BB,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,SA,SA,BA,BA,BA,SA,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,XX,XX,AL,AL,AL,AL,XX,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,XX,CM,CM,XX,XX,CM,CM,CM,CM,AL,CM,CM,BA,BA,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,CM,CM,CM,CM,CM,CM,CM,XX,XX,CM,CM,CM,CM,CM,CM,AL,BB,AL,CM,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,AL,CM,CM,CM,CM,BB,AL,BA,BA,BA,BA,BB,AL,CM,XX,XX,XX,XX,XX,XX,XX,XX,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,CM,BA,BA,BA,AL,BB,BB,BA,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,BB,BB,BB,BB,BB,BB,BB,BB,BB,BB,XX,XX,XX,XX,XX,XX,AL,BA,BA,BA,BA,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,BB,EX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,XX,XX,XX,CM,XX,CM,CM,XX,CM,CM,CM,CM,CM,CM,AL,CM,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,XX,AL,AL,XX,AL,AL,AL,AL,AL,AL,CM,CM,XX,CM,CM,CM,CM,CM,AL,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,CM,CM,CM,CM,AL,AL,XX,XX,XX,XX,XX,XX,XX,CM,CM,AL,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,CM,CM,CM,BA,BA,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,PO,PO,PO,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,BA,BA,BA,BA,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,OP,OP,OP,CL,CL,CL,AL,AL,CL,AL,AL,AL,OP,CL,OP,CL,AL,AL,AL,AL,AL,AL,AL,AL,AL,OP,CL,CL,AL,AL,AL,AL,GL,GL,GL,GL,GL,GL,GL,OP,CL,GL,GL,GL,OP,CL,OP,CL,CM,A
L,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,CM,CM,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,BA,BA,CM,CM,CM,CM,CM,BA,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,BA,BA,BA,AL,AL,AL,AL,AL,AL,BA,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,AL,AL,AL,AL,AL,AL,AL,BA,BA,AL,AL,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,XX,XX,CM,NS,NS,NS,NS,GL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CJ,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CJ,CJ,CJ,XX,XX,CJ,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,CJ,CJ,CJ,CJ,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,CM,CM,BA,AL,AL,AL,AL,AL,CM,CM,CM,CM,CM,AL,AL,AL,CM,CM,CM,AL,AL,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,CM,CM,CM,AL,AL,CM,CM,CM,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,XX,XX,AL,AL,XX,XX,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,AL,XX,AL,AL,AL,AL,AL,XX,AL,XX,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,AL,AL,AL,AL,AL,CM,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,AL,AL,BA,BA,BA,BA,AL,XX,XX,XX,XX,XX,AL,AL,AL,AL,AL,AL,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,CM,CM,XX,XX,CM,CM,CM,CM,CM,XX,CM,CM,XX,CM,CM,CM,CM,CM,XX,XX,XX,XX,XX,CM,CM,CM,CM,CM,CM,CM,AL,AL,AL,AL,AL,AL,AL,XX,XX,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,CM,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,XX,PR,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,AL,AL,XX,AL,AL,AL,AL,CM,CM,CM,CM,CM,CM,CM,AL,XX,XX,XX,XX,NU,NU,NU,NU,NU,NU,NU,NU,NU,NU,XX,XX,XX,XX,OP,OP,PO,AL,AL,AL,AL,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,XX,AL,AL,XX,AL,XX,XX,AL,XX,AL,AL,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,AL,XX,AL,XX,XX,XX,XX,AL,XX,XX,XX,XX,AL,XX,AL,XX,AL,XX,AL,AL,AL,XX,AL,AL,XX,AL,XX,XX,AL,XX,AL,XX,AL,XX,AL,XX,AL,AL,XX,AL,XX,XX,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,AL,AL,AL,AL,XX,AL,XX,AL,AL,AL,XX,AL,AL,AL,AL,AL,XX,AL,AL,AL,AL,AL,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,ID,ID,ID,AI,AI,AI,AI,AI,AI,AI,AI,AI,AI,AL,AL,AL,ID,ID,ID,ID,ID,ID,RI,RI,RI,RI,RI,RI,RI,RI,RI,RI,RI,RI,RI,RI,RI,RI,ID,ID,ID,ID,ID,EB,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,AL,AL,ID,ID,ID,ID,ID,AL,ID,
ID,ID,EB,EB,EB,ID,ID,EB,ID,ID,EB,EB,EB,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,EM,EM,EM,EM,EM,ID,ID,EB,EB,ID,ID,EB,EB,EB,EB,EB,EB,EB,EB,EB,EB,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,EB,EB,EB,EB,EB,EB,EB,EB,EB,EB,ID,ID,ID,EB,ID,ID,ID,EB,EB,EB,ID,EB,EB,EB,ID,ID,ID,ID,ID,ID,ID,EB,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,AL,ID,AL,ID,AL,ID,ID,ID,ID,ID,EB,ID,ID,ID,ID,AL,AL,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,AL,AL,AL,AL,AL,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,EB,EB,ID,ID,ID,ID,EB,ID,ID,ID,ID,ID,EB,ID,ID,ID,ID,EB,EB,ID,ID,ID,ID,ID,ID,ID,ID,ID,AL,AL,AL,AL,AL,AL,AL,AL,ID,ID,ID,ID,AL,AL,AL,AL,AL,AL,ID,ID,ID,ID,ID,ID,EB,EB,EB,ID,ID,ID,EB,EB,EB,EB,EB,AL,AL,AL,AL,AL,AL,QU,QU,QU,NS,NS,NS,AL,AL,AL,AL,ID,ID,ID,ID,EB,EB,EB,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,EB,ID,ID,ID,AL,AL,AL,AL,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,ID,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,AL,EB,ID,ID,EB,EB,EB,EB,EB,EB,EB,EB,EB,EB,ID,ID,EB,EB,EB,ID,ID,ID,ID,ID,EB,EB,ID,EB,EB,ID,EB,ID,ID,ID,ID,EB,EB,EB,EB,EB,EB,EB,EB,EB,EB,EB,EB,EB,ID,ID,];
-
-static PAIR_TABLE: [[u8; 44]; 53] = [[192,193,194,221,196,221,198,199,200,201,221,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,221,221,212,223,224,225,226,227,228,229,230,231,232,221,221,235,],[192,193,2,221,196,221,198,199,200,201,221,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,221,221,212,223,224,225,226,227,228,229,230,231,232,221,221,235,],[192,193,194,221,196,221,198,199,200,201,221,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,221,221,212,223,224,225,226,227,228,229,230,231,232,221,221,235,],[0,1,2,3,4,29,6,7,8,9,3,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[192,193,194,221,196,221,198,199,200,201,221,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,221,221,212,223,224,225,226,227,228,229,230,231,232,221,221,235,],[0,1,2,29,4,29,6,7,8,9,29,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,6,4,29,6,7,8,9,6,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,235,],[0,1,2,157,4,157,134,7,136,45,157,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,157,157,148,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,8,4,29,6,7,8,9,8,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,235,],[0,1,2,157,4,157,6,7,136,9,157,139,140,141,142,143,16,17,18,147,148,149,150,23,152,153,154,27,157,157,148,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,10,4,29,6,7,8,9,10,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,11,4,157,6,7,8,50,11,11,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,12,4,157,6,7,136,9,12,139,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,15
9,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,13,4,29,6,7,8,9,13,11,12,13,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,235,],[0,1,2,14,4,157,6,7,136,9,14,139,12,141,14,143,16,17,18,19,20,149,22,23,24,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,15,4,157,6,7,8,9,15,139,140,141,142,143,16,17,18,147,148,149,22,23,152,153,154,27,157,157,148,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,16,4,157,6,7,8,48,16,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,26,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,17,4,29,6,7,8,49,17,139,12,141,14,143,16,17,18,19,20,149,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,18,4,157,6,7,8,9,18,139,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,19,4,157,6,7,8,9,19,139,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,20,4,157,6,7,8,9,20,139,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,21,4,29,6,7,8,46,21,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,235,],[0,1,2,22,4,29,6,7,8,47,22,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,235,],[0,1,2,23,4,29,6,7,8,9,23,139,12,141,14,143,16,17,18,19,20,149,22,23,24,153,154,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,24,4,29,6,7,8,9,24,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,25,4,29,6,7,8,9,25,139,12,141,14,143,16,17,18,19,20,21,22,23,24,153,154,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,26,4,29,6,7,8,9,26,139,12,141,14,143,16,17,18,19,20,21,22,23,24,153,154,27,29,29,2
0,31,32,33,34,35,36,37,38,39,168,29,29,235,],[0,1,2,27,4,157,6,7,8,9,27,139,12,141,14,143,16,17,18,19,20,149,22,23,24,153,154,27,157,157,20,159,160,161,162,35,164,165,166,167,168,157,157,235,],[0,1,2,29,4,29,6,7,8,9,29,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,29,4,29,6,7,8,9,29,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,20,4,157,6,7,8,9,20,139,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,31,4,157,6,7,8,9,31,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,32,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,32,4,157,6,7,8,9,32,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,33,4,157,6,7,8,9,33,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,160,161,162,163,164,165,38,39,168,157,157,235,],[0,1,2,34,4,157,6,7,8,9,34,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,160,161,162,163,164,165,166,39,168,157,157,235,],[0,1,2,35,4,29,6,7,8,9,35,139,51,141,51,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,36,4,157,6,7,8,9,36,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,37,4,157,6,7,8,9,37,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,160,33,34,163,164,37,38,167,168,157,157,235,],[0,1,2,38,4,157,6,7,8,9,38,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,160,161,162,163,164,165,38,39,168,157,157,235,],[0,1,2,39,4,157,6,7,8,9,39,139,12,141,14,143,16,17,18,19,20,149,22,23,152,25,154,27,157,157,20,159,160,161,162,163,164,165,166,39,168,157,157,235,],[0,1,2,40,4,157,6,7,8,9,40,139,12,141,14,143,16,17,18,19
,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,52,157,157,235,],[0,1,2,29,4,29,6,7,8,9,29,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,29,4,29,6,7,8,9,29,139,12,141,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,159,160,161,162,35,164,165,166,167,168,29,29,235,],[0,1,2,157,4,157,6,7,136,9,157,139,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,29,4,29,6,7,8,9,29,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,43,],[0,1,2,157,4,157,134,7,136,45,157,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,157,157,148,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,29,4,29,6,7,8,46,29,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,235,],[0,1,2,157,4,157,6,7,136,47,157,139,140,141,142,143,16,17,18,147,148,21,150,23,152,153,154,27,157,157,148,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,157,4,157,6,7,136,48,157,139,140,141,142,143,16,17,18,147,20,149,150,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,157,4,157,6,7,136,49,157,139,140,141,142,143,16,17,18,147,20,149,150,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,157,4,157,6,7,136,50,157,11,140,141,142,143,16,17,18,147,148,149,150,23,152,153,154,27,157,157,148,159,160,161,162,163,164,165,166,167,168,157,157,235,],[0,1,2,51,4,29,6,7,8,9,51,11,12,13,14,143,16,17,18,19,20,21,22,23,24,25,26,27,29,29,20,31,32,33,34,35,36,37,38,39,40,29,29,235,],[0,1,2,52,4,157,6,7,8,9,52,139,12,141,14,143,16,17,18,19,20,149,22,23,152,153,154,27,157,157,20,159,160,161,162,163,164,165,166,167,168,157,157,235,],];
-
-        fn is_safe_pair(a: BreakClass, b: BreakClass) -> bool {
-            !matches!((a, b), (CM, CM)|(SP, CM)|(ZWJ, CM)|(BA, CM)|(HY, CM)|(RI, CM)|(CM, SG)|(SP, SG)|(ZWJ, SG)|(BA, SG)|(HY, SG)|(SP, WJ)|(CM, GL)|(SP, GL)|(ZWJ, GL)|(BA, GL)|(HY, GL)|(CM, SP)|(SP, SP)|(ZWJ, SP)|(CM, ZWJ)|(SP, ZWJ)|(ZWJ, ZWJ)|(BA, ZWJ)|(HY, ZWJ)|(RI, ZWJ)|(CM, B2)|(SP, B2)|(ZWJ, B2)|(BA, B2)|(HY, B2)|(CM, BA)|(SP, BA)|(ZWJ, BA)|(CM, BB)|(SP, BB)|(ZWJ, BB)|(BA, BB)|(HY, BB)|(CM, HY)|(SP, HY)|(ZWJ, HY)|(CM, CB)|(SP, CB)|(ZWJ, CB)|(SP, CL)|(SP, CP)|(SP, EX)|(CM, IN)|(SP, IN)|(ZWJ, IN)|(CM, NS)|(SP, NS)|(ZWJ, NS)|(CM, OP)|(SP, OP)|(ZWJ, OP)|(BA, OP)|(HY, OP)|(SP, QU)|(SP, IS)|(CM, NU)|(SP, NU)|(ZWJ, NU)|(BA, NU)|(CM, PO)|(SP, PO)|(ZWJ, PO)|(BA, PO)|(HY, PO)|(CM, PR)|(SP, PR)|(ZWJ, PR)|(BA, PR)|(HY, PR)|(SP, SY)|(CM, AI)|(SP, AI)|(ZWJ, AI)|(BA, AI)|(HY, AI)|(CM, AL)|(SP, AL)|(ZWJ, AL)|(BA, AL)|(HY, AL)|(CM, CJ)|(SP, CJ)|(ZWJ, CJ)|(CM, EB)|(SP, EB)|(ZWJ, EB)|(BA, EB)|(HY, EB)|(CM, EM)|(SP, EM)|(ZWJ, EM)|(BA, EM)|(HY, EM)|(CM, H2)|(SP, H2)|(ZWJ, H2)|(BA, H2)|(HY, H2)|(CM, H3)|(SP, H3)|(ZWJ, H3)|(BA, H3)|(HY, H3)|(CM, HL)|(SP, HL)|(ZWJ, HL)|(BA, HL)|(HY, HL)|(CM, ID)|(SP, ID)|(ZWJ, ID)|(BA, ID)|(HY, ID)|(CM, JL)|(SP, JL)|(ZWJ, JL)|(BA, JL)|(HY, JL)|(CM, JV)|(SP, JV)|(ZWJ, JV)|(BA, JV)|(HY, JV)|(CM, JT)|(SP, JT)|(ZWJ, JT)|(BA, JT)|(HY, JT)|(CM, RI)|(SP, RI)|(ZWJ, RI)|(BA, RI)|(HY, RI)|(RI, RI)|(CM, SA)|(SP, SA)|(ZWJ, SA)|(BA, SA)|(HY, SA)|(CM, XX)|(SP, XX)|(ZWJ, XX)|(BA, XX)|(HY, XX))
-        }
diff --git a/third_party/rust/glob/v0_3/BUILD.gn b/third_party/rust/glob/v0_3/BUILD.gn
deleted file mode 100644
index 8bf995b..0000000
--- a/third_party/rust/glob/v0_3/BUILD.gn
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "glob"
-  epoch = "0.3"
-  crate_type = "rlib"
-  crate_root =
-      "//third_party/rust/chromium_crates_io/vendor/glob-0.3.2/src/lib.rs"
-  sources =
-      [ "//third_party/rust/chromium_crates_io/vendor/glob-0.3.2/src/lib.rs" ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2015"
-  cargo_pkg_version = "0.3.2"
-  cargo_pkg_authors = "The Rust Project Developers"
-  cargo_pkg_name = "glob"
-  cargo_pkg_description =
-      "Support for matching file paths against Unix shell style patterns."
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  # Only for usage from third-party crates. Add the crate to
-  # //third_party/rust/chromium_crates_io/Cargo.toml to use
-  # it from first-party code.
-  visibility = [ "//third_party/rust/*" ]
-  testonly = true
-}
diff --git a/third_party/rust/glob/v0_3/README.chromium b/third_party/rust/glob/v0_3/README.chromium
deleted file mode 100644
index 352761e..0000000
--- a/third_party/rust/glob/v0_3/README.chromium
+++ /dev/null
@@ -1,11 +0,0 @@
-Name: glob
-URL: https://crates.io/crates/glob
-Version: 0.3.2
-Revision: 58d0748ead23616834871fe42dce475102f8d895
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/glob-0.3.2/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: Support for matching file paths against Unix shell style patterns.
-
diff --git a/third_party/rust/hex_literal/v0_4/BUILD.gn b/third_party/rust/hex_literal/v0_4/BUILD.gn
deleted file mode 100644
index cc388e8..0000000
--- a/third_party/rust/hex_literal/v0_4/BUILD.gn
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "hex_literal"
-  epoch = "0.4"
-  crate_type = "rlib"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/src/lib.rs",
-  ]
-  inputs = [ "//third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/src/../README.md" ]
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "0.4.1"
-  cargo_pkg_authors = "RustCrypto Developers"
-  cargo_pkg_name = "hex-literal"
-  cargo_pkg_description =
-      "Macro for converting hexadecimal string to a byte array at compile time"
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  testonly = true
-}
diff --git a/third_party/rust/hex_literal/v0_4/README.chromium b/third_party/rust/hex_literal/v0_4/README.chromium
deleted file mode 100644
index 2b68aa1..0000000
--- a/third_party/rust/hex_literal/v0_4/README.chromium
+++ /dev/null
@@ -1,10 +0,0 @@
-Name: hex-literal
-URL: https://crates.io/crates/hex-literal
-Version: 0.4.1
-Revision: b8bd42f441f8e2987ded60fbaa809e7bf9d17a8e
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/hex-literal-0.4.1/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: Macro for converting hexadecimal string to a byte array at compile time
diff --git a/third_party/rust/regex/v1/BUILD.gn b/third_party/rust/regex/v1/BUILD.gn
deleted file mode 100644
index 836b567..0000000
--- a/third_party/rust/regex/v1/BUILD.gn
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "regex"
-  epoch = "1"
-  crate_type = "rlib"
-  crate_root =
-      "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/builders.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/bytes.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/error.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/find_byte.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/pattern.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/bytes.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regex/string.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/bytes.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-1.11.1/src/regexset/string.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "1.11.1"
-  cargo_pkg_authors =
-      "The Rust Project Developers, Andrew Gallant <jamslam@gmail.com>"
-  cargo_pkg_name = "regex"
-  cargo_pkg_description = "An implementation of regular expressions for Rust. This implementation uses finite automata and guarantees linear time matching on all inputs."
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  deps = [
-    "//third_party/rust/aho_corasick/v1:lib",
-    "//third_party/rust/memchr/v2:lib",
-    "//third_party/rust/regex_automata/v0_4:lib",
-    "//third_party/rust/regex_syntax/v0_8:lib",
-  ]
-  features = [
-    "perf",
-    "perf-backtrack",
-    "perf-cache",
-    "perf-dfa",
-    "perf-inline",
-    "perf-literal",
-    "perf-onepass",
-    "std",
-    "unicode",
-    "unicode-age",
-    "unicode-bool",
-    "unicode-case",
-    "unicode-gencat",
-    "unicode-perl",
-    "unicode-script",
-    "unicode-segment",
-  ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  testonly = true
-}
diff --git a/third_party/rust/regex/v1/README.chromium b/third_party/rust/regex/v1/README.chromium
deleted file mode 100644
index 8bffb9981..0000000
--- a/third_party/rust/regex/v1/README.chromium
+++ /dev/null
@@ -1,12 +0,0 @@
-Name: regex
-URL: https://crates.io/crates/regex
-Version: 1.11.1
-Revision: 9870c06e6c772daaad7ab612faab29130753e41c
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/regex-1.11.1/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: An implementation of regular expressions for Rust. This implementation uses
-finite automata and guarantees linear time matching on all inputs.
-
diff --git a/third_party/rust/regex_automata/v0_4/BUILD.gn b/third_party/rust/regex_automata/v0_4/BUILD.gn
index 3e8e16e..1f5143b 100644
--- a/third_party/rust/regex_automata/v0_4/BUILD.gn
+++ b/third_party/rust/regex_automata/v0_4/BUILD.gn
@@ -102,36 +102,7 @@
   executable_configs += [ "//build/config/compiler:no_chromium_code" ]
   proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
   proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  deps = [
-    "//third_party/rust/aho_corasick/v1:lib",
-    "//third_party/rust/memchr/v2:lib",
-    "//third_party/rust/regex_syntax/v0_8:lib",
-  ]
-  features = [
-    "alloc",
-    "dfa-onepass",
-    "dfa-search",
-    "hybrid",
-    "meta",
-    "nfa-backtrack",
-    "nfa-pikevm",
-    "nfa-thompson",
-    "perf-inline",
-    "perf-literal",
-    "perf-literal-multisubstring",
-    "perf-literal-substring",
-    "std",
-    "syntax",
-    "unicode",
-    "unicode-age",
-    "unicode-bool",
-    "unicode-case",
-    "unicode-gencat",
-    "unicode-perl",
-    "unicode-script",
-    "unicode-segment",
-    "unicode-word-boundary",
-  ]
+  features = [ "dfa-search" ]
   rustflags = [
     "--cap-lints=allow",  # Suppress all warnings in crates.io crates
   ]
diff --git a/third_party/rust/regex_syntax/v0_8/BUILD.gn b/third_party/rust/regex_syntax/v0_8/BUILD.gn
deleted file mode 100644
index 9fea73b..0000000
--- a/third_party/rust/regex_syntax/v0_8/BUILD.gn
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "regex_syntax"
-  epoch = "0.8"
-  crate_type = "rlib"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/parse.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/print.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/ast/visitor.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/debug.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/either.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/error.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/interval.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/literal.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/print.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/translate.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/hir/visitor.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/parser.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/rank.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/age.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/case_folding_simple.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/general_category.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/grapheme_cluster_break.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_decimal.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_space.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/perl_word.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_bool.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_names.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/property_values.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/script_extension.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/sentence_break.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/unicode_tables/word_break.rs",
-    "//third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/src/utf8.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "0.8.5"
-  cargo_pkg_authors =
-      "The Rust Project Developers, Andrew Gallant <jamslam@gmail.com>"
-  cargo_pkg_name = "regex-syntax"
-  cargo_pkg_description = "A regular expression parser."
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  features = [
-    "std",
-    "unicode",
-    "unicode-age",
-    "unicode-bool",
-    "unicode-case",
-    "unicode-gencat",
-    "unicode-perl",
-    "unicode-script",
-    "unicode-segment",
-  ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  # Only for usage from third-party crates. Add the crate to
-  # //third_party/rust/chromium_crates_io/Cargo.toml to use
-  # it from first-party code.
-  visibility = [ "//third_party/rust/*" ]
-}
diff --git a/third_party/rust/regex_syntax/v0_8/README.chromium b/third_party/rust/regex_syntax/v0_8/README.chromium
deleted file mode 100644
index 1f2e89a..0000000
--- a/third_party/rust/regex_syntax/v0_8/README.chromium
+++ /dev/null
@@ -1,10 +0,0 @@
-Name: regex-syntax
-URL: https://crates.io/crates/regex-syntax
-Version: 0.8.5
-Revision: cba0fbc0194456f644040d7558ae6ed261d57cc2
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/regex-syntax-0.8.5/LICENSE-APACHE
-Shipped: yes
-Security Critical: yes
-
-Description: A regular expression parser.
diff --git a/third_party/rust/relative_path/v1/BUILD.gn b/third_party/rust/relative_path/v1/BUILD.gn
deleted file mode 100644
index 7625add..0000000
--- a/third_party/rust/relative_path/v1/BUILD.gn
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "relative_path"
-  epoch = "1"
-  crate_type = "rlib"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/path_ext.rs",
-    "//third_party/rust/chromium_crates_io/vendor/relative-path-1.9.3/src/tests.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "1.9.3"
-  cargo_pkg_authors = "John-John Tedro <udoprog@tedro.se>"
-  cargo_pkg_name = "relative-path"
-  cargo_pkg_description = "Portable, relative paths for Rust."
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  # Only for usage from third-party crates. Add the crate to
-  # //third_party/rust/chromium_crates_io/Cargo.toml to use
-  # it from first-party code.
-  visibility = [ "//third_party/rust/*" ]
-  testonly = true
-}
diff --git a/third_party/rust/relative_path/v1/README.chromium b/third_party/rust/relative_path/v1/README.chromium
deleted file mode 100644
index 2cb7ed1..0000000
--- a/third_party/rust/relative_path/v1/README.chromium
+++ /dev/null
@@ -1,10 +0,0 @@
-Name: relative-path
-URL: https://crates.io/crates/relative-path
-Version: 1.9.3
-Revision: 6d267fbd85b257e4416c9f020131c6da168e1d3d
-License: Apache-2.0
-License File:
-Shipped: no
-Security Critical: no
-
-Description: Portable, relative paths for Rust.
diff --git a/third_party/rust/rstest/v0_22/BUILD.gn b/third_party/rust/rstest/v0_22/BUILD.gn
deleted file mode 100644
index 9b9aafb..0000000
--- a/third_party/rust/rstest/v0_22/BUILD.gn
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "rstest"
-  epoch = "0.22"
-  crate_type = "rlib"
-  crate_root =
-      "//third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/magic_conversion.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/src/timeout.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "0.22.0"
-  cargo_pkg_authors = "Michele d'Amico <michele.damico@gmail.com>"
-  cargo_pkg_name = "rstest"
-  cargo_pkg_description = "Rust fixture based test framework. It use procedural macro to implement fixtures and table based tests."
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  deps = [ "//third_party/rust/rstest_macros/v0_22:lib" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  testonly = true
-}
diff --git a/third_party/rust/rstest/v0_22/README.chromium b/third_party/rust/rstest/v0_22/README.chromium
deleted file mode 100644
index e187402..0000000
--- a/third_party/rust/rstest/v0_22/README.chromium
+++ /dev/null
@@ -1,12 +0,0 @@
-Name: rstest
-URL: https://crates.io/crates/rstest
-Version: 0.22.0
-Revision: 62134281cf451fc2bea69f9d2a16805a9ad03fef
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/rstest-0.22.0/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: Rust fixture based test framework. It use procedural macro
-to implement fixtures and table based tests.
-
diff --git a/third_party/rust/rstest_macros/v0_22/BUILD.gn b/third_party/rust/rstest_macros/v0_22/BUILD.gn
deleted file mode 100644
index 6466f852..0000000
--- a/third_party/rust/rstest_macros/v0_22/BUILD.gn
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "rstest_macros"
-  epoch = "0.22"
-  crate_type = "proc-macro"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/error.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/arguments.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/by_ref.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/expressions.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/fixture.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/future.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/ignore.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/just_once.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/macros.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/rstest/files.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/testcase.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/parse/vlist.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/refident.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/apply_arguments.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/crate_resolver.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/fixture.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/inject.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/test.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/render/wrapper.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/resolver.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/test.rs",
-    "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/src/utils.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "0.22.0"
-  cargo_pkg_authors = "Michele d'Amico <michele.damico@gmail.com>"
-  cargo_pkg_name = "rstest_macros"
-  cargo_pkg_description = "Rust fixture based test framework. It use procedural macro to implement fixtures and table based tests."
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  deps = [
-    "//third_party/rust/cfg_if/v1:lib",
-    "//third_party/rust/glob/v0_3:lib",
-    "//third_party/rust/proc_macro2/v1:lib",
-    "//third_party/rust/quote/v1:lib",
-    "//third_party/rust/regex/v1:lib",
-    "//third_party/rust/relative_path/v1:lib",
-    "//third_party/rust/syn/v2:lib",
-    "//third_party/rust/unicode_ident/v1:lib",
-  ]
-  build_deps = [ "//third_party/rust/rustc_version/v0_4:buildrs_support" ]
-  build_root = "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/build.rs"
-  build_sources = [ "//third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/build.rs" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  # Only for usage from third-party crates. Add the crate to
-  # //third_party/rust/chromium_crates_io/Cargo.toml to use
-  # it from first-party code.
-  visibility = [ "//third_party/rust/*" ]
-  testonly = true
-}
diff --git a/third_party/rust/rstest_macros/v0_22/README.chromium b/third_party/rust/rstest_macros/v0_22/README.chromium
deleted file mode 100644
index df88d22e..0000000
--- a/third_party/rust/rstest_macros/v0_22/README.chromium
+++ /dev/null
@@ -1,12 +0,0 @@
-Name: rstest_macros
-URL: https://crates.io/crates/rstest_macros
-Version: 0.22.0
-Revision: 62134281cf451fc2bea69f9d2a16805a9ad03fef
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/rstest_macros-0.22.0/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: Rust fixture based test framework. It use procedural macro
-to implement fixtures and table based tests.
-
diff --git a/third_party/rust/rustc_version/v0_4/BUILD.gn b/third_party/rust/rustc_version/v0_4/BUILD.gn
deleted file mode 100644
index 2d2096c7..0000000
--- a/third_party/rust/rustc_version/v0_4/BUILD.gn
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("buildrs_support") {
-  crate_name = "rustc_version"
-  epoch = "0.4"
-  crate_type = "rlib"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/src/lib.rs"
-  sources = [ "//third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/src/lib.rs" ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2018"
-  cargo_pkg_version = "0.4.1"
-  cargo_pkg_name = "rustc_version"
-  cargo_pkg_description =
-      "A library for querying the version of a installed rustc compiler"
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  deps = [ "//third_party/rust/semver/v1:lib" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  # Only for usage from third-party crates. Add the crate to
-  # //third_party/rust/chromium_crates_io/Cargo.toml to use
-  # it from first-party code.
-  visibility = [ "//third_party/rust/*" ]
-  testonly = true
-}
diff --git a/third_party/rust/rustc_version/v0_4/README.chromium b/third_party/rust/rustc_version/v0_4/README.chromium
deleted file mode 100644
index d157d0ae..0000000
--- a/third_party/rust/rustc_version/v0_4/README.chromium
+++ /dev/null
@@ -1,10 +0,0 @@
-Name: rustc_version
-URL: https://crates.io/crates/rustc_version
-Version: 0.4.1
-Revision: eeca449cca83e24150e46739e797aa82e9142809
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/rustc_version-0.4.1/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: A library for querying the version of a installed rustc compiler
diff --git a/third_party/rust/rustversion/v1/BUILD.gn b/third_party/rust/rustversion/v1/BUILD.gn
index b813fe9..f6f8116 100644
--- a/third_party/rust/rustversion/v1/BUILD.gn
+++ b/third_party/rust/rustversion/v1/BUILD.gn
@@ -51,4 +51,9 @@
   rustflags = [
     "--cap-lints=allow",  # Suppress all warnings in crates.io crates
   ]
+
+  # Only for usage from third-party crates. Add the crate to
+  # //third_party/rust/chromium_crates_io/Cargo.toml to use
+  # it from first-party code.
+  visibility = [ "//third_party/rust/*" ]
 }
diff --git a/third_party/rust/semver/v1/BUILD.gn b/third_party/rust/semver/v1/BUILD.gn
deleted file mode 100644
index 3ab1b01b0..0000000
--- a/third_party/rust/semver/v1/BUILD.gn
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "semver"
-  epoch = "1"
-  crate_type = "rlib"
-  crate_root =
-      "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/backport.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/display.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/error.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/eval.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/identifier.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/impls.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/parse.rs",
-    "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/src/serde.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2018"
-  cargo_pkg_version = "1.0.26"
-  cargo_pkg_authors = "David Tolnay <dtolnay@gmail.com>"
-  cargo_pkg_name = "semver"
-  cargo_pkg_description =
-      "Parser and evaluator for Cargo's flavor of Semantic Versioning"
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  features = [ "std" ]
-  build_root =
-      "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/build.rs"
-  build_sources =
-      [ "//third_party/rust/chromium_crates_io/vendor/semver-1.0.26/build.rs" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  # Only for usage from third-party crates. Add the crate to
-  # //third_party/rust/chromium_crates_io/Cargo.toml to use
-  # it from first-party code.
-  visibility = [ "//third_party/rust/*" ]
-  testonly = true
-}
diff --git a/third_party/rust/semver/v1/README.chromium b/third_party/rust/semver/v1/README.chromium
deleted file mode 100644
index d7fad5db..0000000
--- a/third_party/rust/semver/v1/README.chromium
+++ /dev/null
@@ -1,10 +0,0 @@
-Name: semver
-URL: https://crates.io/crates/semver
-Version: 1.0.26
-Revision: 3e64fdbfce78bfbd2eb97bdbdc50ce4d62c9831b
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/semver-1.0.26/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: Parser and evaluator for Cargo's flavor of Semantic Versioning
diff --git a/third_party/rust/serde/v1/BUILD.gn b/third_party/rust/serde/v1/BUILD.gn
index 199fc758..1d01a5d 100644
--- a/third_party/rust/serde/v1/BUILD.gn
+++ b/third_party/rust/serde/v1/BUILD.gn
@@ -64,4 +64,9 @@
   rustflags = [
     "--cap-lints=allow",  # Suppress all warnings in crates.io crates
   ]
+
+  # Only for usage from third-party crates. Add the crate to
+  # //third_party/rust/chromium_crates_io/Cargo.toml to use
+  # it from first-party code.
+  visibility = [ "//third_party/rust/*" ]
 }
diff --git a/third_party/rust/serde_json/v1/BUILD.gn b/third_party/rust/serde_json/v1/BUILD.gn
deleted file mode 100644
index d46a92b..0000000
--- a/third_party/rust/serde_json/v1/BUILD.gn
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "serde_json"
-  epoch = "1"
-  crate_type = "rlib"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/de.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/error.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/core.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/io/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/iter.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/algorithm.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bhcomp.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/bignum.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/cached_float80.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/digit.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/errors.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/exponent.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/float.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers32.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/large_powers64.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/math.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/num.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/parse.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/rounding.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/shift.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lexical/small_powers.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/macros.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/map.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/number.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/raw.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/read.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/ser.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/de.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/from.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/index.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/mod.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/partial_eq.rs",
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/src/value/ser.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "1.0.140"
-  cargo_pkg_authors = "Erick Tryzelaar <erick.tryzelaar@gmail.com>, David Tolnay <dtolnay@gmail.com>"
-  cargo_pkg_name = "serde_json"
-  cargo_pkg_description = "A JSON serialization file format"
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  deps = [
-    "//third_party/rust/itoa/v1:lib",
-    "//third_party/rust/memchr/v2:lib",
-    "//third_party/rust/ryu/v1:lib",
-    "//third_party/rust/serde/v1:lib",
-  ]
-  features = [ "std" ]
-  build_root =
-      "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/build.rs"
-  build_sources = [
-    "//third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/build.rs",
-  ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-
-  testonly = true
-}
diff --git a/third_party/rust/serde_json/v1/README.chromium b/third_party/rust/serde_json/v1/README.chromium
deleted file mode 100644
index 4fca599f..0000000
--- a/third_party/rust/serde_json/v1/README.chromium
+++ /dev/null
@@ -1,10 +0,0 @@
-Name: serde_json
-URL: https://crates.io/crates/serde_json
-Version: 1.0.140
-Revision: 762783414e6c4f8d670c9d87eb04913efb80d3be
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/serde_json-1.0.140/LICENSE-APACHE
-Shipped: no
-Security Critical: no
-
-Description: A JSON serialization file format
diff --git a/third_party/rust/syn/v2/BUILD.gn b/third_party/rust/syn/v2/BUILD.gn
index 006fdda..63ea7e18 100644
--- a/third_party/rust/syn/v2/BUILD.gn
+++ b/third_party/rust/syn/v2/BUILD.gn
@@ -101,7 +101,6 @@
     "printing",
     "proc-macro",
     "visit",
-    "visit-mut",
   ]
   rustflags = [
     "--cap-lints=allow",  # Suppress all warnings in crates.io crates
diff --git a/third_party/rust/unicode_linebreak/v0_1/BUILD.gn b/third_party/rust/unicode_linebreak/v0_1/BUILD.gn
deleted file mode 100644
index b1172b7..0000000
--- a/third_party/rust/unicode_linebreak/v0_1/BUILD.gn
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2023 The Chromium Authors
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# @generated from third_party/rust/chromium_crates_io/BUILD.gn.hbs by
-# tools/crates/gnrt.
-# Do not edit!
-
-import("//build/rust/cargo_crate.gni")
-
-cargo_crate("lib") {
-  crate_name = "unicode_linebreak"
-  epoch = "0.1"
-  crate_type = "rlib"
-  crate_root = "//third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/lib.rs"
-  sources = [
-    "//third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/lib.rs",
-    "//third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/shared.rs",
-    "//third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/src/tables.rs",
-  ]
-  inputs = []
-
-  build_native_rust_unit_tests = false
-  edition = "2021"
-  cargo_pkg_version = "0.1.5"
-  cargo_pkg_authors = "Axel Forsman <axelsfor@gmail.com>"
-  cargo_pkg_name = "unicode-linebreak"
-  cargo_pkg_description =
-      "Implementation of the Unicode Line Breaking Algorithm"
-  library_configs -= [ "//build/config/coverage:default_coverage" ]
-  library_configs -= [ "//build/config/compiler:chromium_code" ]
-  library_configs += [ "//build/config/compiler:no_chromium_code" ]
-  executable_configs -= [ "//build/config/compiler:chromium_code" ]
-  executable_configs += [ "//build/config/compiler:no_chromium_code" ]
-  proc_macro_configs -= [ "//build/config/compiler:chromium_code" ]
-  proc_macro_configs += [ "//build/config/compiler:no_chromium_code" ]
-  rustflags = [
-    "--cap-lints=allow",  # Suppress all warnings in crates.io crates
-  ]
-}
diff --git a/third_party/rust/unicode_linebreak/v0_1/README.chromium b/third_party/rust/unicode_linebreak/v0_1/README.chromium
deleted file mode 100644
index 1d046e4..0000000
--- a/third_party/rust/unicode_linebreak/v0_1/README.chromium
+++ /dev/null
@@ -1,9 +0,0 @@
-Name: unicode-linebreak
-URL: https://crates.io/crates/unicode-linebreak
-Version: 0.1.5
-License: Apache-2.0
-License File: //third_party/rust/chromium_crates_io/vendor/unicode-linebreak-0.1.5/LICENSE
-Shipped: yes
-Security Critical: yes
-
-Description: Implementation of the Unicode Line Breaking Algorithm