Rollup merge of #132494 - onur-ozkan:library-profile-default, r=jieyouxu
make `download-rustc="if-unchanged"` default for library profile
Since `download-rustc` is tested on CI and https://github.com/rust-lang/rust/pull/132267 makes it effective for library development, we can now make it the default for the library profile.
Implements the 3rd item from [this tracking issue](https://github.com/rust-lang/rust/issues/131744).
diff --git a/.mailmap b/.mailmap
index 56490ca..fdb62a9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -31,6 +31,7 @@
Alfie John <alfie@alfie.wtf> Alfie John <alfiej@fastmail.fm>
Alona Enraght-Moony <code@alona.page> <nixon.emoony@gmail.com>
Alona Enraght-Moony <code@alona.page> <nixon@caminus.local>
+Alona Enraght-Moony <code@alona.page> <contact@alona.page>
Amanda Stjerna <mail@amandastjerna.se> <albin.stjerna@gmail.com>
Amanda Stjerna <mail@amandastjerna.se> <amanda.stjerna@it.uu.se>
Amos Onn <amosonn@gmail.com>
@@ -75,6 +76,7 @@
Benoît Cortier <benoit.cortier@fried-world.eu>
Bheesham Persaud <bheesham123@hotmail.com> Bheesham Persaud <bheesham.persaud@live.ca>
bjorn3 <17426603+bjorn3@users.noreply.github.com> <bjorn3@users.noreply.github.com>
+bjorn3 <17426603+bjorn3@users.noreply.github.com> <bjorn3_gh@protonmail.com>
Björn Steinbrink <bsteinbr@gmail.com> <B.Steinbrink@gmx.de>
blake2-ppc <ulrik.sverdrup@gmail.com> <blake2-ppc>
blyxyas <blyxyas@gmail.com> Alejandra González <blyxyas@gmail.com>
@@ -172,6 +174,7 @@
E. Dunham <edunham@mozilla.com> edunham <edunham@mozilla.com>
Ed Barnard <eabarnard@gmail.com>
Eduard-Mihai Burtescu <edy.burt@gmail.com>
+Eduard-Mihai Burtescu <edy.burt@gmail.com> <eddyb@lyken.rs>
Eduardo Bautista <me@eduardobautista.com> <=>
Eduardo Bautista <me@eduardobautista.com> <mail@eduardobautista.com>
Eduardo Broto <ebroto@tutanota.com>
@@ -186,6 +189,7 @@
Erik Desjardins <erikdesjardins@users.noreply.github.com>
Erik Jensen <erikjensen@rkjnsn.net>
Erin Power <xampprocky@gmail.com>
+Erin Power <xampprocky@gmail.com> <xampprocky@icloud.com>
Erin Power <xampprocky@gmail.com> <theaaronepower@gmail.com>
Erin Power <xampprocky@gmail.com> <Aaronepower@users.noreply.github.com>
Esteban Küber <esteban@kuber.com.ar>
@@ -198,6 +202,7 @@
Fabian Kössel <fkjogu@users.noreply.github.com>
Falco Hirschenberger <falco.hirschenberger@gmail.com> <hirschen@itwm.fhg.de>
Felix S. Klock II <pnkfelix@pnkfx.org> Felix S Klock II <pnkfelix@pnkfx.org>
+Felix S. Klock II <pnkfelix@pnkfx.org> <pnkfelix@mozilla.com>
Félix Saparelli <felix@passcod.name>
Flaper Fesp <flaper87@gmail.com>
Florian Berger <fbergr@gmail.com>
@@ -245,7 +250,7 @@
Ivan Ivaschenko <defuz.net@gmail.com>
ivan tkachenko <me@ratijas.tk>
J. J. Weber <jjweber@gmail.com>
-Jack Huey <jack.huey@umassmed.edu>
+Jack Huey <jack.huey@umassmed.edu> <jackh726@gmail.com>
Jacob <jacob.macritchie@gmail.com>
Jacob Greenfield <xales@naveria.com>
Jacob Pratt <jacob@jhpratt.dev> <the.z.cuber@gmail.com>
@@ -292,6 +297,7 @@
John Hodge <acessdev@gmail.com> John Hodge <tpg@mutabah.net>
John Hörnvall <trolledwoods@gmail.com>
John Kåre Alsaker <john.kare.alsaker@gmail.com>
+John Kåre Alsaker <john.kare.alsaker@gmail.com> <zoxc32@gmail.com>
John Talling <inrustwetrust@users.noreply.github.com>
John Van Enk <vanenkj@gmail.com>
Jonas Tepe <jonasprogrammer@gmail.com>
@@ -368,6 +374,7 @@
Luke Metz <luke.metz@students.olin.edu>
Luqman Aden <me@luqman.ca> <laden@csclub.uwaterloo.ca>
Luqman Aden <me@luqman.ca> <laden@mozilla.com>
+Luqman Aden <me@luqman.ca> <rust@luqman.ca>
Lzu Tao <taolzu@gmail.com>
Maik Klein <maikklein@googlemail.com>
Malo Jaffré <jaffre.malo@gmail.com>
@@ -409,6 +416,7 @@
mibac138 <5672750+mibac138@users.noreply.github.com>
Michael Williams <m.t.williams@live.com>
Michael Woerister <michaelwoerister@posteo> <michaelwoerister@gmail>
+Michael Woerister <michaelwoerister@posteo> <michaelwoerister@gmail.com>
Michael Woerister <michaelwoerister@posteo> <michaelwoerister@users.noreply.github.com>
Michael Woerister <michaelwoerister@posteo> <michaelwoerister@posteo.net>
Michael Zhang <hmperson1@gmail.com>
@@ -422,6 +430,7 @@
msizanoen1 <qtmlabs@protonmail.com>
Mukilan Thiagarajan <mukilanthiagarajan@gmail.com>
Nadrieril Feneanar <Nadrieril@users.noreply.github.com>
+Nadrieril Feneanar <Nadrieril@users.noreply.github.com> <nadrieril+git@gmail.com>
NAKASHIMA, Makoto <makoto.nksm+github@gmail.com> <makoto.nksm@gmail.com>
NAKASHIMA, Makoto <makoto.nksm+github@gmail.com> <makoto.nksm+github@gmail.com>
Nathan Ringo <remexre@gmail.com>
@@ -442,6 +451,8 @@
Nicolas Abram <abramlujan@gmail.com>
Nicole Mazzuca <npmazzuca@gmail.com>
Noratrieb <48135649+Noratrieb@users.noreply.github.com> <48135649+Nilstrieb@users.noreply.github.com>
+Noratrieb <48135649+Noratrieb@users.noreply.github.com> <nilstrieb@gmail.com>
+Noratrieb <48135649+Noratrieb@users.noreply.github.com> <nora@noratrieb.dev>
Nif Ward <nif.ward@gmail.com>
Nika Layzell <nika@thelayzells.com> <michael@thelayzells.com>
NODA Kai <nodakai@gmail.com>
@@ -460,6 +471,7 @@
Oliver Scherer <oli-obk@users.noreply.github.com> <public.oliver.schneider@kit.edu>
Oliver Scherer <oli-obk@users.noreply.github.com> <oliver.schneider@kit.edu>
Oliver Scherer <oli-obk@users.noreply.github.com> <obk8176014uqher834@olio-obk.de>
+Oliver Scherer <oli-obk@users.noreply.github.com> <rustc-contact@oli-obk.de>
Oliver Scherer <oli-obk@users.noreply.github.com>
Onur Özkan <onurozkan.dev@outlook.com> <work@onurozkan.dev>
Onur Özkan <onurozkan.dev@outlook.com>
@@ -496,6 +508,7 @@
rChaser53 <tayoshizawa29@gmail.com>
Rémy Rakic <remy.rakic@gmail.com>
Rémy Rakic <remy.rakic@gmail.com> <remy.rakic+github@gmail.com>
+Rémy Rakic <remy.rakic@gmail.com> <remy.rakic+rust@gmail.com>
Renato Riccieri Santos Zannon <renato@rrsz.com.br>
Richard Diamond <wichard@vitalitystudios.com> <wichard@hahbee.co>
Ricky Hosfelt <ricky@hosfelt.io>
@@ -525,6 +538,7 @@
Santiago Pastorino <spastorino@gmail.com>
Santiago Pastorino <spastorino@gmail.com> <santiago@wyeworks.com>
Scott McMurray <scottmcm@users.noreply.github.com>
+Scott McMurray <scottmcm@users.noreply.github.com> <smcmurray@acm.org>
Scott Olson <scott@solson.me> Scott Olson <scott@scott-olson.org>
Sean Gillespie <sean.william.g@gmail.com> swgillespie <sean.william.g@gmail.com>
Seiichi Uchida <seuchida@gmail.com>
@@ -536,6 +550,7 @@
Simon Barber-Dueck <sbarberdueck@gmail.com> Simon BD <simon@server>
Simon Sapin <simon@exyr.org> <simon.sapin@exyr.org>
Simonas Kazlauskas <git@kazlauskas.me> Simonas Kazlauskas <github@kazlauskas.me>
+Simonas Kazlauskas <git@kazlauskas.me> <simonas+t-compiler@kazlauskas.me>
Siva Prasad <sivaauturic@gmail.com>
Smittyvb <me@smitop.com>
Srinivas Reddy Thatiparthy <thatiparthysreenivas@gmail.com>
@@ -556,6 +571,8 @@
Tau Gärtli <git@tau.garden> <ruben.schmidmeister@icloud.com>
Tero Hänninen <lgvz@users.noreply.github.com> Tero Hänninen <tejohann@kapsi.fi>
The8472 <git@infinite-source.de>
+The8472 <git@infinite-source.de> <the8472.rs@infinite-source.de>
+The8472 <git@infinite-source.de> <the8472@users.noreply.github.com>
Theo Belaire <theo.belaire@gmail.com> Theo Belaire <tyr.god.of.war.42@gmail.com>
Theodore Luo Wang <wangtheo662@gmail.com>
Thiago Pontes <email@thiago.me> thiagopnts <thiagopnts@gmail.com>
@@ -593,7 +610,8 @@
Wesley Wiser <wwiser@gmail.com> <wesleywiser@microsoft.com>
whitequark <whitequark@whitequark.org>
William Ting <io@williamting.com> <william.h.ting@gmail.com>
-Wim Looman <wim@nemo157.com>
+Wim Looman <wim@nemo157.com> <rust-lang@nemo157.com>
+Wim Looman <wim@nemo157.com> <git@nemo157.com>
Without Boats <woboats@gmail.com>
Without Boats <woboats@gmail.com> <boats@mozilla.com>
Xinye Tao <xy.tao@outlook.com>
diff --git a/Cargo.lock b/Cargo.lock
index 22b3e82..9fa1daf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -720,6 +720,7 @@
"miropt-test-tools",
"regex",
"rustfix",
+ "semver",
"serde",
"serde_json",
"tracing",
@@ -3203,9 +3204,11 @@
"rand",
"rand_xoshiro",
"rustc_data_structures",
+ "rustc_feature",
"rustc_index",
"rustc_macros",
"rustc_serialize",
+ "rustc_span",
"tracing",
]
@@ -3748,11 +3751,11 @@
name = "rustc_hir_pretty"
version = "0.0.0"
dependencies = [
+ "rustc_abi",
"rustc_ast",
"rustc_ast_pretty",
"rustc_hir",
"rustc_span",
- "rustc_target",
]
[[package]]
@@ -3937,6 +3940,7 @@
name = "rustc_lint_defs"
version = "0.0.0"
dependencies = [
+ "rustc_abi",
"rustc_ast",
"rustc_data_structures",
"rustc_error_messages",
@@ -3944,7 +3948,6 @@
"rustc_macros",
"rustc_serialize",
"rustc_span",
- "rustc_target",
"serde",
]
@@ -4003,7 +4006,6 @@
"rustc_span",
"rustc_target",
"rustc_type_ir",
- "snap",
"tempfile",
"tracing",
]
@@ -4053,6 +4055,7 @@
dependencies = [
"either",
"itertools",
+ "rustc_abi",
"rustc_apfloat",
"rustc_arena",
"rustc_ast",
@@ -4068,7 +4071,6 @@
"rustc_pattern_analysis",
"rustc_session",
"rustc_span",
- "rustc_target",
"rustc_trait_selection",
"tracing",
]
@@ -4889,12 +4891,6 @@
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
-name = "snap"
-version = "1.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b"
-
-[[package]]
name = "socket2"
version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/compiler/rustc_abi/Cargo.toml b/compiler/rustc_abi/Cargo.toml
index 7448f06..3acd25e 100644
--- a/compiler/rustc_abi/Cargo.toml
+++ b/compiler/rustc_abi/Cargo.toml
@@ -9,9 +9,11 @@
rand = { version = "0.8.4", default-features = false, optional = true }
rand_xoshiro = { version = "0.6.0", optional = true }
rustc_data_structures = { path = "../rustc_data_structures", optional = true }
+rustc_feature = { path = "../rustc_feature", optional = true }
rustc_index = { path = "../rustc_index", default-features = false }
rustc_macros = { path = "../rustc_macros", optional = true }
rustc_serialize = { path = "../rustc_serialize", optional = true }
+rustc_span = { path = "../rustc_span", optional = true }
tracing = "0.1"
# tidy-alphabetical-end
@@ -22,8 +24,10 @@
# without depending on rustc_data_structures, rustc_macros and rustc_serialize
nightly = [
"dep:rustc_data_structures",
+ "dep:rustc_feature",
"dep:rustc_macros",
"dep:rustc_serialize",
+ "dep:rustc_span",
"rustc_index/nightly",
]
randomize = ["dep:rand", "dep:rand_xoshiro", "nightly"]
diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs
index 872cae5..ee63e46 100644
--- a/compiler/rustc_abi/src/callconv.rs
+++ b/compiler/rustc_abi/src/callconv.rs
@@ -6,9 +6,9 @@
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
-#[cfg(feature = "nightly")]
-use crate::{Abi, FieldsShape, TyAbiInterface, TyAndLayout};
use crate::{Align, HasDataLayout, Size};
+#[cfg(feature = "nightly")]
+use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout};
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
@@ -128,11 +128,11 @@ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, H
where
Ty: TyAbiInterface<'a, C> + Copy,
{
- match self.abi {
- Abi::Uninhabited => Err(Heterogeneous),
+ match self.backend_repr {
+ BackendRepr::Uninhabited => Err(Heterogeneous),
// The primitive for this algorithm.
- Abi::Scalar(scalar) => {
+ BackendRepr::Scalar(scalar) => {
let kind = match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
abi::Float(_) => RegKind::Float,
@@ -140,7 +140,7 @@ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, H
Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
}
- Abi::Vector { .. } => {
+ BackendRepr::Vector { .. } => {
assert!(!self.is_zst());
Ok(HomogeneousAggregate::Homogeneous(Reg {
kind: RegKind::Vector,
@@ -148,7 +148,7 @@ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, H
}))
}
- Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => {
+ BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
// Helper for computing `homogeneous_aggregate`, allowing a custom
// starting offset (used below for handling variants).
let from_fields_at =
@@ -246,7 +246,7 @@ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, H
Ok(result)
}
}
- Abi::Aggregate { sized: false } => Err(Heterogeneous),
+ BackendRepr::Memory { sized: false } => Err(Heterogeneous),
}
}
}
diff --git a/compiler/rustc_target/src/spec/abi/mod.rs b/compiler/rustc_abi/src/extern_abi/mod.rs
similarity index 99%
rename from compiler/rustc_target/src/spec/abi/mod.rs
rename to compiler/rustc_abi/src/extern_abi/mod.rs
index c209515..f7e4128 100644
--- a/compiler/rustc_target/src/spec/abi/mod.rs
+++ b/compiler/rustc_abi/src/extern_abi/mod.rs
@@ -7,9 +7,11 @@
#[cfg(test)]
mod tests;
+use ExternAbi as Abi;
+
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
#[derive(HashStable_Generic, Encodable, Decodable)]
-pub enum Abi {
+pub enum ExternAbi {
// Some of the ABIs come first because every time we add a new ABI, we have to re-bless all the
// hashing tests. These are used in many places, so giving them stable values reduces test
// churn. The specific values are meaningless.
diff --git a/compiler/rustc_target/src/spec/abi/tests.rs b/compiler/rustc_abi/src/extern_abi/tests.rs
similarity index 100%
rename from compiler/rustc_target/src/spec/abi/tests.rs
rename to compiler/rustc_abi/src/extern_abi/tests.rs
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 86de39b..e6d66f6 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -6,7 +6,7 @@
use tracing::debug;
use crate::{
- Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
+ AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
Variants, WrappingRange,
};
@@ -125,7 +125,7 @@ pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
offsets: [Size::ZERO, b_offset].into(),
memory_index: [0, 1].into(),
},
- abi: Abi::ScalarPair(a, b),
+ backend_repr: BackendRepr::ScalarPair(a, b),
largest_niche,
align,
size,
@@ -216,7 +216,7 @@ pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
- abi: Abi::Uninhabited,
+ backend_repr: BackendRepr::Uninhabited,
largest_niche: None,
align: dl.i8_align,
size: Size::ZERO,
@@ -331,7 +331,7 @@ pub fn layout_of_union<
if let Ok(common) = common_non_zst_abi_and_align {
// Discard valid range information and allow undef
- let field_abi = field.abi.to_union();
+ let field_abi = field.backend_repr.to_union();
if let Some((common_abi, common_align)) = common {
if common_abi != field_abi {
@@ -340,7 +340,7 @@ pub fn layout_of_union<
} else {
// Fields with the same non-Aggregate ABI should also
// have the same alignment
- if !matches!(common_abi, Abi::Aggregate { .. }) {
+ if !matches!(common_abi, BackendRepr::Memory { .. }) {
assert_eq!(
common_align, field.align.abi,
"non-Aggregate field with matching ABI but differing alignment"
@@ -369,11 +369,11 @@ pub fn layout_of_union<
// If all non-ZST fields have the same ABI, we may forward that ABI
// for the union as a whole, unless otherwise inhibited.
let abi = match common_non_zst_abi_and_align {
- Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
+ Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
Ok(Some((abi, _))) => {
if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
// Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
- Abi::Aggregate { sized: true }
+ BackendRepr::Memory { sized: true }
} else {
abi
}
@@ -387,7 +387,7 @@ pub fn layout_of_union<
Ok(LayoutData {
variants: Variants::Single { index: only_variant_idx },
fields: FieldsShape::Union(union_field_count),
- abi,
+ backend_repr: abi,
largest_niche: None,
align,
size: size.align_to(align.abi),
@@ -434,23 +434,23 @@ fn layout_of_struct<
// Already doesn't have any niches
Scalar::Union { .. } => {}
};
- match &mut st.abi {
- Abi::Uninhabited => {}
- Abi::Scalar(scalar) => hide_niches(scalar),
- Abi::ScalarPair(a, b) => {
+ match &mut st.backend_repr {
+ BackendRepr::Uninhabited => {}
+ BackendRepr::Scalar(scalar) => hide_niches(scalar),
+ BackendRepr::ScalarPair(a, b) => {
hide_niches(a);
hide_niches(b);
}
- Abi::Vector { element, count: _ } => hide_niches(element),
- Abi::Aggregate { sized: _ } => {}
+ BackendRepr::Vector { element, count: _ } => hide_niches(element),
+ BackendRepr::Memory { sized: _ } => {}
}
st.largest_niche = None;
return Ok(st);
}
let (start, end) = scalar_valid_range;
- match st.abi {
- Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+ match st.backend_repr {
+ BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
// Enlarging validity ranges would result in missed
// optimizations, *not* wrongly assuming the inner
// value is valid. e.g. unions already enlarge validity ranges,
@@ -607,8 +607,8 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
}
// It can't be a Scalar or ScalarPair because the offset isn't 0.
- if !layout.abi.is_uninhabited() {
- layout.abi = Abi::Aggregate { sized: true };
+ if !layout.is_uninhabited() {
+ layout.backend_repr = BackendRepr::Memory { sized: true };
}
layout.size += this_offset;
@@ -627,26 +627,26 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
let same_size = size == variant_layouts[largest_variant_index].size;
let same_align = align == variant_layouts[largest_variant_index].align;
- let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
- Abi::Uninhabited
+ let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) {
+ BackendRepr::Uninhabited
} else if same_size && same_align && others_zst {
- match variant_layouts[largest_variant_index].abi {
+ match variant_layouts[largest_variant_index].backend_repr {
// When the total alignment and size match, we can use the
// same ABI as the scalar variant with the reserved niche.
- Abi::Scalar(_) => Abi::Scalar(niche_scalar),
- Abi::ScalarPair(first, second) => {
+ BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
+ BackendRepr::ScalarPair(first, second) => {
// Only the niche is guaranteed to be initialised,
// so use union layouts for the other primitive.
if niche_offset == Size::ZERO {
- Abi::ScalarPair(niche_scalar, second.to_union())
+ BackendRepr::ScalarPair(niche_scalar, second.to_union())
} else {
- Abi::ScalarPair(first.to_union(), niche_scalar)
+ BackendRepr::ScalarPair(first.to_union(), niche_scalar)
}
}
- _ => Abi::Aggregate { sized: true },
+ _ => BackendRepr::Memory { sized: true },
}
} else {
- Abi::Aggregate { sized: true }
+ BackendRepr::Memory { sized: true }
};
let layout = LayoutData {
@@ -664,7 +664,7 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
offsets: [niche_offset].into(),
memory_index: [0].into(),
},
- abi,
+ backend_repr: abi,
largest_niche,
size,
align,
@@ -833,14 +833,14 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
end: (max as u128 & tag_mask),
},
};
- let mut abi = Abi::Aggregate { sized: true };
+ let mut abi = BackendRepr::Memory { sized: true };
- if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
+ if layout_variants.iter().all(|v| v.is_uninhabited()) {
+ abi = BackendRepr::Uninhabited;
} else if tag.size(dl) == size {
// Make sure we only use scalar layout when the enum is entirely its
// own tag (i.e. it has no padding nor any non-ZST variant fields).
- abi = Abi::Scalar(tag);
+ abi = BackendRepr::Scalar(tag);
} else {
// Try to use a ScalarPair for all tagged enums.
// That's possible only if we can find a common primitive type for all variants.
@@ -864,8 +864,8 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
break;
}
};
- let prim = match field.abi {
- Abi::Scalar(scalar) => {
+ let prim = match field.backend_repr {
+ BackendRepr::Scalar(scalar) => {
common_prim_initialized_in_all_variants &=
matches!(scalar, Scalar::Initialized { .. });
scalar.primitive()
@@ -934,7 +934,7 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
{
// We can use `ScalarPair` only when it matches our
// already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
+ abi = pair.backend_repr;
}
}
}
@@ -942,12 +942,14 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
// If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
// variants to ensure they are consistent. This is because a downcast is
// semantically a NOP, and thus should not affect layout.
- if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+ if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
for variant in &mut layout_variants {
// We only do this for variants with fields; the others are not accessed anyway.
// Also do not overwrite any already existing "clever" ABIs.
- if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
- variant.abi = abi;
+ if variant.fields.count() > 0
+ && matches!(variant.backend_repr, BackendRepr::Memory { .. })
+ {
+ variant.backend_repr = abi;
// Also need to bump up the size and alignment, so that the entire value fits
// in here.
variant.size = cmp::max(variant.size, size);
@@ -970,7 +972,7 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
memory_index: [0].into(),
},
largest_niche,
- abi,
+ backend_repr: abi,
align,
size,
max_repr_align,
@@ -1252,7 +1254,7 @@ fn univariant_biased<
}
let mut layout_of_single_non_zst_field = None;
let sized = unsized_field.is_none();
- let mut abi = Abi::Aggregate { sized };
+ let mut abi = BackendRepr::Memory { sized };
let optimize_abi = !repr.inhibit_newtype_abi_optimization();
@@ -1270,16 +1272,16 @@ fn univariant_biased<
// Field fills the struct and it has a scalar or scalar pair ABI.
if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
{
- match field.abi {
+ match field.backend_repr {
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
- Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
- abi = field.abi;
+ BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
+ abi = field.backend_repr;
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
- Abi::ScalarPair(..) => {
- abi = field.abi;
+ BackendRepr::ScalarPair(..) => {
+ abi = field.backend_repr;
}
_ => {}
}
@@ -1288,8 +1290,8 @@ fn univariant_biased<
// Two non-ZST fields, and they're both scalars.
(Some((i, a)), Some((j, b)), None) => {
- match (a.abi, b.abi) {
- (Abi::Scalar(a), Abi::Scalar(b)) => {
+ match (a.backend_repr, b.backend_repr) {
+ (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
((i, a), (j, b))
@@ -1315,7 +1317,7 @@ fn univariant_biased<
{
// We can use `ScalarPair` only when it matches our
// already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
+ abi = pair.backend_repr;
}
}
_ => {}
@@ -1325,8 +1327,8 @@ fn univariant_biased<
_ => {}
}
}
- if fields.iter().any(|f| f.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
+ if fields.iter().any(|f| f.is_uninhabited()) {
+ abi = BackendRepr::Uninhabited;
}
let unadjusted_abi_align = if repr.transparent() {
@@ -1344,7 +1346,7 @@ fn univariant_biased<
Ok(LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
- abi,
+ backend_repr: abi,
largest_niche,
align,
size,
diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs
index e029e14..062447e 100644
--- a/compiler/rustc_abi/src/layout/ty.rs
+++ b/compiler/rustc_abi/src/layout/ty.rs
@@ -83,8 +83,8 @@ pub fn variants(self) -> &'a Variants<FieldIdx, VariantIdx> {
&self.0.0.variants
}
- pub fn abi(self) -> Abi {
- self.0.0.abi
+ pub fn backend_repr(self) -> BackendRepr {
+ self.0.0.backend_repr
}
pub fn largest_niche(self) -> Option<Niche> {
@@ -114,7 +114,7 @@ pub fn unadjusted_abi_align(self) -> Align {
pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
self.size() == data_layout.pointer_size
&& self.align().abi == data_layout.pointer_align.abi
- && matches!(self.abi(), Abi::Scalar(Scalar::Initialized { .. }))
+ && matches!(self.backend_repr(), BackendRepr::Scalar(Scalar::Initialized { .. }))
}
}
@@ -196,9 +196,9 @@ pub fn is_single_fp_element<C>(self, cx: &C) -> bool
Ty: TyAbiInterface<'a, C>,
C: HasDataLayout,
{
- match self.abi {
- Abi::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
- Abi::Aggregate { .. } => {
+ match self.backend_repr {
+ BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
+ BackendRepr::Memory { .. } => {
if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
self.field(cx, 0).is_single_fp_element(cx)
} else {
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 41922ae..ec75878 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1,6 +1,7 @@
// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", doc(rust_logo))]
+#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
@@ -28,8 +29,15 @@
#[cfg(test)]
mod tests;
+#[cfg(feature = "nightly")]
+mod extern_abi;
+
pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
#[cfg(feature = "nightly")]
+pub use extern_abi::{
+ AbiDisabled, AbiUnsupported, ExternAbi, all_names, enabled_names, is_enabled, is_stable, lookup,
+};
+#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};
@@ -1344,11 +1352,19 @@ impl AddressSpace {
pub const DATA: Self = AddressSpace(0);
}
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
+/// The way we represent values to the backend
+///
+/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
+/// In reality, this implies little about that, but is mostly used to describe the syntactic form
+/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
+/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
+/// how the value will be lowered to the calling convention, in itself.
+///
+/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
+/// and larger values will usually prefer to be represented as memory.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Abi {
+pub enum BackendRepr {
Uninhabited,
Scalar(Scalar),
ScalarPair(Scalar, Scalar),
@@ -1356,19 +1372,23 @@ pub enum Abi {
element: Scalar,
count: u64,
},
- Aggregate {
+ // FIXME: I sometimes use memory, sometimes use an IR aggregate!
+ Memory {
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
},
}
-impl Abi {
+impl BackendRepr {
/// Returns `true` if the layout corresponds to an unsized type.
#[inline]
pub fn is_unsized(&self) -> bool {
match *self {
- Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
- Abi::Aggregate { sized } => !sized,
+ BackendRepr::Uninhabited
+ | BackendRepr::Scalar(_)
+ | BackendRepr::ScalarPair(..)
+ | BackendRepr::Vector { .. } => false,
+ BackendRepr::Memory { sized } => !sized,
}
}
@@ -1381,7 +1401,7 @@ pub fn is_sized(&self) -> bool {
#[inline]
pub fn is_signed(&self) -> bool {
match self {
- Abi::Scalar(scal) => match scal.primitive() {
+ BackendRepr::Scalar(scal) => match scal.primitive() {
Primitive::Int(_, signed) => signed,
_ => false,
},
@@ -1392,61 +1412,67 @@ pub fn is_signed(&self) -> bool {
/// Returns `true` if this is an uninhabited type
#[inline]
pub fn is_uninhabited(&self) -> bool {
- matches!(*self, Abi::Uninhabited)
+ matches!(*self, BackendRepr::Uninhabited)
}
/// Returns `true` if this is a scalar type
#[inline]
pub fn is_scalar(&self) -> bool {
- matches!(*self, Abi::Scalar(_))
+ matches!(*self, BackendRepr::Scalar(_))
}
/// Returns `true` if this is a bool
#[inline]
pub fn is_bool(&self) -> bool {
- matches!(*self, Abi::Scalar(s) if s.is_bool())
+ matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
}
/// Returns the fixed alignment of this ABI, if any is mandated.
pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
Some(match *self {
- Abi::Scalar(s) => s.align(cx),
- Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
- Abi::Vector { element, count } => {
+ BackendRepr::Scalar(s) => s.align(cx),
+ BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
+ BackendRepr::Vector { element, count } => {
cx.data_layout().vector_align(element.size(cx) * count)
}
- Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+ BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
})
}
/// Returns the fixed size of this ABI, if any is mandated.
pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
Some(match *self {
- Abi::Scalar(s) => {
+ BackendRepr::Scalar(s) => {
// No padding in scalars.
s.size(cx)
}
- Abi::ScalarPair(s1, s2) => {
+ BackendRepr::ScalarPair(s1, s2) => {
// May have some padding between the pair.
let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
(field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
}
- Abi::Vector { element, count } => {
+ BackendRepr::Vector { element, count } => {
// No padding in vectors, except possibly for trailing padding
// to make the size a multiple of align (e.g. for vectors of size 3).
(element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
}
- Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+ BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
})
}
/// Discard validity range information and allow undef.
pub fn to_union(&self) -> Self {
match *self {
- Abi::Scalar(s) => Abi::Scalar(s.to_union()),
- Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()),
- Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count },
- Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
+ BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
+ BackendRepr::ScalarPair(s1, s2) => {
+ BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
+ }
+ BackendRepr::Vector { element, count } => {
+ BackendRepr::Vector { element: element.to_union(), count }
+ }
+ BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+ BackendRepr::Memory { sized: true }
+ }
}
}
@@ -1454,12 +1480,12 @@ pub fn eq_up_to_validity(&self, other: &Self) -> bool {
match (self, other) {
// Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
// We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
- (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
+ (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
(
- Abi::Vector { element: element_l, count: count_l },
- Abi::Vector { element: element_r, count: count_r },
+ BackendRepr::Vector { element: element_l, count: count_l },
+ BackendRepr::Vector { element: element_r, count: count_r },
) => element_l.primitive() == element_r.primitive() && count_l == count_r,
- (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
+ (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
}
// Everything else must be strictly identical.
@@ -1616,14 +1642,14 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
/// must be taken into account.
pub variants: Variants<FieldIdx, VariantIdx>,
- /// The `abi` defines how this data is passed between functions, and it defines
- /// value restrictions via `valid_range`.
+ /// The `backend_repr` defines how this data will be represented to the codegen backend,
+ /// and encodes value restrictions via `valid_range`.
///
/// Note that this is entirely orthogonal to the recursive structure defined by
/// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
- /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+ /// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
/// have to be taken into account to find all fields of this layout.
- pub abi: Abi,
+ pub backend_repr: BackendRepr,
/// The leaf scalar with the largest number of invalid values
/// (i.e. outside of its `valid_range`), if it exists.
@@ -1646,15 +1672,15 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
/// Returns `true` if this is an aggregate type (including a ScalarPair!)
pub fn is_aggregate(&self) -> bool {
- match self.abi {
- Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
- Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
+ match self.backend_repr {
+ BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
+ BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
}
}
/// Returns `true` if this is an uninhabited type
pub fn is_uninhabited(&self) -> bool {
- self.abi.is_uninhabited()
+ self.backend_repr.is_uninhabited()
}
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
@@ -1664,7 +1690,7 @@ pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
- abi: Abi::Scalar(scalar),
+ backend_repr: BackendRepr::Scalar(scalar),
largest_niche,
size,
align,
@@ -1686,7 +1712,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let LayoutData {
size,
align,
- abi,
+ backend_repr,
fields,
largest_niche,
variants,
@@ -1696,7 +1722,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Layout")
.field("size", size)
.field("align", align)
- .field("abi", abi)
+ .field("abi", backend_repr)
.field("fields", fields)
.field("largest_niche", largest_niche)
.field("variants", variants)
@@ -1732,12 +1758,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
/// Returns `true` if the layout corresponds to an unsized type.
#[inline]
pub fn is_unsized(&self) -> bool {
- self.abi.is_unsized()
+ self.backend_repr.is_unsized()
}
#[inline]
pub fn is_sized(&self) -> bool {
- self.abi.is_sized()
+ self.backend_repr.is_sized()
}
/// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
@@ -1750,10 +1776,12 @@ pub fn is_1zst(&self) -> bool {
/// Note that this does *not* imply that the type is irrelevant for layout! It can still have
/// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
pub fn is_zst(&self) -> bool {
- match self.abi {
- Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
- Abi::Uninhabited => self.size.bytes() == 0,
- Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+ match self.backend_repr {
+ BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
+ false
+ }
+ BackendRepr::Uninhabited => self.size.bytes() == 0,
+ BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
}
}
@@ -1768,8 +1796,8 @@ pub fn eq_abi(&self, other: &Self) -> bool {
// 2nd point is quite hard to check though.
self.size == other.size
&& self.is_sized() == other.is_sized()
- && self.abi.eq_up_to_validity(&other.abi)
- && self.abi.is_bool() == other.abi.is_bool()
+ && self.backend_repr.eq_up_to_validity(&other.backend_repr)
+ && self.backend_repr.is_bool() == other.backend_repr.is_bool()
&& self.align.abi == other.align.abi
&& self.max_repr_align == other.max_repr_align
&& self.unadjusted_abi_align == other.unadjusted_abi_align
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 8e4f4c8..ec6ca70 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -2810,6 +2810,8 @@ pub struct ModSpans {
/// E.g., `extern { .. }` or `extern "C" { .. }`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct ForeignMod {
+ /// Span of the `extern` keyword.
+ pub extern_span: Span,
/// `unsafe` keyword accepted syntactically for macro DSLs, but not
/// semantically by Rust.
pub safety: Safety,
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 2afbd97..44bb44c 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -525,7 +525,7 @@ pub fn walk_ty<T: MutVisitor>(vis: &mut T, ty: &mut P<Ty>) {
}
fn walk_foreign_mod<T: MutVisitor>(vis: &mut T, foreign_mod: &mut ForeignMod) {
- let ForeignMod { safety, abi: _, items } = foreign_mod;
+ let ForeignMod { extern_span: _, safety, abi: _, items } = foreign_mod;
visit_safety(vis, safety);
items.flat_map_in_place(|item| vis.flat_map_foreign_item(item));
}
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index eb71ec5..2f81154 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -366,7 +366,7 @@ fn walk<'a, V: Visitor<'a>>(
}
ModKind::Unloaded => {}
},
- ItemKind::ForeignMod(ForeignMod { safety: _, abi: _, items }) => {
+ ItemKind::ForeignMod(ForeignMod { extern_span: _, safety: _, abi: _, items }) => {
walk_list!(visitor, visit_foreign_item, items);
}
ItemKind::GlobalAsm(asm) => try_visit!(visitor.visit_inline_asm(asm)),
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index dc9c6b7..5a0e9e8 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -45,16 +45,14 @@
use rustc_ast::{self as ast, *};
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::FxIndexSet;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_errors::{DiagArgFromDisplay, DiagCtxtHandle, StashKey};
use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
-use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId, LocalDefIdMap};
+use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId};
use rustc_hir::{
- self as hir, ConstArg, GenericArg, HirId, ItemLocalMap, LangItem, MissingLifetimeKind,
- ParamName, TraitCandidate,
+ self as hir, ConstArg, GenericArg, HirId, ItemLocalMap, LangItem, ParamName, TraitCandidate,
};
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_macros::extension;
@@ -83,7 +81,6 @@ macro_rules! arena_vec {
mod format;
mod index;
mod item;
-mod lifetime_collector;
mod pat;
mod path;
@@ -149,12 +146,6 @@ struct LoweringContext<'a, 'hir> {
allow_async_iterator: Lrc<[Symbol]>,
allow_for_await: Lrc<[Symbol]>,
allow_async_fn_traits: Lrc<[Symbol]>,
-
- /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
- /// For each captured lifetime (e.g., 'a), we create a new lifetime parameter that is a generic
- /// defined on the TAIT, so we have type Foo<'a1> = ... and we establish a mapping in this
- /// field from the original parameter 'a to the new parameter 'a1.
- generics_def_id_map: Vec<LocalDefIdMap<LocalDefId>>,
}
impl<'a, 'hir> LoweringContext<'a, 'hir> {
@@ -199,7 +190,6 @@ fn new(tcx: TyCtxt<'hir>, resolver: &'a mut ResolverAstLowering) -> Self {
// FIXME(gen_blocks): how does `closure_track_caller`/`async_fn_track_caller`
// interact with `gen`/`async gen` blocks
allow_async_iterator: [sym::gen_future, sym::async_iterator].into(),
- generics_def_id_map: Default::default(),
}
}
@@ -282,7 +272,7 @@ enum ImplTraitContext {
/// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually
/// equivalent to a new opaque type like `type T = impl Debug; fn foo() -> T`.
///
- OpaqueTy { origin: hir::OpaqueTyOrigin },
+ OpaqueTy { origin: hir::OpaqueTyOrigin<LocalDefId> },
/// `impl Trait` is unstably accepted in this position.
FeatureGated(ImplTraitPosition, Symbol),
/// `impl Trait` is not accepted in this position.
@@ -528,54 +518,14 @@ fn next_node_id(&mut self) -> NodeId {
/// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
/// resolver (if any).
- fn orig_opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
- self.resolver.node_id_to_def_id.get(&node).copied()
- }
-
- /// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
- /// resolver (if any), after applying any remapping from `get_remapped_def_id`.
- ///
- /// For example, in a function like `fn foo<'a>(x: &'a u32)`,
- /// invoking with the id from the `ast::Lifetime` node found inside
- /// the `&'a u32` type would return the `LocalDefId` of the
- /// `'a` parameter declared on `foo`.
- ///
- /// This function also applies remapping from `get_remapped_def_id`.
- /// These are used when synthesizing opaque types from `-> impl Trait` return types and so forth.
- /// For example, in a function like `fn foo<'a>() -> impl Debug + 'a`,
- /// we would create an opaque type `type FooReturn<'a1> = impl Debug + 'a1`.
- /// When lowering the `Debug + 'a` bounds, we add a remapping to map `'a` to `'a1`.
fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
- self.orig_opt_local_def_id(node).map(|local_def_id| self.get_remapped_def_id(local_def_id))
+ self.resolver.node_id_to_def_id.get(&node).copied()
}
fn local_def_id(&self, node: NodeId) -> LocalDefId {
self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{node:?}`"))
}
- /// Get the previously recorded `to` local def id given the `from` local def id, obtained using
- /// `generics_def_id_map` field.
- fn get_remapped_def_id(&self, local_def_id: LocalDefId) -> LocalDefId {
- // `generics_def_id_map` is a stack of mappings. As we go deeper in impl traits nesting we
- // push new mappings, so we first need to get the latest (innermost) mappings, hence `iter().rev()`.
- //
- // Consider:
- //
- // `fn test<'a, 'b>() -> impl Trait<&'a u8, Ty = impl Sized + 'b> {}`
- //
- // We would end with a generics_def_id_map like:
- //
- // `[[fn#'b -> impl_trait#'b], [fn#'b -> impl_sized#'b]]`
- //
- // for the opaque type generated on `impl Sized + 'b`, we want the result to be: impl_sized#'b.
- // So, if we were trying to find first from the start (outermost) would give the wrong result, impl_trait#'b.
- self.generics_def_id_map
- .iter()
- .rev()
- .find_map(|map| map.get(&local_def_id).copied())
- .unwrap_or(local_def_id)
- }
-
/// Freshen the `LoweringContext` and ready it to lower a nested item.
/// The lowered item is registered into `self.children`.
///
@@ -647,27 +597,6 @@ fn with_def_id_parent<T>(&mut self, parent: LocalDefId, f: impl FnOnce(&mut Self
result
}
- /// Installs the remapping `remap` in scope while `f` is being executed.
- /// This causes references to the `LocalDefId` keys to be changed to
- /// refer to the values instead.
- ///
- /// The remapping is used when one piece of AST expands to multiple
- /// pieces of HIR. For example, the function `fn foo<'a>(...) -> impl Debug + 'a`,
- /// expands to both a function definition (`foo`) and a TAIT for the return value,
- /// both of which have a lifetime parameter `'a`. The remapping allows us to
- /// rewrite the `'a` in the return value to refer to the
- /// `'a` declared on the TAIT, instead of the function.
- fn with_remapping<R>(
- &mut self,
- remap: LocalDefIdMap<LocalDefId>,
- f: impl FnOnce(&mut Self) -> R,
- ) -> R {
- self.generics_def_id_map.push(remap);
- let res = f(self);
- self.generics_def_id_map.pop();
- res
- }
-
fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> &'hir hir::OwnerInfo<'hir> {
let attrs = std::mem::take(&mut self.attrs);
let mut bodies = std::mem::take(&mut self.bodies);
@@ -1487,7 +1416,7 @@ fn lower_ty_direct(&mut self, t: &Ty, itctx: ImplTraitContext) -> hir::Ty<'hir>
fn lower_opaque_impl_trait(
&mut self,
span: Span,
- origin: hir::OpaqueTyOrigin,
+ origin: hir::OpaqueTyOrigin<LocalDefId>,
opaque_ty_node_id: NodeId,
bounds: &GenericBounds,
itctx: ImplTraitContext,
@@ -1499,27 +1428,6 @@ fn lower_opaque_impl_trait(
// frequently opened issues show.
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);
- // Whether this opaque always captures lifetimes in scope.
- // Right now, this is all RPITIT and TAITs, and when `lifetime_capture_rules_2024`
- // is enabled. We don't check the span of the edition, since this is done
- // on a per-opaque basis to account for nested opaques.
- let always_capture_in_scope = match origin {
- _ if self.tcx.features().lifetime_capture_rules_2024() => true,
- hir::OpaqueTyOrigin::TyAlias { .. } => true,
- hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl, .. } => in_trait_or_impl.is_some(),
- hir::OpaqueTyOrigin::AsyncFn { .. } => {
- unreachable!("should be using `lower_coroutine_fn_ret_ty`")
- }
- };
- let captured_lifetimes_to_duplicate = lifetime_collector::lifetimes_for_opaque(
- self.resolver,
- always_capture_in_scope,
- opaque_ty_node_id,
- bounds,
- span,
- );
- debug!(?captured_lifetimes_to_duplicate);
-
// Feature gate for RPITIT + use<..>
match origin {
rustc_hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl: Some(_), .. } => {
@@ -1542,22 +1450,15 @@ fn lower_opaque_impl_trait(
_ => {}
}
- self.lower_opaque_inner(
- opaque_ty_node_id,
- origin,
- captured_lifetimes_to_duplicate,
- span,
- opaque_ty_span,
- |this| this.lower_param_bounds(bounds, itctx),
- )
+ self.lower_opaque_inner(opaque_ty_node_id, origin, opaque_ty_span, |this| {
+ this.lower_param_bounds(bounds, itctx)
+ })
}
fn lower_opaque_inner(
&mut self,
opaque_ty_node_id: NodeId,
- origin: hir::OpaqueTyOrigin,
- captured_lifetimes_to_duplicate: FxIndexSet<Lifetime>,
- span: Span,
+ origin: hir::OpaqueTyOrigin<LocalDefId>,
opaque_ty_span: Span,
lower_item_bounds: impl FnOnce(&mut Self) -> &'hir [hir::GenericBound<'hir>],
) -> hir::TyKind<'hir> {
@@ -1565,145 +1466,19 @@ fn lower_opaque_inner(
let opaque_ty_hir_id = self.lower_node_id(opaque_ty_node_id);
debug!(?opaque_ty_def_id, ?opaque_ty_hir_id);
- // Map from captured (old) lifetime to synthetic (new) lifetime.
- // Used to resolve lifetimes in the bounds of the opaque.
- let mut captured_to_synthesized_mapping = LocalDefIdMap::default();
- // List of (early-bound) synthetic lifetimes that are owned by the opaque.
- // This is used to create the `hir::Generics` owned by the opaque.
- let mut synthesized_lifetime_definitions = vec![];
- // Pairs of lifetime arg (that resolves to the captured lifetime)
- // and the def-id of the (early-bound) synthetic lifetime definition.
- // This is used both to create generics for the `TyKind::OpaqueDef` that
- // we return, and also as a captured lifetime mapping for RPITITs.
- let mut synthesized_lifetime_args = vec![];
-
- for lifetime in captured_lifetimes_to_duplicate {
- let res = self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error);
- let (old_def_id, missing_kind) = match res {
- LifetimeRes::Param { param: old_def_id, binder: _ } => (old_def_id, None),
-
- LifetimeRes::Fresh { param, kind, .. } => {
- debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
- if let Some(old_def_id) = self.orig_opt_local_def_id(param) {
- (old_def_id, Some(kind))
- } else {
- self.dcx()
- .span_delayed_bug(lifetime.ident.span, "no def-id for fresh lifetime");
- continue;
- }
- }
-
- // Opaques do not capture `'static`
- LifetimeRes::Static { .. } | LifetimeRes::Error => {
- continue;
- }
-
- res => {
- let bug_msg = format!(
- "Unexpected lifetime resolution {:?} for {:?} at {:?}",
- res, lifetime.ident, lifetime.ident.span
- );
- span_bug!(lifetime.ident.span, "{}", bug_msg);
- }
- };
-
- if captured_to_synthesized_mapping.get(&old_def_id).is_none() {
- // Create a new lifetime parameter local to the opaque.
- let duplicated_lifetime_node_id = self.next_node_id();
- let duplicated_lifetime_def_id = self.create_def(
- opaque_ty_def_id,
- duplicated_lifetime_node_id,
- lifetime.ident.name,
- DefKind::LifetimeParam,
- self.lower_span(lifetime.ident.span),
- );
- captured_to_synthesized_mapping.insert(old_def_id, duplicated_lifetime_def_id);
- // FIXME: Instead of doing this, we could move this whole loop
- // into the `with_hir_id_owner`, then just directly construct
- // the `hir::GenericParam` here.
- synthesized_lifetime_definitions.push((
- duplicated_lifetime_node_id,
- duplicated_lifetime_def_id,
- self.lower_ident(lifetime.ident),
- missing_kind,
- ));
-
- // Now make an arg that we can use for the generic params of the opaque tykind.
- let id = self.next_node_id();
- let lifetime_arg = self.new_named_lifetime_with_res(id, lifetime.ident, res);
- let duplicated_lifetime_def_id = self.local_def_id(duplicated_lifetime_node_id);
- synthesized_lifetime_args.push((lifetime_arg, duplicated_lifetime_def_id))
- }
- }
-
let opaque_ty_def = self.with_def_id_parent(opaque_ty_def_id, |this| {
- // Install the remapping from old to new (if any). This makes sure that
- // any lifetimes that would have resolved to the def-id of captured
- // lifetimes are remapped to the new *synthetic* lifetimes of the opaque.
- let bounds = this
- .with_remapping(captured_to_synthesized_mapping, |this| lower_item_bounds(this));
-
- let generic_params =
- this.arena.alloc_from_iter(synthesized_lifetime_definitions.iter().map(
- |&(new_node_id, new_def_id, ident, missing_kind)| {
- let hir_id = this.lower_node_id(new_node_id);
- let (name, kind) = if ident.name == kw::UnderscoreLifetime {
- (
- hir::ParamName::Fresh,
- hir::LifetimeParamKind::Elided(
- missing_kind.unwrap_or(MissingLifetimeKind::Underscore),
- ),
- )
- } else {
- (hir::ParamName::Plain(ident), hir::LifetimeParamKind::Explicit)
- };
-
- hir::GenericParam {
- hir_id,
- def_id: new_def_id,
- name,
- span: ident.span,
- pure_wrt_drop: false,
- kind: hir::GenericParamKind::Lifetime { kind },
- colon_span: None,
- source: hir::GenericParamSource::Generics,
- }
- },
- ));
- debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
-
- let lifetime_mapping = self.arena.alloc_slice(&synthesized_lifetime_args);
-
- trace!("registering opaque type with id {:#?}", opaque_ty_def_id);
+ let bounds = lower_item_bounds(this);
let opaque_ty_def = hir::OpaqueTy {
hir_id: opaque_ty_hir_id,
def_id: opaque_ty_def_id,
- generics: this.arena.alloc(hir::Generics {
- params: generic_params,
- predicates: &[],
- has_where_clause_predicates: false,
- where_clause_span: this.lower_span(span),
- span: this.lower_span(span),
- }),
bounds,
origin,
- lifetime_mapping,
span: this.lower_span(opaque_ty_span),
};
this.arena.alloc(opaque_ty_def)
});
- let generic_args = self.arena.alloc_from_iter(
- synthesized_lifetime_args
- .iter()
- .map(|(lifetime, _)| hir::GenericArg::Lifetime(*lifetime)),
- );
-
- // Create the `Foo<...>` reference itself. Note that the `type
- // Foo = impl Trait` is, internally, created as a child of the
- // async fn, so the *type parameters* are inherited. It's
- // only the lifetime parameters that we must supply.
- hir::TyKind::OpaqueDef(opaque_ty_def, generic_args)
+ hir::TyKind::OpaqueDef(opaque_ty_def)
}
fn lower_precise_capturing_args(
@@ -1885,13 +1660,6 @@ fn lower_coroutine_fn_ret_ty(
let opaque_ty_span =
self.mark_span_with_reason(DesugaringKind::Async, span, allowed_features);
- let captured_lifetimes = self
- .resolver
- .extra_lifetime_params(opaque_ty_node_id)
- .into_iter()
- .map(|(ident, id, _)| Lifetime { id, ident })
- .collect();
-
let in_trait_or_impl = match fn_kind {
FnDeclKind::Trait => Some(hir::RpitContext::Trait),
FnDeclKind::Impl => Some(hir::RpitContext::TraitImpl),
@@ -1902,8 +1670,6 @@ fn lower_coroutine_fn_ret_ty(
let opaque_ty_ref = self.lower_opaque_inner(
opaque_ty_node_id,
hir::OpaqueTyOrigin::AsyncFn { parent: fn_def_id, in_trait_or_impl },
- captured_lifetimes,
- span,
opaque_ty_span,
|this| {
let bound = this.lower_coroutine_fn_output_type_to_bound(
@@ -2000,10 +1766,7 @@ fn new_named_lifetime_with_res(
res: LifetimeRes,
) -> &'hir hir::Lifetime {
let res = match res {
- LifetimeRes::Param { param, .. } => {
- let param = self.get_remapped_def_id(param);
- hir::LifetimeName::Param(param)
- }
+ LifetimeRes::Param { param, .. } => hir::LifetimeName::Param(param),
LifetimeRes::Fresh { param, .. } => {
let param = self.local_def_id(param);
hir::LifetimeName::Param(param)
diff --git a/compiler/rustc_ast_lowering/src/lifetime_collector.rs b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
deleted file mode 100644
index 8d47c85..0000000
--- a/compiler/rustc_ast_lowering/src/lifetime_collector.rs
+++ /dev/null
@@ -1,151 +0,0 @@
-use rustc_ast::visit::{self, BoundKind, LifetimeCtxt, Visitor};
-use rustc_ast::{
- GenericBound, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, Ty, TyKind,
-};
-use rustc_data_structures::fx::FxIndexSet;
-use rustc_hir::def::{DefKind, LifetimeRes, Res};
-use rustc_middle::span_bug;
-use rustc_middle::ty::ResolverAstLowering;
-use rustc_span::Span;
-use rustc_span::symbol::{Ident, kw};
-
-use super::ResolverAstLoweringExt;
-
-struct LifetimeCollectVisitor<'ast> {
- resolver: &'ast mut ResolverAstLowering,
- always_capture_in_scope: bool,
- current_binders: Vec<NodeId>,
- collected_lifetimes: FxIndexSet<Lifetime>,
-}
-
-impl<'ast> LifetimeCollectVisitor<'ast> {
- fn new(resolver: &'ast mut ResolverAstLowering, always_capture_in_scope: bool) -> Self {
- Self {
- resolver,
- always_capture_in_scope,
- current_binders: Vec::new(),
- collected_lifetimes: FxIndexSet::default(),
- }
- }
-
- fn visit_opaque(&mut self, opaque_ty_node_id: NodeId, bounds: &'ast GenericBounds, span: Span) {
- // If we're edition 2024 or within a TAIT or RPITIT, *and* there is no
- // `use<>` statement to override the default capture behavior, then
- // capture all of the in-scope lifetimes.
- if (self.always_capture_in_scope || span.at_least_rust_2024())
- && bounds.iter().all(|bound| !matches!(bound, GenericBound::Use(..)))
- {
- for (ident, id, _) in self.resolver.extra_lifetime_params(opaque_ty_node_id) {
- self.record_lifetime_use(Lifetime { id, ident });
- }
- }
-
- // We also recurse on the bounds to make sure we capture all the lifetimes
- // mentioned in the bounds. These may disagree with the `use<>` list, in which
- // case we will error on these later. We will also recurse to visit any
- // nested opaques, which may *implicitly* capture lifetimes.
- for bound in bounds {
- self.visit_param_bound(bound, BoundKind::Bound);
- }
- }
-
- fn record_lifetime_use(&mut self, lifetime: Lifetime) {
- match self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error) {
- LifetimeRes::Param { binder, .. } | LifetimeRes::Fresh { binder, .. } => {
- if !self.current_binders.contains(&binder) {
- self.collected_lifetimes.insert(lifetime);
- }
- }
- LifetimeRes::Static { .. } | LifetimeRes::Error => {
- self.collected_lifetimes.insert(lifetime);
- }
- LifetimeRes::Infer => {}
- res => {
- let bug_msg = format!(
- "Unexpected lifetime resolution {:?} for {:?} at {:?}",
- res, lifetime.ident, lifetime.ident.span
- );
- span_bug!(lifetime.ident.span, "{}", bug_msg);
- }
- }
- }
-
- /// This collect lifetimes that are elided, for nodes like `Foo<T>` where there are no explicit
- /// lifetime nodes. Is equivalent to having "pseudo" nodes introduced for each of the node ids
- /// in the list start..end.
- fn record_elided_anchor(&mut self, node_id: NodeId, span: Span) {
- if let Some(LifetimeRes::ElidedAnchor { start, end }) =
- self.resolver.get_lifetime_res(node_id)
- {
- for i in start..end {
- let lifetime = Lifetime { id: i, ident: Ident::new(kw::UnderscoreLifetime, span) };
- self.record_lifetime_use(lifetime);
- }
- }
- }
-}
-
-impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
- fn visit_lifetime(&mut self, lifetime: &'ast Lifetime, _: LifetimeCtxt) {
- self.record_lifetime_use(*lifetime);
- }
-
- fn visit_path_segment(&mut self, path_segment: &'ast PathSegment) {
- self.record_elided_anchor(path_segment.id, path_segment.ident.span);
- visit::walk_path_segment(self, path_segment);
- }
-
- fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef) {
- self.current_binders.push(t.trait_ref.ref_id);
-
- visit::walk_poly_trait_ref(self, t);
-
- self.current_binders.pop();
- }
-
- fn visit_ty(&mut self, t: &'ast Ty) {
- match &t.kind {
- TyKind::Path(None, _) => {
- // We can sometimes encounter bare trait objects
- // which are represented in AST as paths.
- if let Some(partial_res) = self.resolver.get_partial_res(t.id)
- && let Some(Res::Def(DefKind::Trait | DefKind::TraitAlias, _)) =
- partial_res.full_res()
- {
- self.current_binders.push(t.id);
- visit::walk_ty(self, t);
- self.current_binders.pop();
- } else {
- visit::walk_ty(self, t);
- }
- }
- TyKind::BareFn(_) => {
- self.current_binders.push(t.id);
- visit::walk_ty(self, t);
- self.current_binders.pop();
- }
- TyKind::Ref(None, _) | TyKind::PinnedRef(None, _) => {
- self.record_elided_anchor(t.id, t.span);
- visit::walk_ty(self, t);
- }
- TyKind::ImplTrait(opaque_ty_node_id, bounds) => {
- self.visit_opaque(*opaque_ty_node_id, bounds, t.span)
- }
- _ => {
- visit::walk_ty(self, t);
- }
- }
- }
-}
-
-pub(crate) fn lifetimes_for_opaque(
- resolver: &mut ResolverAstLowering,
- always_capture_in_scope: bool,
- opaque_ty_node_id: NodeId,
- bounds: &GenericBounds,
- span: Span,
-) -> FxIndexSet<Lifetime> {
- let mut visitor = LifetimeCollectVisitor::new(resolver, always_capture_in_scope);
- visitor.visit_opaque(opaque_ty_node_id, bounds, span);
- visitor.collected_lifetimes
-}
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index 0a4f86d..dee4858 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -677,9 +677,8 @@ fn visit_ty_common(&mut self, ty: &'a Ty) {
Self::check_decl_no_pat(&bfty.decl, |span, _, _| {
self.dcx().emit_err(errors::PatternFnPointer { span });
});
- if let Extern::Implicit(_) = bfty.ext {
- let sig_span = self.sess.source_map().next_point(ty.span.shrink_to_lo());
- self.maybe_lint_missing_abi(sig_span, ty.id);
+ if let Extern::Implicit(extern_span) = bfty.ext {
+ self.maybe_lint_missing_abi(extern_span, ty.id);
}
}
TyKind::TraitObject(bounds, ..) => {
@@ -953,7 +952,7 @@ fn visit_item(&mut self, item: &'a Item) {
walk_list!(self, visit_attribute, &item.attrs);
return; // Avoid visiting again.
}
- ItemKind::ForeignMod(ForeignMod { abi, safety, .. }) => {
+ ItemKind::ForeignMod(ForeignMod { extern_span, abi, safety, .. }) => {
self.with_in_extern_mod(*safety, |this| {
let old_item = mem::replace(&mut this.extern_mod, Some(item.span));
this.visibility_not_permitted(
@@ -977,7 +976,7 @@ fn visit_item(&mut self, item: &'a Item) {
}
if abi.is_none() {
- this.maybe_lint_missing_abi(item.span, item.id);
+ this.maybe_lint_missing_abi(*extern_span, item.id);
}
visit::walk_item(this, item);
this.extern_mod = old_item;
@@ -1350,13 +1349,13 @@ fn visit_fn(&mut self, fk: FnKind<'a>, span: Span, id: NodeId) {
if let FnKind::Fn(
_,
_,
- FnSig { span: sig_span, header: FnHeader { ext: Extern::Implicit(_), .. }, .. },
+ FnSig { header: FnHeader { ext: Extern::Implicit(extern_span), .. }, .. },
_,
_,
_,
) = fk
{
- self.maybe_lint_missing_abi(*sig_span, id);
+ self.maybe_lint_missing_abi(*extern_span, id);
}
// Functions without bodies cannot have patterns.
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index c687be6..511313c 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -1489,6 +1489,7 @@ pub(crate) fn report_move_out_while_borrowed(
&borrow_msg,
&value_msg,
);
+ self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);
borrow_spans.var_path_only_subdiag(&mut err, crate::InitializationRequiringAction::Borrow);
@@ -1524,7 +1525,6 @@ pub(crate) fn report_move_out_while_borrowed(
matches!(
adj.kind,
ty::adjustment::Adjust::Borrow(ty::adjustment::AutoBorrow::Ref(
- _,
ty::adjustment::AutoBorrowMutability::Not
| ty::adjustment::AutoBorrowMutability::Mut {
allow_two_phase_borrow: ty::adjustment::AllowTwoPhase::No
@@ -1561,6 +1561,8 @@ pub(crate) fn report_use_while_mutably_borrowed(
borrow_span,
&self.describe_any_place(borrow.borrowed_place.as_ref()),
);
+ self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);
+
borrow_spans.var_subdiag(&mut err, Some(borrow.kind), |kind, var_span| {
use crate::session_diagnostics::CaptureVarCause::*;
let place = &borrow.borrowed_place;
@@ -1820,6 +1822,7 @@ pub(crate) fn report_conflicting_borrow(
unreachable!()
}
};
+ self.note_due_to_edition_2024_opaque_capture_rules(issued_borrow, &mut err);
if issued_spans == borrow_spans {
borrow_spans.var_subdiag(&mut err, Some(gen_borrow_kind), |kind, var_span| {
@@ -2860,7 +2863,7 @@ pub(crate) fn report_borrowed_value_does_not_live_long_enough(
debug!(?place_desc, ?explanation);
- let err = match (place_desc, explanation) {
+ let mut err = match (place_desc, explanation) {
// If the outlives constraint comes from inside the closure,
// for example:
//
@@ -2939,6 +2942,7 @@ pub(crate) fn report_borrowed_value_does_not_live_long_enough(
explanation,
),
};
+ self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);
self.buffer_error(err);
}
@@ -3777,6 +3781,7 @@ pub(crate) fn report_illegal_mutation_of_borrowed(
}
let mut err = self.cannot_assign_to_borrowed(span, loan_span, &descr_place);
+ self.note_due_to_edition_2024_opaque_capture_rules(loan, &mut err);
loan_spans.var_subdiag(&mut err, Some(loan.kind), |kind, var_span| {
use crate::session_diagnostics::CaptureVarCause::*;
diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs
index 801c7af..3b60071 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mod.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs
@@ -48,6 +48,7 @@
mod explain_borrow;
mod move_errors;
mod mutability_errors;
+mod opaque_suggestions;
mod region_errors;
pub(crate) use bound_region_errors::{ToUniverseInfo, UniverseInfo};
diff --git a/compiler/rustc_borrowck/src/diagnostics/opaque_suggestions.rs b/compiler/rustc_borrowck/src/diagnostics/opaque_suggestions.rs
new file mode 100644
index 0000000..bfd7e83
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/opaque_suggestions.rs
@@ -0,0 +1,224 @@
+#![allow(rustc::diagnostic_outside_of_impl)]
+#![allow(rustc::untranslatable_diagnostic)]
+
+use std::ops::ControlFlow;
+
+use either::Either;
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_errors::{Applicability, Diag};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::{self, ConstraintCategory, Location};
+use rustc_middle::ty::{
+ self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor,
+};
+use rustc_span::Symbol;
+
+use crate::MirBorrowckCtxt;
+use crate::borrow_set::BorrowData;
+use crate::consumers::RegionInferenceContext;
+use crate::type_check::Locations;
+
+impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
+ /// Try to note when an opaque is involved in a borrowck error and that
+ /// opaque captures lifetimes due to edition 2024.
+ // FIXME: This code is otherwise somewhat general, and could easily be adapted
+ // to explain why other things overcapture... like async fn and RPITITs.
+ pub(crate) fn note_due_to_edition_2024_opaque_capture_rules(
+ &self,
+ borrow: &BorrowData<'tcx>,
+ diag: &mut Diag<'_>,
+ ) {
+ // We look at all the locals. Why locals? Because it's the best thing
+ // I could think of that's correlated with the *instantiated* higer-ranked
+ // binder for calls, since we don't really store those anywhere else.
+ for ty in self.body.local_decls.iter().map(|local| local.ty) {
+ if !ty.has_opaque_types() {
+ continue;
+ }
+
+ let tcx = self.infcx.tcx;
+ let ControlFlow::Break((opaque_def_id, offending_region_idx, location)) = ty
+ .visit_with(&mut FindOpaqueRegion {
+ regioncx: &self.regioncx,
+ tcx,
+ borrow_region: borrow.region,
+ })
+ else {
+ continue;
+ };
+
+ // If an opaque explicitly captures a lifetime, then no need to point it out.
+ // FIXME: We should be using a better heuristic for `use<>`.
+ if tcx.rendered_precise_capturing_args(opaque_def_id).is_some() {
+ continue;
+ }
+
+ // If one of the opaque's bounds mentions the region, then no need to
+ // point it out, since it would've been captured on edition 2021 as well.
+ //
+ // Also, while we're at it, collect all the lifetimes that the opaque
+ // *does* mention. We'll use that for the `+ use<'a>` suggestion below.
+ let mut visitor = CheckExplicitRegionMentionAndCollectGenerics {
+ tcx,
+ offending_region_idx,
+ seen_opaques: [opaque_def_id].into_iter().collect(),
+ seen_lifetimes: Default::default(),
+ };
+ if tcx
+ .explicit_item_bounds(opaque_def_id)
+ .skip_binder()
+ .visit_with(&mut visitor)
+ .is_break()
+ {
+ continue;
+ }
+
+ // If we successfully located a terminator, then point it out
+ // and provide a suggestion if it's local.
+ match self.body.stmt_at(location) {
+ Either::Right(mir::Terminator { source_info, .. }) => {
+ diag.span_note(
+ source_info.span,
+ "this call may capture more lifetimes than intended, \
+ because Rust 2024 has adjusted the `impl Trait` lifetime capture rules",
+ );
+ let mut seen_generics: Vec<_> =
+ visitor.seen_lifetimes.iter().map(ToString::to_string).collect();
+ // Capture all in-scope ty/const params.
+ seen_generics.extend(
+ ty::GenericArgs::identity_for_item(tcx, opaque_def_id)
+ .iter()
+ .filter(|arg| {
+ matches!(
+ arg.unpack(),
+ ty::GenericArgKind::Type(_) | ty::GenericArgKind::Const(_)
+ )
+ })
+ .map(|arg| arg.to_string()),
+ );
+ if opaque_def_id.is_local() {
+ diag.span_suggestion_verbose(
+ tcx.def_span(opaque_def_id).shrink_to_hi(),
+ "add a precise capturing bound to avoid overcapturing",
+ format!(" + use<{}>", seen_generics.join(", ")),
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ diag.span_help(
+ tcx.def_span(opaque_def_id),
+ format!(
+ "if you can modify this crate, add a precise \
+ capturing bound to avoid overcapturing: `+ use<{}>`",
+ seen_generics.join(", ")
+ ),
+ );
+ }
+ return;
+ }
+ Either::Left(_) => {}
+ }
+ }
+ }
+}
+
+/// This visitor contains the bulk of the logic for this lint.
+struct FindOpaqueRegion<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ regioncx: &'a RegionInferenceContext<'tcx>,
+ borrow_region: ty::RegionVid,
+}
+
+impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for FindOpaqueRegion<'_, 'tcx> {
+ type Result = ControlFlow<(DefId, usize, Location), ()>;
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
+ // If we find an opaque in a local ty, then for each of its captured regions,
+ // try to find a path between that captured regions and our borrow region...
+ if let ty::Alias(ty::Opaque, opaque) = *ty.kind()
+ && let hir::OpaqueTyOrigin::FnReturn { parent, in_trait_or_impl: None } =
+ self.tcx.opaque_ty_origin(opaque.def_id)
+ {
+ let variances = self.tcx.variances_of(opaque.def_id);
+ for (idx, (arg, variance)) in std::iter::zip(opaque.args, variances).enumerate() {
+ // Skip uncaptured args.
+ if *variance == ty::Bivariant {
+ continue;
+ }
+ // We only care about regions.
+ let Some(opaque_region) = arg.as_region() else {
+ continue;
+ };
+ // Don't try to convert a late-bound region, which shouldn't exist anyways (yet).
+ if opaque_region.is_bound() {
+ continue;
+ }
+ let opaque_region_vid = self.regioncx.to_region_vid(opaque_region);
+
+ // Find a path between the borrow region and our opaque capture.
+ if let Some((path, _)) =
+ self.regioncx.find_constraint_paths_between_regions(self.borrow_region, |r| {
+ r == opaque_region_vid
+ })
+ {
+ for constraint in path {
+ // If we find a call in this path, then check if it defines the opaque.
+ if let ConstraintCategory::CallArgument(Some(call_ty)) = constraint.category
+ && let ty::FnDef(call_def_id, _) = *call_ty.kind()
+ // This function defines the opaque :D
+ && call_def_id == parent
+ && let Locations::Single(location) = constraint.locations
+ {
+ return ControlFlow::Break((opaque.def_id, idx, location));
+ }
+ }
+ }
+ }
+ }
+
+ ty.super_visit_with(self)
+ }
+}
+
+struct CheckExplicitRegionMentionAndCollectGenerics<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ offending_region_idx: usize,
+ seen_opaques: FxIndexSet<DefId>,
+ seen_lifetimes: FxIndexSet<Symbol>,
+}
+
+impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for CheckExplicitRegionMentionAndCollectGenerics<'tcx> {
+ type Result = ControlFlow<(), ()>;
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
+ match *ty.kind() {
+ ty::Alias(ty::Opaque, opaque) => {
+ if self.seen_opaques.insert(opaque.def_id) {
+ for (bound, _) in self
+ .tcx
+ .explicit_item_bounds(opaque.def_id)
+ .iter_instantiated_copied(self.tcx, opaque.args)
+ {
+ bound.visit_with(self)?;
+ }
+ }
+ ControlFlow::Continue(())
+ }
+ _ => ty.super_visit_with(self),
+ }
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> Self::Result {
+ match r.kind() {
+ ty::ReEarlyParam(param) => {
+ if param.index as usize == self.offending_region_idx {
+ ControlFlow::Break(())
+ } else {
+ self.seen_lifetimes.insert(param.name);
+ ControlFlow::Continue(())
+ }
+ }
+ _ => ControlFlow::Continue(()),
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
index b4b8373..6ca7251 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_name.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
@@ -830,7 +830,7 @@ fn give_name_if_anonymous_region_appears_in_output(&self, fr: RegionVid) -> Opti
///
/// [`OpaqueDef`]: hir::TyKind::OpaqueDef
fn get_future_inner_return_ty(&self, hir_ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
- let hir::TyKind::OpaqueDef(opaque_ty, _) = hir_ty.kind else {
+ let hir::TyKind::OpaqueDef(opaque_ty) = hir_ty.kind else {
span_bug!(
hir_ty.span,
"lowered return type of async fn is not OpaqueDef: {:?}",
diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
index 741dac9..3a2f5c3 100644
--- a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
+++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
@@ -502,7 +502,7 @@ fn get_canonical_args(&self) -> ty::GenericArgsRef<'tcx> {
}
let &Self { tcx, def_id, .. } = self;
- let origin = tcx.opaque_type_origin(def_id);
+ let origin = tcx.local_opaque_ty_origin(def_id);
let parent = match origin {
hir::OpaqueTyOrigin::FnReturn { parent, .. }
| hir::OpaqueTyOrigin::AsyncFn { parent, .. }
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 892ec3e..089b09d 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -193,7 +193,7 @@ fn make_local_place<'tcx>(
);
}
let place = if is_ssa {
- if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+ if let BackendRepr::ScalarPair(_, _) = layout.backend_repr {
CPlace::new_var_pair(fx, local, layout)
} else {
CPlace::new_var(fx, local, layout)
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index 38c322b..ad0a13d 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -78,19 +78,19 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
match self.mode {
PassMode::Ignore => smallvec![],
- PassMode::Direct(attrs) => match self.layout.abi {
- Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
+ PassMode::Direct(attrs) => match self.layout.backend_repr {
+ BackendRepr::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
AbiParam::new(scalar_to_clif_type(tcx, scalar)),
attrs
)],
- Abi::Vector { .. } => {
+ BackendRepr::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
smallvec![AbiParam::new(vector_ty)]
}
- _ => unreachable!("{:?}", self.layout.abi),
+ _ => unreachable!("{:?}", self.layout.backend_repr),
},
- PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
- Abi::ScalarPair(a, b) => {
+ PassMode::Pair(attrs_a, attrs_b) => match self.layout.backend_repr {
+ BackendRepr::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a);
let b = scalar_to_clif_type(tcx, b);
smallvec![
@@ -98,7 +98,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
]
}
- _ => unreachable!("{:?}", self.layout.abi),
+ _ => unreachable!("{:?}", self.layout.backend_repr),
},
PassMode::Cast { ref cast, pad_i32 } => {
assert!(!pad_i32, "padding support not yet implemented");
@@ -130,23 +130,23 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
match self.mode {
PassMode::Ignore => (None, vec![]),
- PassMode::Direct(_) => match self.layout.abi {
- Abi::Scalar(scalar) => {
+ PassMode::Direct(_) => match self.layout.backend_repr {
+ BackendRepr::Scalar(scalar) => {
(None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
}
- Abi::Vector { .. } => {
+ BackendRepr::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
(None, vec![AbiParam::new(vector_ty)])
}
- _ => unreachable!("{:?}", self.layout.abi),
+ _ => unreachable!("{:?}", self.layout.backend_repr),
},
- PassMode::Pair(_, _) => match self.layout.abi {
- Abi::ScalarPair(a, b) => {
+ PassMode::Pair(_, _) => match self.layout.backend_repr {
+ BackendRepr::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a);
let b = scalar_to_clif_type(tcx, b);
(None, vec![AbiParam::new(a), AbiParam::new(b)])
}
- _ => unreachable!("{:?}", self.layout.abi),
+ _ => unreachable!("{:?}", self.layout.backend_repr),
},
PassMode::Cast { ref cast, .. } => {
(None, cast_target_to_abi_params(cast).into_iter().collect())
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index a681e6d..99e3997 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -290,7 +290,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
let arg_uninhabited = fx
.mir
.args_iter()
- .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+ .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).is_uninhabited());
if arg_uninhabited {
fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
@@ -644,9 +644,9 @@ fn codegen_stmt<'tcx>(
_ => unreachable!("un op Neg for {:?}", layout.ty),
}
}
- UnOp::PtrMetadata => match layout.abi {
- Abi::Scalar(_) => CValue::zst(dest_layout),
- Abi::ScalarPair(_, _) => {
+ UnOp::PtrMetadata => match layout.backend_repr {
+ BackendRepr::Scalar(_) => CValue::zst(dest_layout),
+ BackendRepr::ScalarPair(_, _) => {
CValue::by_val(operand.load_scalar_pair(fx).1, dest_layout)
}
_ => bug!("Unexpected `PtrToMetadata` operand: {operand:?}"),
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
index d462dcd..45794a4 100644
--- a/compiler/rustc_codegen_cranelift/src/discriminant.rs
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -14,7 +14,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
variant_index: VariantIdx,
) {
let layout = place.layout();
- if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
+ if layout.for_variant(fx, variant_index).is_uninhabited() {
return;
}
match layout.variants {
@@ -80,7 +80,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
) {
let layout = value.layout();
- if layout.abi.is_uninhabited() {
+ if layout.is_uninhabited() {
return;
}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 35f0ccf..aae67948 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -51,8 +51,8 @@ fn report_atomic_type_validation_error<'tcx>(
}
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
- let (element, count) = match layout.abi {
- Abi::Vector { element, count } => (element, count),
+ let (element, count) = match layout.backend_repr {
+ BackendRepr::Vector { element, count } => (element, count),
_ => unreachable!(),
};
@@ -505,7 +505,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
- let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr {
Some(ptr.load_scalar_pair(fx).1)
} else {
None
@@ -519,7 +519,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
- let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr {
Some(ptr.load_scalar_pair(fx).1)
} else {
None
@@ -693,7 +693,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = fx.layout_of(ty);
let msg_str = with_no_visible_paths!({
with_no_trimmed_paths!({
- if layout.abi.is_uninhabited() {
+ if layout.is_uninhabited() {
// Use this error even for the other intrinsics as it is more precise.
format!("attempted to instantiate uninhabited type `{}`", ty)
} else if intrinsic == sym::assert_zero_valid {
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index b6f9ce8..fc3bd0a 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -40,6 +40,7 @@
use cranelift_codegen::isa::TargetIsa;
use cranelift_codegen::settings::{self, Configurable};
use rustc_codegen_ssa::CodegenResults;
+use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_errors::ErrorGuaranteed;
@@ -92,6 +93,7 @@ mod prelude {
StackSlotData, StackSlotKind, TrapCode, Type, Value, types,
};
pub(crate) use cranelift_module::{self, DataDescription, FuncId, Linkage, Module};
+ pub(crate) use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx};
pub(crate) use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
pub(crate) use rustc_index::Idx;
@@ -101,7 +103,6 @@ mod prelude {
self, FloatTy, Instance, InstanceKind, IntTy, ParamEnv, Ty, TyCtxt, UintTy,
};
pub(crate) use rustc_span::Span;
- pub(crate) use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx};
pub(crate) use crate::abi::*;
pub(crate) use crate::base::{codegen_operand, codegen_place};
@@ -260,7 +261,9 @@ fn link(
}
fn target_triple(sess: &Session) -> target_lexicon::Triple {
- match sess.target.llvm_target.parse() {
+ // FIXME(madsmtm): Use `sess.target.llvm_target` once target-lexicon supports unversioned macOS.
+ // See <https://github.com/bytecodealliance/target-lexicon/pull/113>
+ match versioned_llvm_target(sess).parse() {
Ok(triple) => triple,
Err(err) => sess.dcx().fatal(format!("target not recognized: {}", err)),
}
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index fd77502..900d7e6 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -131,8 +131,8 @@ pub(crate) fn dyn_star_force_data_on_stack(
match self.0 {
CValueInner::ByRef(ptr, None) => {
- let (a_scalar, b_scalar) = match self.1.abi {
- Abi::ScalarPair(a, b) => (a, b),
+ let (a_scalar, b_scalar) = match self.1.backend_repr {
+ BackendRepr::ScalarPair(a, b) => (a, b),
_ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
};
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
@@ -164,15 +164,15 @@ pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
}
}
- /// Load a value with layout.abi of scalar
+ /// Load a value with layout.backend_repr of scalar
#[track_caller]
pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
- let clif_ty = match layout.abi {
- Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
- Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+ let clif_ty = match layout.backend_repr {
+ BackendRepr::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+ BackendRepr::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
.by(u32::try_from(count).unwrap())
.unwrap(),
_ => unreachable!("{:?}", layout.ty),
@@ -187,14 +187,14 @@ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
}
}
- /// Load a value pair with layout.abi of scalar pair
+ /// Load a value pair with layout.backend_repr of scalar pair
#[track_caller]
pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
- let (a_scalar, b_scalar) = match layout.abi {
- Abi::ScalarPair(a, b) => (a, b),
+ let (a_scalar, b_scalar) = match layout.backend_repr {
+ BackendRepr::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self),
};
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
@@ -222,8 +222,8 @@ pub(crate) fn value_field(
let layout = self.1;
match self.0 {
CValueInner::ByVal(_) => unreachable!(),
- CValueInner::ByValPair(val1, val2) => match layout.abi {
- Abi::ScalarPair(_, _) => {
+ CValueInner::ByValPair(val1, val2) => match layout.backend_repr {
+ BackendRepr::ScalarPair(_, _) => {
let val = match field.as_u32() {
0 => val1,
1 => val2,
@@ -232,7 +232,7 @@ pub(crate) fn value_field(
let field_layout = layout.field(&*fx, usize::from(field));
CValue::by_val(val, field_layout)
}
- _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ _ => unreachable!("value_field for ByValPair with abi {:?}", layout.backend_repr),
},
CValueInner::ByRef(ptr, None) => {
let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
@@ -360,7 +360,7 @@ pub(crate) fn const_val(
pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
- assert_eq!(self.layout().abi, layout.abi);
+ assert_eq!(self.layout().backend_repr, layout.backend_repr);
CValue(self.0, layout)
}
}
@@ -609,8 +609,8 @@ fn transmute_scalar<'tcx>(
let dst_layout = self.layout();
match self.inner {
CPlaceInner::Var(_local, var) => {
- let data = match from.1.abi {
- Abi::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
+ let data = match from.1.backend_repr {
+ BackendRepr::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
_ => {
let (ptr, meta) = from.force_stack(fx);
assert!(meta.is_none());
@@ -621,8 +621,10 @@ fn transmute_scalar<'tcx>(
transmute_scalar(fx, var, data, dst_ty);
}
CPlaceInner::VarPair(_local, var1, var2) => {
- let (data1, data2) = match from.1.abi {
- Abi::ScalarPair(_, _) => CValue(from.0, dst_layout).load_scalar_pair(fx),
+ let (data1, data2) = match from.1.backend_repr {
+ BackendRepr::ScalarPair(_, _) => {
+ CValue(from.0, dst_layout).load_scalar_pair(fx)
+ }
_ => {
let (ptr, meta) = from.force_stack(fx);
assert!(meta.is_none());
@@ -635,7 +637,9 @@ fn transmute_scalar<'tcx>(
}
CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
CPlaceInner::Addr(to_ptr, None) => {
- if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+ if dst_layout.size == Size::ZERO
+ || dst_layout.backend_repr == BackendRepr::Uninhabited
+ {
return;
}
@@ -646,23 +650,28 @@ fn transmute_scalar<'tcx>(
CValueInner::ByVal(val) => {
to_ptr.store(fx, val, flags);
}
- CValueInner::ByValPair(val1, val2) => match from.layout().abi {
- Abi::ScalarPair(a_scalar, b_scalar) => {
+ CValueInner::ByValPair(val1, val2) => match from.layout().backend_repr {
+ BackendRepr::ScalarPair(a_scalar, b_scalar) => {
let b_offset =
scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, val1, flags);
to_ptr.offset(fx, b_offset).store(fx, val2, flags);
}
- _ => bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi),
+ _ => {
+ bug!(
+ "Non ScalarPair repr {:?} for ByValPair CValue",
+ dst_layout.backend_repr
+ )
+ }
},
CValueInner::ByRef(from_ptr, None) => {
- match from.layout().abi {
- Abi::Scalar(_) => {
+ match from.layout().backend_repr {
+ BackendRepr::Scalar(_) => {
let val = from.load_scalar(fx);
to_ptr.store(fx, val, flags);
return;
}
- Abi::ScalarPair(a_scalar, b_scalar) => {
+ BackendRepr::ScalarPair(a_scalar, b_scalar) => {
let b_offset =
scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
let (val1, val2) = from.load_scalar_pair(fx);
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index 14c607c..82b6178 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -47,7 +47,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
idx: usize,
) -> (Pointer, Value) {
let (ptr, vtable) = 'block: {
- if let Abi::Scalar(_) = arg.layout().abi {
+ if let BackendRepr::Scalar(_) = arg.layout().backend_repr {
while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
let (idx, _) = arg
.layout()
@@ -68,7 +68,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
}
}
- if let Abi::ScalarPair(_, _) = arg.layout().abi {
+ if let BackendRepr::ScalarPair(_, _) = arg.layout().backend_repr {
let (ptr, vtable) = arg.load_scalar_pair(fx);
(Pointer::new(ptr), vtable)
} else {
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 7c52cba..e6ae7cf 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -1016,11 +1016,11 @@ fn scalar_load_metadata<'a, 'gcc, 'tcx>(
OperandValue::Ref(place.val)
} else if place.layout.is_gcc_immediate() {
let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
- if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+ if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
scalar_load_metadata(self, load, scalar);
}
OperandValue::Immediate(self.to_immediate(load, place.layout))
- } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+ } else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr {
let b_offset = a.size(self).align_to(b.align(self).abi);
let mut load = |i, scalar: &abi::Scalar, align| {
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 972d663..b0298a3 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -294,13 +294,13 @@ fn codegen_intrinsic_call(
}
sym::raw_eq => {
- use rustc_target::abi::Abi::*;
+ use rustc_abi::BackendRepr::*;
let tp_ty = fn_args.type_at(0);
let layout = self.layout_of(tp_ty).layout;
- let _use_integer_compare = match layout.abi() {
+ let _use_integer_compare = match layout.backend_repr() {
Scalar(_) | ScalarPair(_, _) => true,
Uninhabited | Vector { .. } => false,
- Aggregate { .. } => {
+ Memory { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
// so we re-use that same threshold here.
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index db874af..0efdf36 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -3,7 +3,7 @@
use gccjit::{Struct, Type};
use rustc_abi as abi;
use rustc_abi::Primitive::*;
-use rustc_abi::{Abi, FieldsShape, Integer, PointeeInfo, Size, Variants};
+use rustc_abi::{BackendRepr, FieldsShape, Integer, PointeeInfo, Size, Variants};
use rustc_codegen_ssa::traits::{
BaseTypeCodegenMethods, DerivedTypeCodegenMethods, LayoutTypeCodegenMethods,
};
@@ -60,9 +60,9 @@ fn uncached_gcc_type<'gcc, 'tcx>(
layout: TyAndLayout<'tcx>,
defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>,
) -> Type<'gcc> {
- match layout.abi {
- Abi::Scalar(_) => bug!("handled elsewhere"),
- Abi::Vector { ref element, count } => {
+ match layout.backend_repr {
+ BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+ BackendRepr::Vector { ref element, count } => {
let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
let element =
// NOTE: gcc doesn't allow pointer types in vectors.
@@ -74,7 +74,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
};
return cx.context.new_vector_type(element, count);
}
- Abi::ScalarPair(..) => {
+ BackendRepr::ScalarPair(..) => {
return cx.type_struct(
&[
layout.scalar_pair_element_gcc_type(cx, 0),
@@ -83,7 +83,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
false,
);
}
- Abi::Uninhabited | Abi::Aggregate { .. } => {}
+ BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {}
}
let name = match *layout.ty.kind() {
@@ -176,16 +176,21 @@ fn pointee_info_at<'gcc>(
impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
fn is_gcc_immediate(&self) -> bool {
- match self.abi {
- Abi::Scalar(_) | Abi::Vector { .. } => true,
- Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+ match self.backend_repr {
+ BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+ BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+ false
+ }
}
}
fn is_gcc_scalar_pair(&self) -> bool {
- match self.abi {
- Abi::ScalarPair(..) => true,
- Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+ match self.backend_repr {
+ BackendRepr::ScalarPair(..) => true,
+ BackendRepr::Uninhabited
+ | BackendRepr::Scalar(_)
+ | BackendRepr::Vector { .. }
+ | BackendRepr::Memory { .. } => false,
}
}
@@ -205,7 +210,7 @@ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
- if let Abi::Scalar(ref scalar) = self.abi {
+ if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
// Use a different cache for scalars because pointers to DSTs
// can be either wide or thin (data pointers of wide pointers).
if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
@@ -261,7 +266,7 @@ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
}
fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
- if let Abi::Scalar(ref scalar) = self.abi {
+ if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
if scalar.is_bool() {
return cx.type_i1();
}
@@ -299,8 +304,8 @@ fn scalar_pair_element_gcc_type<'gcc>(
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
- let (a, b) = match self.abi {
- Abi::ScalarPair(ref a, ref b) => (a, b),
+ let (a, b) = match self.backend_repr {
+ BackendRepr::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
};
let scalar = [a, b][index];
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 8a1ee48..855ca01 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -458,7 +458,7 @@ fn apply_attrs_llfn(
match &self.ret.mode {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
- if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+ if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
}
}
@@ -495,7 +495,7 @@ fn apply_attrs_llfn(
}
PassMode::Direct(attrs) => {
let i = apply(attrs);
- if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
}
}
@@ -510,7 +510,9 @@ fn apply_attrs_llfn(
PassMode::Pair(a, b) => {
let i = apply(a);
let ii = apply(b);
- if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
+ if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
+ arg.layout.backend_repr
+ {
apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
}
@@ -570,7 +572,7 @@ fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll V
}
if bx.cx.sess().opts.optimize != config::OptLevel::No
&& llvm_util::get_version() < (19, 0, 0)
- && let abi::Abi::Scalar(scalar) = self.ret.layout.abi
+ && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
&& matches!(scalar.primitive(), Int(..))
// If the value is a boolean, the range is 0..2 and that ultimately
// become 0..0 when the type becomes i1, which would be rejected
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 3c30822..5375896 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
) -> &'ll Value {
use InlineAsmRegClass::*;
let dl = &bx.tcx.data_layout;
- match (reg, layout.abi) {
- (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ match (reg, layout.backend_repr) {
+ (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
value
}
}
- (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+ (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
}
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
}
- (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+ (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
- (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
bx.bitcast(value, bx.cx.type_i64())
}
(
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
- Abi::Vector { .. },
+ BackendRepr::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
(
X86(
@@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
@@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.insert_element(
bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Vector { element, count: count @ (8 | 16) },
+ BackendRepr::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
- (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+ (
+ Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ BackendRepr::Scalar(s),
+ ) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f32())
} else {
@@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f64())
@@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
| ArmInlineAsmRegClass::qreg_low4
| ArmInlineAsmRegClass::qreg_low8,
),
- Abi::Vector { element, count: count @ (4 | 8) },
+ BackendRepr::Vector { element, count: count @ (4 | 8) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
- (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
_ => value,
}
}
- (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+ (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
{
@@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
instance: Instance<'_>,
) -> &'ll Value {
use InlineAsmRegClass::*;
- match (reg, layout.abi) {
- (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ match (reg, layout.backend_repr) {
+ (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
bx.extract_element(value, bx.const_i32(0))
} else {
value
}
}
- (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+ (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
value = bx.extract_element(value, bx.const_i32(0));
@@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
}
value
}
- (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+ (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
- (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
bx.bitcast(value, bx.cx.type_f64())
}
(
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
- Abi::Vector { .. },
+ BackendRepr::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
(
X86(
@@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
@@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
bx.extract_element(value, bx.const_usize(0))
@@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Vector { element, count: count @ (8 | 16) },
+ BackendRepr::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
- (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+ (
+ Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ BackendRepr::Scalar(s),
+ ) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i32())
} else {
@@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i64())
@@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
| ArmInlineAsmRegClass::qreg_low4
| ArmInlineAsmRegClass::qreg_low8,
),
- Abi::Vector { element, count: count @ (4 | 8) },
+ BackendRepr::Vector { element, count: count @ (4 | 8) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
- (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
_ => value,
}
}
- (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+ (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
{
@@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
instance: Instance<'_>,
) -> &'ll Type {
use InlineAsmRegClass::*;
- match (reg, layout.abi) {
- (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ match (reg, layout.backend_repr) {
+ (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
cx.type_vector(cx.type_i8(), 8)
} else {
layout.llvm_type(cx)
}
}
- (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+ (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
let elem_ty = llvm_asm_scalar_type(cx, s);
let count = 16 / layout.size.bytes();
cx.type_vector(elem_ty, count)
}
- (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+ (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(cx, element);
cx.type_vector(elem_ty, count * 2)
}
- (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
cx.type_i64()
}
(
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
- Abi::Vector { .. },
+ BackendRepr::Vector { .. },
) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
(
X86(
@@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
@@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
(
X86(
@@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
- Abi::Vector { element, count: count @ (8 | 16) },
+ BackendRepr::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
- (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+ (
+ Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ BackendRepr::Scalar(s),
+ ) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
cx.type_f32()
} else {
@@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
- Abi::Scalar(s),
+ BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
cx.type_f64()
@@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| ArmInlineAsmRegClass::qreg_low4
| ArmInlineAsmRegClass::qreg_low8,
),
- Abi::Vector { element, count: count @ (4 | 8) },
+ BackendRepr::Vector { element, count: count @ (4 | 8) },
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
- (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
_ => layout.llvm_type(cx),
}
}
- (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+ (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
{
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 2c5ec9d..64bb22e 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -232,11 +232,6 @@ fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
return None;
}
- // probestack doesn't play nice either with gcov profiling.
- if cx.sess().opts.unstable_opts.profile {
- return None;
- }
-
let attr_value = match cx.sess().target.stack_probes {
StackProbeType::None => return None,
// Request LLVM to generate the probes inline. If the given LLVM version does not support
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index bfa9e8b..01e2c30 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -9,6 +9,7 @@
LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
};
use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
TargetMachineFactoryFn,
@@ -211,7 +212,7 @@ pub(crate) fn target_machine_factory(
singlethread = false;
}
- let triple = SmallCStr::new(&sess.target.llvm_target);
+ let triple = SmallCStr::new(&versioned_llvm_target(sess));
let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
let features = CString::new(target_features.join(",")).unwrap();
let abi = SmallCStr::new(&sess.target.llvm_abiname);
@@ -591,7 +592,6 @@ pub(crate) unsafe fn llvm_optimize(
pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
config.instrument_coverage,
instr_profile_output_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
- config.instrument_gcov,
pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
config.debug_info_for_profiling,
llvm_selfprofiler,
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index f8eb3e0..15883c9 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -543,13 +543,13 @@ fn scalar_load_metadata<'a, 'll, 'tcx>(
}
let llval = const_llval.unwrap_or_else(|| {
let load = self.load(llty, place.val.llval, place.val.align);
- if let abi::Abi::Scalar(scalar) = place.layout.abi {
+ if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
}
load
});
OperandValue::Immediate(self.to_immediate(llval, place.layout))
- } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+ } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
let b_offset = a.size(self).align_to(b.align(self).abi);
let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 9778ff4..03f4fb5 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -3,6 +3,7 @@
use std::ffi::{CStr, c_uint};
use std::str;
+use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::errors as ssa_errors;
use rustc_codegen_ssa::traits::*;
@@ -148,6 +149,11 @@ pub(crate) unsafe fn create_module<'ll>(
target_data_layout =
target_data_layout.replace("-p270:32:32-p271:32:32-p272:64:64", "");
}
+ if sess.target.arch.starts_with("sparc") {
+ // LLVM 20 updates the sparc layout to correctly align 128 bit integers to 128 bit.
+ // See https://github.com/llvm/llvm-project/pull/106951
+ target_data_layout = target_data_layout.replace("-i128:128", "");
+ }
}
// Ensure the data-layout values hardcoded remain the defaults.
@@ -177,7 +183,7 @@ pub(crate) unsafe fn create_module<'ll>(
llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
}
- let llvm_target = SmallCStr::new(&sess.target.llvm_target);
+ let llvm_target = SmallCStr::new(&versioned_llvm_target(sess));
unsafe {
llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
}
@@ -554,6 +560,7 @@ pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>>
/// Extra state that is only available when coverage instrumentation is enabled.
#[inline]
+ #[track_caller]
pub(crate) fn coverage_cx(&self) -> &coverageinfo::CrateCoverageContext<'ll, 'tcx> {
self.coverage_cx.as_ref().expect("only called when coverage instrumentation is enabled")
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 8edd788..f637819 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -54,7 +54,11 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
add_unused_functions(cx);
}
- let function_coverage_map = cx.coverage_cx().take_function_coverage_map();
+ // FIXME(#132395): Can this be none even when coverage is enabled?
+ let function_coverage_map = match cx.coverage_cx {
+ Some(ref cx) => cx.take_function_coverage_map(),
+ None => return,
+ };
if function_coverage_map.is_empty() {
// This module has no functions with coverage instrumentation
return;
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index a298ed8..e4ff508 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -152,7 +152,12 @@ fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) {
return;
};
- let mut coverage_map = bx.coverage_cx().function_coverage_map.borrow_mut();
+ // FIXME(#132395): Unwrapping `coverage_cx` here has led to ICEs in the
+ // wild, so keep this early-return until we understand why.
+ let mut coverage_map = match bx.coverage_cx {
+ Some(ref cx) => cx.function_coverage_map.borrow_mut(),
+ None => return,
+ };
let func_coverage = coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info));
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 9064cfa..0d1fd01 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -7,7 +7,6 @@
use libc::{c_char, c_longlong, c_uint};
use rustc_codegen_ssa::debuginfo::type_names::{VTableNameKind, cpp_like_debuginfo};
use rustc_codegen_ssa::traits::*;
-use rustc_fs_util::path_to_c_string;
use rustc_hir::def::{CtorKind, DefKind};
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::bug;
@@ -979,33 +978,8 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
debug_name_table_kind,
);
- if tcx.sess.opts.unstable_opts.profile {
- let default_gcda_path = &output_filenames.with_extension("gcda");
- let gcda_path =
- tcx.sess.opts.unstable_opts.profile_emit.as_ref().unwrap_or(default_gcda_path);
-
- let gcov_cu_info = [
- path_to_mdstring(debug_context.llcontext, &output_filenames.with_extension("gcno")),
- path_to_mdstring(debug_context.llcontext, gcda_path),
- unit_metadata,
- ];
- let gcov_metadata = llvm::LLVMMDNodeInContext2(
- debug_context.llcontext,
- gcov_cu_info.as_ptr(),
- gcov_cu_info.len(),
- );
- let val = llvm::LLVMMetadataAsValue(debug_context.llcontext, gcov_metadata);
-
- llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, c"llvm.gcov".as_ptr(), val);
- }
-
return unit_metadata;
};
-
- fn path_to_mdstring<'ll>(llcx: &'ll llvm::Context, path: &Path) -> &'ll llvm::Metadata {
- let path_str = path_to_c_string(path);
- unsafe { llvm::LLVMMDStringInContext2(llcx, path_str.as_ptr(), path_str.as_bytes().len()) }
- }
}
/// Creates a `DW_TAG_member` entry inside the DIE represented by the given `type_di_node`.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 72e723a..b6c20cd 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -55,7 +55,6 @@
/// A context object for maintaining all state needed by the debuginfo module.
pub(crate) struct CodegenUnitDebugContext<'ll, 'tcx> {
- llcontext: &'ll llvm::Context,
llmod: &'ll llvm::Module,
builder: &'ll mut DIBuilder<'ll>,
created_files: RefCell<UnordMap<Option<(StableSourceFileId, SourceFileHash)>, &'ll DIFile>>,
@@ -78,9 +77,7 @@ pub(crate) fn new(llmod: &'ll llvm::Module) -> Self {
debug!("CodegenUnitDebugContext::new");
let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
// DIBuilder inherits context from the module, so we'd better use the same one
- let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
CodegenUnitDebugContext {
- llcontext,
llmod,
builder,
created_files: Default::default(),
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index d04b525..c77e00a 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -258,8 +258,8 @@ fn codegen_intrinsic_call(
self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
}
sym::va_arg => {
- match fn_abi.ret.layout.abi {
- abi::Abi::Scalar(scalar) => {
+ match fn_abi.ret.layout.backend_repr {
+ abi::BackendRepr::Scalar(scalar) => {
match scalar.primitive() {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -436,13 +436,13 @@ fn codegen_intrinsic_call(
}
sym::raw_eq => {
- use abi::Abi::*;
+ use abi::BackendRepr::*;
let tp_ty = fn_args.type_at(0);
let layout = self.layout_of(tp_ty).layout;
- let use_integer_compare = match layout.abi() {
+ let use_integer_compare = match layout.backend_repr() {
Scalar(_) | ScalarPair(_, _) => true,
Uninhabited | Vector { .. } => false,
- Aggregate { .. } => {
+ Memory { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
// so we re-use that same threshold here.
@@ -549,7 +549,8 @@ fn codegen_intrinsic_call(
}
let llret_ty = if ret_ty.is_simd()
- && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+ && let abi::BackendRepr::Memory { .. } =
+ self.layout_of(ret_ty).layout.backend_repr
{
let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 8fc586d..5fad758 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2269,7 +2269,6 @@ pub fn LLVMRustOptimize<'a>(
PGOUsePath: *const c_char,
InstrumentCoverage: bool,
InstrProfileOutput: *const c_char,
- InstrumentGCOV: bool,
PGOSampleUsePath: *const c_char,
DebugInfoForProfiling: bool,
llvm_selfprofiler: *mut c_void,
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 6be4c3f..2b05e24 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,7 +1,7 @@
use std::fmt::Write;
use rustc_abi::Primitive::{Float, Int, Pointer};
-use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants};
+use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
layout: TyAndLayout<'tcx>,
defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
) -> &'a Type {
- match layout.abi {
- Abi::Scalar(_) => bug!("handled elsewhere"),
- Abi::Vector { element, count } => {
+ match layout.backend_repr {
+ BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+ BackendRepr::Vector { element, count } => {
let element = layout.scalar_llvm_type_at(cx, element);
return cx.type_vector(element, count);
}
- Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+ BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
}
let name = match layout.ty.kind() {
@@ -170,16 +170,21 @@ fn scalar_pair_element_llvm_type<'a>(
impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
fn is_llvm_immediate(&self) -> bool {
- match self.abi {
- Abi::Scalar(_) | Abi::Vector { .. } => true,
- Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+ match self.backend_repr {
+ BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+ BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+ false
+ }
}
}
fn is_llvm_scalar_pair(&self) -> bool {
- match self.abi {
- Abi::ScalarPair(..) => true,
- Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+ match self.backend_repr {
+ BackendRepr::ScalarPair(..) => true,
+ BackendRepr::Uninhabited
+ | BackendRepr::Scalar(_)
+ | BackendRepr::Vector { .. }
+ | BackendRepr::Memory { .. } => false,
}
}
@@ -198,7 +203,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
- if let Abi::Scalar(scalar) = self.abi {
+ if let BackendRepr::Scalar(scalar) = self.backend_repr {
// Use a different cache for scalars because pointers to DSTs
// can be either wide or thin (data pointers of wide pointers).
if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -248,13 +253,13 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
}
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
- match self.abi {
- Abi::Scalar(scalar) => {
+ match self.backend_repr {
+ BackendRepr::Scalar(scalar) => {
if scalar.is_bool() {
return cx.type_i1();
}
}
- Abi::ScalarPair(..) => {
+ BackendRepr::ScalarPair(..) => {
// An immediate pair always contains just the two elements, without any padding
// filler, as it should never be stored to memory.
return cx.type_struct(
@@ -287,7 +292,7 @@ fn scalar_pair_element_llvm_type<'a>(
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
- let Abi::ScalarPair(a, b) = self.abi else {
+ let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
};
let scalar = [a, b][index];
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index d072749..3b34eb0 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -2,6 +2,12 @@
codegen_ssa_add_native_library = failed to add native library {$library_path}: {$error}
+codegen_ssa_apple_deployment_target_invalid =
+ failed to parse deployment target specified in {$env_var}: {$error}
+
+codegen_ssa_apple_deployment_target_too_low =
+ deployment target in {$env_var} was set to {$version}, but the minimum supported by `rustc` is {$os_min}
+
codegen_ssa_apple_sdk_error_sdk_path = failed to get {$sdk_name} SDK path: {$error}
codegen_ssa_archive_build_failure = failed to build archive at `{$path}`: {$error}
diff --git a/compiler/rustc_codegen_ssa/src/back/apple.rs b/compiler/rustc_codegen_ssa/src/back/apple.rs
new file mode 100644
index 0000000..93d90cd
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/apple.rs
@@ -0,0 +1,171 @@
+use std::env;
+use std::fmt::{Display, from_fn};
+use std::num::ParseIntError;
+
+use rustc_session::Session;
+use rustc_target::spec::Target;
+
+use crate::errors::AppleDeploymentTarget;
+
+#[cfg(test)]
+mod tests;
+
+pub(super) fn macho_platform(target: &Target) -> u32 {
+ match (&*target.os, &*target.abi) {
+ ("macos", _) => object::macho::PLATFORM_MACOS,
+ ("ios", "macabi") => object::macho::PLATFORM_MACCATALYST,
+ ("ios", "sim") => object::macho::PLATFORM_IOSSIMULATOR,
+ ("ios", _) => object::macho::PLATFORM_IOS,
+ ("watchos", "sim") => object::macho::PLATFORM_WATCHOSSIMULATOR,
+ ("watchos", _) => object::macho::PLATFORM_WATCHOS,
+ ("tvos", "sim") => object::macho::PLATFORM_TVOSSIMULATOR,
+ ("tvos", _) => object::macho::PLATFORM_TVOS,
+ ("visionos", "sim") => object::macho::PLATFORM_XROSSIMULATOR,
+ ("visionos", _) => object::macho::PLATFORM_XROS,
+ _ => unreachable!("tried to get Mach-O platform for non-Apple target"),
+ }
+}
+
+/// Deployment target or SDK version.
+///
+/// The sizes of the numbers in here are limited by Mach-O's `LC_BUILD_VERSION`.
+type OSVersion = (u16, u8, u8);
+
+/// Parse an OS version triple (SDK version or deployment target).
+fn parse_version(version: &str) -> Result<OSVersion, ParseIntError> {
+ if let Some((major, minor)) = version.split_once('.') {
+ let major = major.parse()?;
+ if let Some((minor, patch)) = minor.split_once('.') {
+ Ok((major, minor.parse()?, patch.parse()?))
+ } else {
+ Ok((major, minor.parse()?, 0))
+ }
+ } else {
+ Ok((version.parse()?, 0, 0))
+ }
+}
+
+pub fn pretty_version(version: OSVersion) -> impl Display {
+ let (major, minor, patch) = version;
+ from_fn(move |f| {
+ write!(f, "{major}.{minor}")?;
+ if patch != 0 {
+ write!(f, ".{patch}")?;
+ }
+ Ok(())
+ })
+}
+
+/// Minimum operating system versions currently supported by `rustc`.
+fn os_minimum_deployment_target(os: &str) -> OSVersion {
+ // When bumping a version in here, remember to update the platform-support docs too.
+ //
+ // NOTE: The defaults may change in future `rustc` versions, so if you are looking for the
+ // default deployment target, prefer:
+ // ```
+ // $ rustc --print deployment-target
+ // ```
+ match os {
+ "macos" => (10, 12, 0),
+ "ios" => (10, 0, 0),
+ "tvos" => (10, 0, 0),
+ "watchos" => (5, 0, 0),
+ "visionos" => (1, 0, 0),
+ _ => unreachable!("tried to get deployment target for non-Apple platform"),
+ }
+}
+
+/// The deployment target for the given target.
+///
+/// This is similar to `os_minimum_deployment_target`, except that on certain targets it makes sense
+/// to raise the minimum OS version.
+///
+/// This matches what LLVM does, see in part:
+/// <https://github.com/llvm/llvm-project/blob/llvmorg-18.1.8/llvm/lib/TargetParser/Triple.cpp#L1900-L1932>
+fn minimum_deployment_target(target: &Target) -> OSVersion {
+ match (&*target.os, &*target.arch, &*target.abi) {
+ ("macos", "aarch64", _) => (11, 0, 0),
+ ("ios", "aarch64", "macabi") => (14, 0, 0),
+ ("ios", "aarch64", "sim") => (14, 0, 0),
+ ("ios", _, _) if target.llvm_target.starts_with("arm64e") => (14, 0, 0),
+ // Mac Catalyst defaults to 13.1 in Clang.
+ ("ios", _, "macabi") => (13, 1, 0),
+ ("tvos", "aarch64", "sim") => (14, 0, 0),
+ ("watchos", "aarch64", "sim") => (7, 0, 0),
+ (os, _, _) => os_minimum_deployment_target(os),
+ }
+}
+
+/// Name of the environment variable used to fetch the deployment target on the given OS.
+fn deployment_target_env_var(os: &str) -> &'static str {
+ match os {
+ "macos" => "MACOSX_DEPLOYMENT_TARGET",
+ "ios" => "IPHONEOS_DEPLOYMENT_TARGET",
+ "watchos" => "WATCHOS_DEPLOYMENT_TARGET",
+ "tvos" => "TVOS_DEPLOYMENT_TARGET",
+ "visionos" => "XROS_DEPLOYMENT_TARGET",
+ _ => unreachable!("tried to get deployment target env var for non-Apple platform"),
+ }
+}
+
+/// Get the deployment target based on the standard environment variables, or fall back to the
+/// minimum version supported by `rustc`.
+pub fn deployment_target(sess: &Session) -> OSVersion {
+ let min = minimum_deployment_target(&sess.target);
+ let env_var = deployment_target_env_var(&sess.target.os);
+
+ if let Ok(deployment_target) = env::var(env_var) {
+ match parse_version(&deployment_target) {
+ Ok(version) => {
+ let os_min = os_minimum_deployment_target(&sess.target.os);
+ // It is common that the deployment target is set a bit too low, for example on
+ // macOS Aarch64 to also target older x86_64. So we only want to warn when variable
+ // is lower than the minimum OS supported by rustc, not when the variable is lower
+ // than the minimum for a specific target.
+ if version < os_min {
+ sess.dcx().emit_warn(AppleDeploymentTarget::TooLow {
+ env_var,
+ version: pretty_version(version).to_string(),
+ os_min: pretty_version(os_min).to_string(),
+ });
+ }
+
+ // Raise the deployment target to the minimum supported.
+ version.max(min)
+ }
+ Err(error) => {
+ sess.dcx().emit_err(AppleDeploymentTarget::Invalid { env_var, error });
+ min
+ }
+ }
+ } else {
+ // If no deployment target variable is set, default to the minimum found above.
+ min
+ }
+}
+
+pub(super) fn add_version_to_llvm_target(
+ llvm_target: &str,
+ deployment_target: OSVersion,
+) -> String {
+ let mut components = llvm_target.split("-");
+ let arch = components.next().expect("apple target should have arch");
+ let vendor = components.next().expect("apple target should have vendor");
+ let os = components.next().expect("apple target should have os");
+ let environment = components.next();
+ assert_eq!(components.next(), None, "too many LLVM triple components");
+
+ let (major, minor, patch) = deployment_target;
+
+ assert!(
+ !os.contains(|c: char| c.is_ascii_digit()),
+ "LLVM target must not already be versioned"
+ );
+
+ if let Some(env) = environment {
+ // Insert version into OS, before environment
+ format!("{arch}-{vendor}-{os}{major}.{minor}.{patch}-{env}")
+ } else {
+ format!("{arch}-{vendor}-{os}{major}.{minor}.{patch}")
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/apple/tests.rs b/compiler/rustc_codegen_ssa/src/back/apple/tests.rs
new file mode 100644
index 0000000..7ccda5a
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/apple/tests.rs
@@ -0,0 +1,21 @@
+use super::{add_version_to_llvm_target, parse_version};
+
+#[test]
+fn test_add_version_to_llvm_target() {
+ assert_eq!(
+ add_version_to_llvm_target("aarch64-apple-macosx", (10, 14, 1)),
+ "aarch64-apple-macosx10.14.1"
+ );
+ assert_eq!(
+ add_version_to_llvm_target("aarch64-apple-ios-simulator", (16, 1, 0)),
+ "aarch64-apple-ios16.1.0-simulator"
+ );
+}
+
+#[test]
+fn test_parse_version() {
+ assert_eq!(parse_version("10"), Ok((10, 0, 0)));
+ assert_eq!(parse_version("10.12"), Ok((10, 12, 0)));
+ assert_eq!(parse_version("10.12.6"), Ok((10, 12, 6)));
+ assert_eq!(parse_version("9999.99.99"), Ok((9999, 99, 99)));
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index 34dc599..b01a62b 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -40,7 +40,7 @@
use rustc_target::spec::{
Cc, LinkOutputKind, LinkSelfContainedComponents, LinkSelfContainedDefault, LinkerFeatures,
LinkerFlavor, LinkerFlavorCli, Lld, PanicStrategy, RelocModel, RelroLevel, SanitizerSet,
- SplitDebuginfo, current_apple_deployment_target,
+ SplitDebuginfo,
};
use tempfile::Builder as TempFileBuilder;
use tracing::{debug, info, warn};
@@ -50,6 +50,7 @@
use super::linker::{self, Linker};
use super::metadata::{MetadataPosition, create_wrapper_file};
use super::rpath::{self, RPathConfig};
+use super::{apple, versioned_llvm_target};
use crate::{
CodegenResults, CompiledModule, CrateInfo, NativeLib, common, errors,
looks_like_rust_object_file,
@@ -2447,7 +2448,7 @@ fn add_order_independent_options(
if flavor == LinkerFlavor::Llbc {
cmd.link_args(&[
"--target",
- sess.target.llvm_target.as_ref(),
+ &versioned_llvm_target(sess),
"--target-cpu",
&codegen_results.crate_info.target_cpu,
]);
@@ -3039,7 +3040,7 @@ fn add_apple_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavo
_ => bug!("invalid OS/ABI combination for Apple target: {target_os}, {target_abi}"),
};
- let (major, minor, patch) = current_apple_deployment_target(&sess.target);
+ let (major, minor, patch) = apple::deployment_target(sess);
let min_version = format!("{major}.{minor}.{patch}");
// The SDK version is used at runtime when compiling with a newer SDK / version of Xcode:
@@ -3109,7 +3110,7 @@ fn add_apple_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavo
// The presence of `-mmacosx-version-min` makes CC default to
// macOS, and it sets the deployment target.
- let (major, minor, patch) = current_apple_deployment_target(&sess.target);
+ let (major, minor, patch) = apple::deployment_target(sess);
// Intentionally pass this as a single argument, Clang doesn't
// seem to like it otherwise.
cmd.cc_arg(&format!("-mmacosx-version-min={major}.{minor}.{patch}"));
@@ -3119,7 +3120,7 @@ fn add_apple_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavo
//
// We avoid `-m32`/`-m64`, as this is already encoded by `-arch`.
} else {
- cmd.cc_args(&["-target", &sess.target.llvm_target]);
+ cmd.cc_args(&["-target", &versioned_llvm_target(sess)]);
}
}
}
@@ -3345,7 +3346,7 @@ fn add_lld_args(
// targeting a different linker flavor on macOS, and that's also always
// the case when targeting WASM.
if sess.target.linker_flavor != sess.host.linker_flavor {
- cmd.cc_arg(format!("--target={}", sess.target.llvm_target));
+ cmd.cc_arg(format!("--target={}", versioned_llvm_target(sess)));
}
}
}
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 8857fda..a7d95d5 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -22,6 +22,8 @@
use rustc_target::abi::Endian;
use rustc_target::spec::{RelocModel, Target, ef_avr_arch};
+use super::apple;
+
/// The default metadata loader. This is used by cg_llvm and cg_clif.
///
/// # Metadata location
@@ -238,7 +240,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
file.set_macho_cpu_subtype(object::macho::CPU_SUBTYPE_ARM64E);
}
- file.set_macho_build_version(macho_object_build_version_for_target(&sess.target))
+ file.set_macho_build_version(macho_object_build_version_for_target(sess))
}
if binary_format == BinaryFormat::Coff {
// Disable the default mangler to avoid mangling the special "@feat.00" symbol name.
@@ -322,10 +324,11 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
// Set the appropriate flag based on ABI
// This needs to match LLVM `RISCVELFStreamer.cpp`
match &*sess.target.llvm_abiname {
- "" | "ilp32" | "lp64" => (),
+ "ilp32" | "lp64" => (),
"ilp32f" | "lp64f" => e_flags |= elf::EF_RISCV_FLOAT_ABI_SINGLE,
"ilp32d" | "lp64d" => e_flags |= elf::EF_RISCV_FLOAT_ABI_DOUBLE,
- "ilp32e" => e_flags |= elf::EF_RISCV_RVE,
+ // Note that the `lp64e` is still unstable as it's not (yet) part of the ELF psABI.
+ "ilp32e" | "lp64e" => e_flags |= elf::EF_RISCV_RVE,
_ => bug!("unknown RISC-V ABI name"),
}
@@ -391,7 +394,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
///
/// Since Xcode 15, Apple's LD apparently requires object files to use this load command, so this
/// returns the `MachOBuildVersion` for the target to do so.
-fn macho_object_build_version_for_target(target: &Target) -> object::write::MachOBuildVersion {
+fn macho_object_build_version_for_target(sess: &Session) -> object::write::MachOBuildVersion {
/// The `object` crate demands "X.Y.Z encoded in nibbles as xxxx.yy.zz"
/// e.g. minOS 14.0 = 0x000E0000, or SDK 16.2 = 0x00100200
fn pack_version((major, minor, patch): (u16, u8, u8)) -> u32 {
@@ -399,9 +402,8 @@ fn pack_version((major, minor, patch): (u16, u8, u8)) -> u32 {
(major << 16) | (minor << 8) | patch
}
- let platform =
- rustc_target::spec::current_apple_platform(target).expect("unknown Apple target OS");
- let min_os = rustc_target::spec::current_apple_deployment_target(target);
+ let platform = apple::macho_platform(&sess.target);
+ let min_os = apple::deployment_target(sess);
let mut build_version = object::write::MachOBuildVersion::default();
build_version.platform = platform;
diff --git a/compiler/rustc_codegen_ssa/src/back/mod.rs b/compiler/rustc_codegen_ssa/src/back/mod.rs
index 2b3a2e3..64b5d45 100644
--- a/compiler/rustc_codegen_ssa/src/back/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/back/mod.rs
@@ -1,3 +1,8 @@
+use std::borrow::Cow;
+
+use rustc_session::Session;
+
+pub mod apple;
pub mod archive;
pub(crate) mod command;
pub mod link;
@@ -7,3 +12,19 @@
pub(crate) mod rpath;
pub mod symbol_export;
pub mod write;
+
+/// The target triple depends on the deployment target, and is required to
+/// enable features such as cross-language LTO, and for picking the right
+/// Mach-O commands.
+///
+/// Certain optimizations also depend on the deployment target.
+pub fn versioned_llvm_target(sess: &Session) -> Cow<'_, str> {
+ if sess.target.is_like_osx {
+ apple::add_version_to_llvm_target(&sess.target.llvm_target, apple::deployment_target(sess))
+ .into()
+ } else {
+ // FIXME(madsmtm): Certain other targets also include a version,
+ // we might want to move that here as well.
+ Cow::Borrowed(&sess.target.llvm_target)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 8445d16..d977cca 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -90,7 +90,6 @@ pub struct ModuleConfig {
pub pgo_sample_use: Option<PathBuf>,
pub debug_info_for_profiling: bool,
pub instrument_coverage: bool,
- pub instrument_gcov: bool,
pub sanitizer: SanitizerSet,
pub sanitizer_recover: SanitizerSet,
@@ -123,12 +122,7 @@ pub struct ModuleConfig {
}
impl ModuleConfig {
- fn new(
- kind: ModuleKind,
- tcx: TyCtxt<'_>,
- no_builtins: bool,
- is_compiler_builtins: bool,
- ) -> ModuleConfig {
+ fn new(kind: ModuleKind, tcx: TyCtxt<'_>, no_builtins: bool) -> ModuleConfig {
// If it's a regular module, use `$regular`, otherwise use `$other`.
// `$regular` and `$other` are evaluated lazily.
macro_rules! if_regular {
@@ -189,13 +183,6 @@ macro_rules! if_regular {
pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
instrument_coverage: if_regular!(sess.instrument_coverage(), false),
- instrument_gcov: if_regular!(
- // compiler_builtins overrides the codegen-units settings,
- // which is incompatible with -Zprofile which requires that
- // only a single codegen unit is used per crate.
- sess.opts.unstable_opts.profile && !is_compiler_builtins,
- false
- ),
sanitizer: if_regular!(sess.opts.unstable_opts.sanitizer, SanitizerSet::empty()),
sanitizer_dataflow_abilist: if_regular!(
@@ -473,16 +460,12 @@ pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
let no_builtins = attr::contains_name(crate_attrs, sym::no_builtins);
- let is_compiler_builtins = attr::contains_name(crate_attrs, sym::compiler_builtins);
let crate_info = CrateInfo::new(tcx, target_cpu);
- let regular_config =
- ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins, is_compiler_builtins);
- let metadata_config =
- ModuleConfig::new(ModuleKind::Metadata, tcx, no_builtins, is_compiler_builtins);
- let allocator_config =
- ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins, is_compiler_builtins);
+ let regular_config = ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins);
+ let metadata_config = ModuleConfig::new(ModuleKind::Metadata, tcx, no_builtins);
+ let allocator_config = ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins);
let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
let (codegen_worker_send, codegen_worker_receive) = channel();
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index d67cf0e..cf8d1cf 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -2,6 +2,7 @@
use std::borrow::Cow;
use std::io::Error;
+use std::num::ParseIntError;
use std::path::{Path, PathBuf};
use std::process::ExitStatus;
@@ -540,6 +541,14 @@ pub(crate) struct UnsupportedArch<'a> {
}
#[derive(Diagnostic)]
+pub(crate) enum AppleDeploymentTarget {
+ #[diag(codegen_ssa_apple_deployment_target_invalid)]
+ Invalid { env_var: &'static str, error: ParseIntError },
+ #[diag(codegen_ssa_apple_deployment_target_too_low)]
+ TooLow { env_var: &'static str, version: String, os_min: String },
+}
+
+#[derive(Diagnostic)]
pub(crate) enum AppleSdkRootError<'a> {
#[diag(codegen_ssa_apple_sdk_error_sdk_path)]
SdkPath { sdk_name: &'a str, error: Error },
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index 73bfa9d..7dc8ab3 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -6,6 +6,7 @@
#![doc(rust_logo)]
#![feature(assert_matches)]
#![feature(box_patterns)]
+#![feature(debug_closure_helpers)]
#![feature(file_buffered)]
#![feature(if_let_guard)]
#![feature(let_chains)]
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index a17a127..283740f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1532,7 +1532,7 @@ fn codegen_argument(
// the load would just produce `OperandValue::Ref` instead
// of the `OperandValue::Immediate` we need for the call.
llval = bx.load(bx.backend_type(arg.layout), llval, align);
- if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if scalar.is_bool() {
bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index 15f45b2..54b9c9c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -1,8 +1,8 @@
+use rustc_abi::BackendRepr;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::Abi;
use super::FunctionCx;
use crate::errors;
@@ -86,7 +86,7 @@ pub fn immediate_const_vector(
.map(|field| {
if let Some(prim) = field.try_to_scalar() {
let layout = bx.layout_of(field_ty);
- let Abi::Scalar(scalar) = layout.abi else {
+ let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 146f55f..21d2047 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -2,6 +2,7 @@
use std::marker::PhantomData;
use std::ops::Range;
+use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx};
use rustc_data_structures::fx::FxHashMap;
use rustc_index::IndexVec;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -11,7 +12,6 @@
use rustc_session::config::DebugInfo;
use rustc_span::symbol::{Symbol, kw};
use rustc_span::{BytePos, Span, hygiene};
-use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx};
use super::operand::{OperandRef, OperandValue};
use super::place::{PlaceRef, PlaceValue};
@@ -510,7 +510,7 @@ pub(crate) fn compute_per_local_var_debug_info(
// be marked as a `LocalVariable` for MSVC debuggers to visualize
// their data correctly. (See #81894 & #88625)
let var_ty_layout = self.cx.layout_of(var_ty);
- if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+ if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr {
VariableKind::LocalVariable
} else {
VariableKind::ArgumentVariable(arg_index)
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 88ceff3..19101ec 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -4,7 +4,7 @@
use arrayvec::ArrayVec;
use either::Either;
use rustc_abi as abi;
-use rustc_abi::{Abi, Align, Size};
+use rustc_abi::{Align, BackendRepr, Size};
use rustc_middle::bug;
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
@@ -163,7 +163,7 @@ pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let val = match val {
ConstValue::Scalar(x) => {
- let Abi::Scalar(scalar) = layout.abi else {
+ let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
@@ -171,7 +171,7 @@ pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
ConstValue::Slice { data, meta } => {
- let Abi::ScalarPair(a_scalar, _) = layout.abi else {
+ let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
};
let a = Scalar::from_pointer(
@@ -221,14 +221,14 @@ fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
- match layout.abi {
- Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
+ match layout.backend_repr {
+ BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
let size = s.size(bx);
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
OperandRef { val: OperandValue::Immediate(val), layout }
}
- Abi::ScalarPair(
+ BackendRepr::ScalarPair(
a @ abi::Scalar::Initialized { .. },
b @ abi::Scalar::Initialized { .. },
) => {
@@ -322,7 +322,7 @@ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
llval: V,
layout: TyAndLayout<'tcx>,
) -> Self {
- let val = if let Abi::ScalarPair(..) = layout.abi {
+ let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
// Deconstruct the immediate aggregate.
@@ -343,7 +343,7 @@ pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let field = self.layout.field(bx.cx(), i);
let offset = self.layout.fields.offset(i);
- let mut val = match (self.val, self.layout.abi) {
+ let mut val = match (self.val, self.layout.backend_repr) {
// If the field is ZST, it has no data.
_ if field.is_zst() => OperandValue::ZeroSized,
@@ -356,7 +356,7 @@ pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
// Extract a scalar component from a pair.
- (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
+ (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
if offset.bytes() == 0 {
assert_eq!(field.size, a.size(bx.cx()));
OperandValue::Immediate(a_llval)
@@ -368,30 +368,30 @@ pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
// `#[repr(simd)]` types are also immediate.
- (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
+ (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
}
_ => bug!("OperandRef::extract_field({:?}): not applicable", self),
};
- match (&mut val, field.abi) {
+ match (&mut val, field.backend_repr) {
(OperandValue::ZeroSized, _) => {}
(
OperandValue::Immediate(llval),
- Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
+ BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. },
) => {
// Bools in union fields needs to be truncated.
*llval = bx.to_immediate(*llval, field);
}
- (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
+ (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => {
// Bools in union fields needs to be truncated.
*a = bx.to_immediate_scalar(*a, a_abi);
*b = bx.to_immediate_scalar(*b, b_abi);
}
// Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
- (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
- assert_matches!(self.layout.abi, Abi::Vector { .. });
+ (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => {
+ assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. });
let llfield_ty = bx.cx().backend_type(field);
@@ -400,7 +400,10 @@ pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx.store(*llval, llptr, field.align.abi);
*llval = bx.load(llfield_ty, llptr, field.align.abi);
}
- (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
+ (
+ OperandValue::Immediate(_),
+ BackendRepr::Uninhabited | BackendRepr::Memory { sized: false },
+ ) => {
bug!()
}
(OperandValue::Pair(..), _) => bug!(),
@@ -494,7 +497,7 @@ pub(crate) fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
}
OperandValue::Pair(a, b) => {
- let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
+ let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
};
let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
@@ -645,7 +648,7 @@ pub fn codegen_operand(
// However, some SIMD types do not actually use the vector ABI
// (in particular, packed SIMD types do not). Ensure we exclude those.
let layout = bx.layout_of(constant_ty);
- if let Abi::Vector { .. } = layout.abi {
+ if let BackendRepr::Vector { .. } = layout.backend_repr {
let (llval, ty) = self.immediate_const_vector(bx, constant);
return OperandRef {
val: OperandValue::Immediate(llval),
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 6e8c193..86cf0f9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1136,17 +1136,17 @@ fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
OperandValueKind::ZeroSized
} else if self.cx.is_backend_immediate(layout) {
assert!(!self.cx.is_backend_scalar_pair(layout));
- OperandValueKind::Immediate(match layout.abi {
- abi::Abi::Scalar(s) => s,
- abi::Abi::Vector { element, .. } => element,
+ OperandValueKind::Immediate(match layout.backend_repr {
+ abi::BackendRepr::Scalar(s) => s,
+ abi::BackendRepr::Vector { element, .. } => element,
x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
})
} else if self.cx.is_backend_scalar_pair(layout) {
- let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
+ let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
span_bug!(
self.mir.span,
"Couldn't translate {:?} as backend scalar pair",
- layout.abi,
+ layout.backend_repr,
);
};
OperandValueKind::Pair(s1, s2)
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 50a5171..768a043 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -1,13 +1,13 @@
use std::assert_matches::assert_matches;
use std::ops::Deref;
+use rustc_abi::{Align, BackendRepr, Scalar, Size, WrappingRange};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::{Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_target::abi::call::FnAbi;
-use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
@@ -162,7 +162,7 @@ fn checked_binop(
fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
- if let Abi::Scalar(scalar) = layout.abi {
+ if let BackendRepr::Scalar(scalar) = layout.backend_repr {
self.to_immediate_scalar(val, scalar)
} else {
val
diff --git a/compiler/rustc_const_eval/src/check_consts/check.rs b/compiler/rustc_const_eval/src/check_consts/check.rs
index 5210241..303c490 100644
--- a/compiler/rustc_const_eval/src/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/check_consts/check.rs
@@ -11,18 +11,19 @@
use rustc_hir::{self as hir, LangItem};
use rustc_index::bit_set::BitSet;
use rustc_infer::infer::TyCtxtInferExt;
-use rustc_infer::traits::ObligationCause;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
use rustc_middle::span_bug;
use rustc_middle::ty::adjustment::PointerCoercion;
-use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TypeVisitableExt, TypingMode};
+use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TypeVisitableExt};
use rustc_mir_dataflow::Analysis;
use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
-use rustc_span::{DUMMY_SP, Span, Symbol, sym};
+use rustc_span::{Span, Symbol, sym};
use rustc_trait_selection::error_reporting::InferCtxtErrorExt;
-use rustc_trait_selection::traits::{self, ObligationCauseCode, ObligationCtxt};
+use rustc_trait_selection::traits::{
+ Obligation, ObligationCause, ObligationCauseCode, ObligationCtxt,
+};
use tracing::{debug, instrument, trace};
use super::ops::{self, NonConstOp, Status};
@@ -360,6 +361,73 @@ fn place_may_escape(&mut self, place: &Place<'_>) -> bool {
// end of evaluation.
!is_transient
}
+
+ fn revalidate_conditional_constness(
+ &mut self,
+ callee: DefId,
+ callee_args: ty::GenericArgsRef<'tcx>,
+ call_source: CallSource,
+ call_span: Span,
+ ) {
+ let tcx = self.tcx;
+ if !tcx.is_conditionally_const(callee) {
+ return;
+ }
+
+ let const_conditions = tcx.const_conditions(callee).instantiate(tcx, callee_args);
+ // If there are any const conditions on this fn and `const_trait_impl`
+ // is not enabled, simply bail. We shouldn't be able to call conditionally
+ // const functions on stable.
+ if !const_conditions.is_empty() && !tcx.features().const_trait_impl() {
+ self.check_op(ops::FnCallNonConst {
+ callee,
+ args: callee_args,
+ span: call_span,
+ call_source,
+ feature: Some(sym::const_trait_impl),
+ });
+ return;
+ }
+
+ let infcx = tcx.infer_ctxt().build(self.body.typing_mode(tcx));
+ let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
+
+ let body_id = self.body.source.def_id().expect_local();
+ let host_polarity = match self.const_kind() {
+ hir::ConstContext::ConstFn => ty::BoundConstness::Maybe,
+ hir::ConstContext::Static(_) | hir::ConstContext::Const { .. } => {
+ ty::BoundConstness::Const
+ }
+ };
+ let const_conditions = ocx.normalize(
+ &ObligationCause::misc(call_span, body_id),
+ self.param_env,
+ const_conditions,
+ );
+ ocx.register_obligations(const_conditions.into_iter().map(|(trait_ref, span)| {
+ Obligation::new(
+ tcx,
+ ObligationCause::new(
+ call_span,
+ body_id,
+ ObligationCauseCode::WhereClause(callee, span),
+ ),
+ self.param_env,
+ trait_ref.to_host_effect_clause(tcx, host_polarity),
+ )
+ }));
+
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ // FIXME(effects): Soon this should be unconditionally delaying a bug.
+ if matches!(call_source, CallSource::Normal) && tcx.features().effects() {
+ tcx.dcx()
+ .span_delayed_bug(call_span, "this should have reported a ~const error in HIR");
+ } else {
+ infcx.err_ctxt().report_fulfillment_errors(errors);
+ }
+ }
+ }
}
impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
@@ -566,7 +634,6 @@ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location
};
let ConstCx { tcx, body, param_env, .. } = *self.ccx;
- let caller = self.def_id();
let fn_ty = func.ty(body, tcx);
@@ -584,31 +651,7 @@ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location
}
};
- // Check that all trait bounds that are marked as `~const` can be satisfied.
- //
- // Typeck only does a "non-const" check since it operates on HIR and cannot distinguish
- // which path expressions are getting called on and which path expressions are only used
- // as function pointers. This is required for correctness.
- let infcx = tcx.infer_ctxt().build(TypingMode::from_param_env(param_env));
- let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
-
- let predicates = tcx.predicates_of(callee).instantiate(tcx, fn_args);
- let cause = ObligationCause::new(
- terminator.source_info.span,
- self.body.source.def_id().expect_local(),
- ObligationCauseCode::WhereClause(callee, DUMMY_SP),
- );
- let normalized_predicates = ocx.normalize(&cause, param_env, predicates);
- ocx.register_obligations(traits::predicates_for_generics(
- |_, _| cause.clone(),
- self.param_env,
- normalized_predicates,
- ));
-
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- infcx.err_ctxt().report_fulfillment_errors(errors);
- }
+ self.revalidate_conditional_constness(callee, fn_args, call_source, *fn_span);
let mut is_trait = false;
// Attempting to call a trait method?
@@ -648,7 +691,6 @@ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location
None
};
self.check_op(ops::FnCallNonConst {
- caller,
callee,
args: fn_args,
span: *fn_span,
@@ -738,7 +780,6 @@ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location
// Trait functions are not `const fn` so we have to skip them here.
if !tcx.is_const_fn(callee) && !is_trait {
self.check_op(ops::FnCallNonConst {
- caller,
callee,
args: fn_args,
span: *fn_span,
diff --git a/compiler/rustc_const_eval/src/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs
index 56da679..dcdaafa 100644
--- a/compiler/rustc_const_eval/src/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/check_consts/mod.rs
@@ -32,14 +32,7 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
let def_id = body.source.def_id().expect_local();
let param_env = tcx.param_env(def_id);
- Self::new_with_param_env(tcx, body, param_env)
- }
- pub fn new_with_param_env(
- tcx: TyCtxt<'tcx>,
- body: &'mir mir::Body<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> Self {
let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
ConstCx { body, tcx, param_env, const_kind }
}
diff --git a/compiler/rustc_const_eval/src/check_consts/ops.rs b/compiler/rustc_const_eval/src/check_consts/ops.rs
index 3f977dc..ce36701 100644
--- a/compiler/rustc_const_eval/src/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/check_consts/ops.rs
@@ -1,6 +1,5 @@
//! Concrete error types for all operations which may be invalid in a certain const context.
-use hir::def_id::LocalDefId;
use hir::{ConstContext, LangItem};
use rustc_errors::Diag;
use rustc_errors::codes::*;
@@ -12,7 +11,7 @@
use rustc_middle::span_bug;
use rustc_middle::ty::print::{PrintTraitRefExt as _, with_no_trimmed_paths};
use rustc_middle::ty::{
- self, Closure, FnDef, FnPtr, GenericArgKind, GenericArgsRef, Param, TraitRef, Ty, TypingMode,
+ self, Closure, FnDef, FnPtr, GenericArgKind, GenericArgsRef, Param, TraitRef, Ty,
suggest_constraining_type_param,
};
use rustc_middle::util::{CallDesugaringKind, CallKind, call_kind};
@@ -74,7 +73,6 @@ fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
/// A function call where the callee is not marked as `const`.
#[derive(Debug, Clone, Copy)]
pub(crate) struct FnCallNonConst<'tcx> {
- pub caller: LocalDefId,
pub callee: DefId,
pub args: GenericArgsRef<'tcx>,
pub span: Span,
@@ -87,8 +85,9 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
#[allow(rustc::diagnostic_outside_of_impl)]
#[allow(rustc::untranslatable_diagnostic)]
fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, _: Span) -> Diag<'tcx> {
- let FnCallNonConst { caller, callee, args, span, call_source, feature } = *self;
- let ConstCx { tcx, param_env, body, .. } = *ccx;
+ let FnCallNonConst { callee, args, span, call_source, feature } = *self;
+ let ConstCx { tcx, param_env, .. } = *ccx;
+ let caller = ccx.def_id();
let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
let trait_ref = TraitRef::from_method(tcx, trait_id, args);
@@ -116,7 +115,7 @@ fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, _: Span) -> Diag<'tcx> {
let obligation =
Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref);
- let infcx = tcx.infer_ctxt().build(TypingMode::from_param_env(param_env));
+ let infcx = tcx.infer_ctxt().build(ccx.body.typing_mode(tcx));
let mut selcx = SelectionContext::new(&infcx);
let implsrc = selcx.select(&obligation);
@@ -289,7 +288,7 @@ macro_rules! error {
if let Some(feature) = feature {
ccx.tcx.disabled_nightly_features(
&mut err,
- body.source.def_id().as_local().map(|local| ccx.tcx.local_def_id_to_hir_id(local)),
+ Some(ccx.tcx.local_def_id_to_hir_id(caller)),
[(String::new(), feature)],
);
}
diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
index 743924f..bc2661c 100644
--- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
@@ -131,7 +131,7 @@ fn binary_ptr_op(
interp_ok(match bin_op {
Eq | Ne | Lt | Le | Gt | Ge => {
// Types can differ, e.g. fn ptrs with different `for`.
- assert_eq!(left.layout.abi, right.layout.abi);
+ assert_eq!(left.layout.backend_repr, right.layout.backend_repr);
let size = ecx.pointer_size();
// Just compare the bits. ScalarPairs are compared lexicographically.
// We thus always compare pairs and simply fill scalars up with 0.
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 7319c25..81b9d73 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -1,6 +1,7 @@
use std::sync::atomic::Ordering::Relaxed;
use either::{Left, Right};
+use rustc_abi::{self as abi, BackendRepr};
use rustc_hir::def::DefKind;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
@@ -12,7 +13,6 @@
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::def_id::LocalDefId;
use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{self, Abi};
use tracing::{debug, instrument, trace};
use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
@@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>(
// type (it's used throughout the compiler and having it work just on literals is not enough)
// and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
// from its byte-serialized form).
- let force_as_immediate = match op.layout.abi {
- Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+ let force_as_immediate = match op.layout.backend_repr {
+ BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true,
// We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
// input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
// not have to generate any duplicate allocations (we preserve the original `AllocId` in
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 9e80e66..ea88b2e 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -1,10 +1,10 @@
+use rustc_abi::{BackendRepr, VariantIdx};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, VariantIdx};
use tracing::{debug, instrument, trace};
use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
@@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>(
let val = ecx.read_immediate(place).unwrap();
// We could allow wide raw pointers where both sides are integers in the future,
// but for now we reject them.
- if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+ if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
return Err(ValTreeCreationError::NonSupportedType(ty));
}
let val = val.to_scalar();
@@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>(
// Fast path to avoid some allocations.
return mir::ConstValue::ZeroSized;
}
- if layout.abi.is_scalar()
+ if layout.backend_repr.is_scalar()
&& (matches!(ty.kind(), ty::Tuple(_))
|| matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
{
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
index 85d9990..1915bf7 100644
--- a/compiler/rustc_const_eval/src/interpret/call.rs
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -172,8 +172,8 @@ fn layout_compat(
// must be compatible. So we just accept everything with Pointer ABI as compatible,
// even if this will accept some code that is not stably guaranteed to work.
// This also handles function pointers.
- let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
- abi::Abi::Scalar(s) => match s.primitive() {
+ let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
+ abi::BackendRepr::Scalar(s) => match s.primitive() {
abi::Primitive::Pointer(addr_space) => Some(addr_space),
_ => None,
},
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 64b1561..60d5e90 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -274,7 +274,7 @@ fn cast_from_int_like(
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
// Let's make sure v is sign-extended *if* it has a signed type.
- let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+ let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`.
let v = match src_layout.ty.kind() {
Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?,
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index feed086..bb4ac95 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -112,7 +112,7 @@ pub fn read_discriminant(
// Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
- assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+ assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to.
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index d81368e..9043bd3 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -3,7 +3,7 @@
use rustc_hir::def_id::DefId;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::infer::at::ToTrace;
-use rustc_infer::traits::ObligationCause;
+use rustc_infer::traits::{ObligationCause, Reveal};
use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
@@ -116,6 +116,7 @@ fn handle_fn_abi_err(
/// This test should be symmetric, as it is primarily about layout compatibility.
pub(super) fn mir_assign_valid_types<'tcx>(
tcx: TyCtxt<'tcx>,
+ typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
src: TyAndLayout<'tcx>,
dest: TyAndLayout<'tcx>,
@@ -124,7 +125,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
// differences.
- if util::relate_types(tcx, param_env, Variance::Covariant, src.ty, dest.ty) {
+ if util::relate_types(tcx, typing_mode, param_env, Variance::Covariant, src.ty, dest.ty) {
// Make sure the layout is equal, too -- just to be safe. Miri really
// needs layout equality. For performance reason we skip this check when
// the types are equal. Equal types *can* have different layouts when
@@ -144,6 +145,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
#[cfg_attr(not(debug_assertions), inline(always))]
pub(super) fn from_known_layout<'tcx>(
tcx: TyCtxtAt<'tcx>,
+ typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
known_layout: Option<TyAndLayout<'tcx>>,
compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
@@ -153,7 +155,13 @@ pub(super) fn from_known_layout<'tcx>(
Some(known_layout) => {
if cfg!(debug_assertions) {
let check_layout = compute()?;
- if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
+ if !mir_assign_valid_types(
+ tcx.tcx,
+ typing_mode,
+ param_env,
+ check_layout,
+ known_layout,
+ ) {
span_bug!(
tcx.span,
"expected type differs from actual type.\nexpected: {}\nactual: {}",
@@ -203,6 +211,11 @@ pub fn new(
}
}
+ pub fn typing_mode(&self) -> TypingMode<'tcx> {
+ debug_assert_eq!(self.param_env.reveal(), Reveal::All);
+ TypingMode::PostAnalysis
+ }
+
/// Returns the span of the currently executed statement/terminator.
/// This is the span typically used for error reporting.
#[inline(always)]
@@ -327,7 +340,7 @@ pub(super) fn eq_in_param_env<T>(&self, a: T, b: T) -> bool
return true;
}
// Slow path: spin up an inference context to check if these traits are sufficiently equal.
- let infcx = self.tcx.infer_ctxt().build(TypingMode::from_param_env(self.param_env));
+ let infcx = self.tcx.infer_ctxt().build(self.typing_mode());
let ocx = ObligationCtxt::new(&infcx);
let cause = ObligationCause::dummy_with_span(self.cur_span());
// equate the two trait refs after normalization
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 6148123..80e14ee 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -563,7 +563,7 @@ pub fn saturating_arith(
self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
interp_ok(if overflowed.to_bool()? {
let size = l.layout.size;
- if l.layout.abi.is_signed() {
+ if l.layout.backend_repr.is_signed() {
// For signed ints the saturated value depends on the sign of the first
// term since the sign of the second term can be inferred from this and
// the fact that the operation has overflowed (if either is 0 no
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index cd5e2ae..a130ae8 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -5,7 +5,7 @@
use either::{Either, Left, Right};
use rustc_abi as abi;
-use rustc_abi::{Abi, HasDataLayout, Size};
+use rustc_abi::{BackendRepr, HasDataLayout, Size};
use rustc_hir::def::Namespace;
use rustc_middle::mir::interpret::ScalarSizeMismatch;
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
@@ -114,9 +114,9 @@ pub fn to_scalar_and_meta(self) -> (Scalar<Prov>, MemPlaceMeta<Prov>) {
}
/// Assert that this immediate is a valid value for the given ABI.
- pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
+ pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) {
match (self, abi) {
- (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+ (Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => {
assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
// This is not a pointer, it should not carry provenance.
@@ -126,7 +126,7 @@ pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
);
}
}
- (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+ (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
assert_eq!(
a_val.size(),
a.size(cx),
@@ -244,7 +244,7 @@ fn deref(&self) -> &Immediate<Prov> {
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
- debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
+ debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert_eq!(val.size(), layout.size);
ImmTy { imm: val.into(), layout }
}
@@ -252,7 +252,7 @@ pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
#[inline]
pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(
- matches!(layout.abi, Abi::ScalarPair(..)),
+ matches!(layout.backend_repr, BackendRepr::ScalarPair(..)),
"`ImmTy::from_scalar_pair` on non-scalar-pair layout"
);
let imm = Immediate::ScalarPair(a, b);
@@ -263,9 +263,9 @@ pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'t
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
// Without a `cx` we cannot call `assert_matches_abi`.
debug_assert!(
- match (imm, layout.abi) {
- (Immediate::Scalar(..), Abi::Scalar(..)) => true,
- (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ match (imm, layout.backend_repr) {
+ (Immediate::Scalar(..), BackendRepr::Scalar(..)) => true,
+ (Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true,
(Immediate::Uninit, _) if layout.is_sized() => true,
_ => false,
},
@@ -356,7 +356,11 @@ pub fn to_pair(self, cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>)) -> (Self,
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
// Verify that the input matches its type.
if cfg!(debug_assertions) {
- self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx);
+ self.assert_matches_abi(
+ self.layout.backend_repr,
+ "invalid input to Immediate::offset",
+ cx,
+ );
}
// `ImmTy` have already been checked to be in-bounds, so we can just check directly if this
// remains in-bounds. This cannot actually be violated since projections are type-checked
@@ -370,19 +374,19 @@ fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayo
);
// This makes several assumptions about what layouts we will encounter; we match what
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
- let inner_val: Immediate<_> = match (**self, self.layout.abi) {
+ let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) {
// If the entire value is uninit, then so is the field (can happen in ConstProp).
(Immediate::Uninit, _) => Immediate::Uninit,
// If the field is uninhabited, we can forget the data (can happen in ConstProp).
// `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
// has an `Uninhabited` variant, which means this case is possible.
- _ if layout.abi.is_uninhabited() => Immediate::Uninit,
+ _ if layout.is_uninhabited() => Immediate::Uninit,
// the field contains no information, can be left uninit
// (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
_ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data
- _ if matches!(layout.abi, Abi::Aggregate { .. })
+ _ if matches!(layout.backend_repr, BackendRepr::Memory { .. })
&& matches!(layout.variants, abi::Variants::Single { .. })
&& matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
{
@@ -394,7 +398,7 @@ fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayo
**self
}
// extract fields from types with `ScalarPair` ABI
- (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+ (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
Immediate::from(if offset.bytes() == 0 {
a_val
} else {
@@ -411,7 +415,11 @@ fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayo
),
};
// Ensure the new layout matches the new value.
- inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx);
+ inner_val.assert_matches_abi(
+ layout.backend_repr,
+ "invalid field type in Immediate::offset",
+ cx,
+ );
ImmTy::from_immediate(inner_val, layout)
}
@@ -567,8 +575,8 @@ fn read_immediate_from_mplace_raw(
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
- interp_ok(match mplace.layout.abi {
- Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
+ interp_ok(match mplace.layout.backend_repr {
+ BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
let size = s.size(self);
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
let scalar = alloc.read_scalar(
@@ -577,7 +585,7 @@ fn read_immediate_from_mplace_raw(
)?;
Some(ImmTy::from_scalar(scalar, mplace.layout))
}
- Abi::ScalarPair(
+ BackendRepr::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
abi::Scalar::Initialized { value: b, .. },
) => {
@@ -637,9 +645,12 @@ pub fn read_immediate(
op: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
if !matches!(
- op.layout().abi,
- Abi::Scalar(abi::Scalar::Initialized { .. })
- | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
+ op.layout().backend_repr,
+ BackendRepr::Scalar(abi::Scalar::Initialized { .. })
+ | BackendRepr::ScalarPair(
+ abi::Scalar::Initialized { .. },
+ abi::Scalar::Initialized { .. }
+ )
) {
span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
}
@@ -762,6 +773,7 @@ pub fn eval_place_to_op(
)?;
if !mir_assign_valid_types(
*self.tcx,
+ self.typing_mode(),
self.param_env,
self.layout_of(normalized_place_ty)?,
op.layout,
@@ -821,7 +833,9 @@ pub(crate) fn const_val_to_op(
})
};
let layout =
- from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty).into())?;
+ from_known_layout(self.tcx, self.typing_mode(), self.param_env, layout, || {
+ self.layout_of(ty).into()
+ })?;
let imm = match val_val {
mir::ConstValue::Indirect { alloc_id, offset } => {
// This is const data, no mutation allowed.
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 380db90..cf280e0 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -114,7 +114,7 @@ fn binary_int_op(
let l_bits = left.layout.size.bits();
// Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
// the one MIR operator that does *not* directly map to a single LLVM operation.)
- let (shift_amount, overflow) = if right.layout.abi.is_signed() {
+ let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
let shift_amount = r_signed();
let rem = shift_amount.rem_euclid(l_bits.into());
// `rem` is guaranteed positive, so the `unwrap` cannot fail
@@ -126,7 +126,7 @@ fn binary_int_op(
};
let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit
// Compute the shifted result.
- let result = if left.layout.abi.is_signed() {
+ let result = if left.layout.backend_repr.is_signed() {
let l = l_signed();
let result = match bin_op {
Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
@@ -147,7 +147,7 @@ fn binary_int_op(
if overflow && let Some(intrinsic) = throw_ub_on_overflow {
throw_ub!(ShiftOverflow {
intrinsic,
- shift_amount: if right.layout.abi.is_signed() {
+ shift_amount: if right.layout.backend_repr.is_signed() {
Either::Right(r_signed())
} else {
Either::Left(r_unsigned())
@@ -171,7 +171,7 @@ fn binary_int_op(
let size = left.layout.size;
// Operations that need special treatment for signed integers
- if left.layout.abi.is_signed() {
+ if left.layout.backend_repr.is_signed() {
let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
Lt => Some(i128::lt),
Le => Some(i128::le),
@@ -250,7 +250,7 @@ fn binary_int_op(
BitXor => ImmTy::from_uint(l ^ r, left.layout),
_ => {
- assert!(!left.layout.abi.is_signed());
+ assert!(!left.layout.backend_repr.is_signed());
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
@@ -332,7 +332,7 @@ fn binary_ptr_op(
}
let offset_bytes = val.to_target_isize(self)?;
- if !right.layout.abi.is_signed() && offset_bytes < 0 {
+ if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
// We were supposed to do an unsigned offset but the result is negative -- this
// can only mean that the cast wrapped around.
throw_ub!(PointerArithOverflow)
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 81b926a..cc8d1db 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -5,11 +5,11 @@
use std::assert_matches::assert_matches;
use either::{Either, Left, Right};
+use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
use tracing::{instrument, trace};
use super::{
@@ -540,6 +540,7 @@ pub fn eval_place(
)?;
if !mir_assign_valid_types(
*self.tcx,
+ self.typing_mode(),
self.param_env,
self.layout_of(normalized_place_ty)?,
place.layout,
@@ -659,7 +660,7 @@ pub(super) fn write_immediate_no_validate(
// Unfortunately this is too expensive to do in release builds.
if cfg!(debug_assertions) {
src.assert_matches_abi(
- local_layout.abi,
+ local_layout.backend_repr,
"invalid immediate for given destination place",
self,
);
@@ -683,7 +684,11 @@ fn write_immediate_to_mplace_no_validate(
) -> InterpResult<'tcx> {
// We use the sizes from `value` below.
// Ensure that matches the type of the place it is written to.
- value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self);
+ value.assert_matches_abi(
+ layout.backend_repr,
+ "invalid immediate for given destination place",
+ self,
+ );
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
// to handle padding properly, which is only correct if we never look at this data with the
@@ -700,7 +705,7 @@ fn write_immediate_to_mplace_no_validate(
alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
}
Immediate::ScalarPair(a_val, b_val) => {
- let Abi::ScalarPair(a, b) = layout.abi else {
+ let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
@@ -866,8 +871,13 @@ fn copy_op_no_validate(
) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
- let layout_compat =
- mir_assign_valid_types(*self.tcx, self.param_env, src.layout(), dest.layout());
+ let layout_compat = mir_assign_valid_types(
+ *self.tcx,
+ self.typing_mode(),
+ self.param_env,
+ src.layout(),
+ dest.layout(),
+ );
if !allow_transmute && !layout_compat {
span_bug!(
self.cur_span(),
diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs
index 3bc9f46..50c0446 100644
--- a/compiler/rustc_const_eval/src/interpret/stack.rs
+++ b/compiler/rustc_const_eval/src/interpret/stack.rs
@@ -596,12 +596,13 @@ pub(super) fn layout_of_local(
return interp_ok(layout);
}
- let layout = from_known_layout(self.tcx, self.param_env, layout, || {
- let local_ty = frame.body.local_decls[local].ty;
- let local_ty =
- self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
- self.layout_of(local_ty).into()
- })?;
+ let layout =
+ from_known_layout(self.tcx, self.typing_mode(), self.param_env, layout, || {
+ let local_ty = frame.body.local_decls[local].ty;
+ let local_ty =
+ self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+ self.layout_of(local_ty).into()
+ })?;
// Layouts of locals are requested a lot, so we cache them.
state.layout.set(Some(layout));
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 8b5bb13..cd2c1ef 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -11,6 +11,10 @@
use either::{Left, Right};
use hir::def::DefKind;
+use rustc_abi::{
+ BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants,
+ WrappingRange,
+};
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
@@ -23,9 +27,6 @@
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_span::symbol::{Symbol, sym};
-use rustc_target::abi::{
- Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
-};
use tracing::trace;
use super::machine::AllocMap;
@@ -422,7 +423,7 @@ fn deref_pointer(
// Reset provenance: ensure slice tail metadata does not preserve provenance,
// and ensure all pointers do not preserve partial provenance.
if self.reset_provenance_and_padding {
- if matches!(imm.layout.abi, Abi::Scalar(..)) {
+ if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) {
// A thin pointer. If it has provenance, we don't have to do anything.
// If it does not, ensure we clear the provenance in memory.
if matches!(imm.to_scalar(), Scalar::Int(..)) {
@@ -981,7 +982,7 @@ fn union_data_range_uncached<'tcx>(
let elem = layout.field(cx, 0);
// Fast-path for large arrays of simple types that do not contain any padding.
- if elem.abi.is_scalar() {
+ if elem.backend_repr.is_scalar() {
out.add_range(base_offset, elem.size * count);
} else {
for idx in 0..count {
@@ -1299,19 +1300,19 @@ fn visit_value(&mut self, val: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'t
// FIXME: We could avoid some redundant checks here. For newtypes wrapping
// scalars, we do the same check on every "level" (e.g., first we check
// MyNewtype and then the scalar in there).
- match val.layout.abi {
- Abi::Uninhabited => {
+ match val.layout.backend_repr {
+ BackendRepr::Uninhabited => {
let ty = val.layout.ty;
throw_validation_failure!(self.path, UninhabitedVal { ty });
}
- Abi::Scalar(scalar_layout) => {
+ BackendRepr::Scalar(scalar_layout) => {
if !scalar_layout.is_uninit_valid() {
// There is something to check here.
let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
self.visit_scalar(scalar, scalar_layout)?;
}
}
- Abi::ScalarPair(a_layout, b_layout) => {
+ BackendRepr::ScalarPair(a_layout, b_layout) => {
// We can only proceed if *both* scalars need to be initialized.
// FIXME: find a way to also check ScalarPair when one side can be uninit but
// the other must be init.
@@ -1322,12 +1323,12 @@ fn visit_value(&mut self, val: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'t
self.visit_scalar(b, b_layout)?;
}
}
- Abi::Vector { .. } => {
+ BackendRepr::Vector { .. } => {
// No checks here, we assume layout computation gets this right.
// (This is harder to check since Miri does not represent these as `Immediate`. We
// also cannot use field projections since this might be a newtype around a vector.)
}
- Abi::Aggregate { .. } => {
+ BackendRepr::Memory { .. } => {
// Nothing to do.
}
}
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 7a8b976..f743525 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,9 +1,9 @@
+use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants};
use rustc_middle::bug;
use rustc_middle::ty::layout::{
HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
};
use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
-use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
use crate::interpret::{InterpCx, MemoryKind};
@@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>(
};
// Check the ABI.
- let valid = match this.abi {
- Abi::Uninhabited => false, // definitely UB
- Abi::Scalar(s) => scalar_allows_raw_init(s),
- Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
- Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
- Abi::Aggregate { .. } => true, // Fields are checked below.
+ let valid = match this.backend_repr {
+ BackendRepr::Uninhabited => false, // definitely UB
+ BackendRepr::Scalar(s) => scalar_allows_raw_init(s),
+ BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+ BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+ BackendRepr::Memory { .. } => true, // Fields are checked below.
};
if !valid {
// This is definitely not okay.
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
index 7af977b..0cf27d3 100644
--- a/compiler/rustc_const_eval/src/util/compare_types.rs
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -8,24 +8,15 @@
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt, TypingMode, Variance};
use rustc_trait_selection::traits::ObligationCtxt;
-/// Returns whether the two types are equal up to subtyping.
-///
-/// This is used in case we don't know the expected subtyping direction
-/// and still want to check whether anything is broken.
-pub fn is_equal_up_to_subtyping<'tcx>(
+/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
+pub fn sub_types<'tcx>(
tcx: TyCtxt<'tcx>,
+ typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
src: Ty<'tcx>,
dest: Ty<'tcx>,
) -> bool {
- // Fast path.
- if src == dest {
- return true;
- }
-
- // Check for subtyping in either direction.
- relate_types(tcx, param_env, Variance::Covariant, src, dest)
- || relate_types(tcx, param_env, Variance::Covariant, dest, src)
+ relate_types(tcx, typing_mode, param_env, Variance::Covariant, src, dest)
}
/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
@@ -35,6 +26,7 @@ pub fn is_equal_up_to_subtyping<'tcx>(
/// because we want to check for type equality.
pub fn relate_types<'tcx>(
tcx: TyCtxt<'tcx>,
+ typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
variance: Variance,
src: Ty<'tcx>,
@@ -45,8 +37,7 @@ pub fn relate_types<'tcx>(
}
let mut builder = tcx.infer_ctxt().ignoring_regions();
- // FIXME(#132279): This should eventually use the already defined hidden types.
- let infcx = builder.build(TypingMode::from_param_env(param_env));
+ let infcx = builder.build(typing_mode);
let ocx = ObligationCtxt::new(&infcx);
let cause = ObligationCause::dummy();
let src = ocx.normalize(&cause, param_env, src);
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index 66a1add..25a9dbb 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -8,7 +8,7 @@
pub use self::alignment::{is_disaligned, is_within_packed};
pub use self::check_validity_requirement::check_validity_requirement;
-pub use self::compare_types::{is_equal_up_to_subtyping, relate_types};
+pub use self::compare_types::{relate_types, sub_types};
pub use self::type_name::type_name;
/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs
index e2585c0..92b622f 100644
--- a/compiler/rustc_driver_impl/src/lib.rs
+++ b/compiler/rustc_driver_impl/src/lib.rs
@@ -33,6 +33,7 @@
use std::{env, str};
use rustc_ast as ast;
+use rustc_codegen_ssa::back::apple;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::{CodegenErrors, CodegenResults};
use rustc_data_structures::profiling::{
@@ -855,12 +856,11 @@ fn print_crate_info(
}
}
DeploymentTarget => {
- use rustc_target::spec::current_apple_deployment_target;
-
if sess.target.is_like_osx {
- let (major, minor, patch) = current_apple_deployment_target(&sess.target);
- let patch = if patch != 0 { format!(".{patch}") } else { String::new() };
- println_info!("deployment_target={major}.{minor}{patch}")
+ println_info!(
+ "deployment_target={}",
+ apple::pretty_version(apple::deployment_target(sess))
+ )
} else {
#[allow(rustc::diagnostic_outside_of_impl)]
sess.dcx().fatal("only Apple targets currently support deployment version info")
diff --git a/compiler/rustc_error_codes/src/error_codes/E0801.md b/compiler/rustc_error_codes/src/error_codes/E0801.md
new file mode 100644
index 0000000..c89feb9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0801.md
@@ -0,0 +1,51 @@
+The `self` parameter in a method has an invalid generic "receiver type".
+
+Erroneous code example:
+
+```compile_fail,E0801
+struct Foo;
+
+impl Foo {
+ fn foo<R: std::ops::Deref<Target=Self>>(self: R) {}
+}
+```
+
+or alternatively,
+
+```compile_fail,E0801
+struct Foo;
+
+impl Foo {
+ fn foo(self: impl std::ops::Deref<Target=Self>) {}
+}
+```
+
+Methods take a special first parameter, termed `self`. It's normal to
+use `self`, `&self` or `&mut self`, which are syntactic sugar for
+`self: Self`, `self: &Self`, and `self: &mut Self` respectively.
+But it's also possible to use more sophisticated types of `self`
+parameter, for instance `std::rc::Rc<Self>`. The set of allowable
+`Self` types is extensible using the nightly feature
+[Arbitrary self types][AST].
+This will extend the valid set of `Self` types to anything which implements
+`std::ops::Deref<Target=Self>`, for example `Rc<Self>`, `Box<Self>`, or
+your own smart pointers that do the same.
+
+However, even with that feature, the `self` type must be concrete.
+Generic `self` types are not permitted. Specifically, a `self` type will
+be rejected if it is a type parameter defined on the method.
+
+These are OK:
+
+```
+struct Foo;
+
+impl Foo {
+ fn foo(self) {}
+ fn foo2(self: std::rc::Rc<Self>) {} // or some other similar
+ // smart pointer if you enable arbitrary self types and
+ // the pointer implements Deref<Target=Self>
+}
+```
+
+[AST]: https://doc.rust-lang.org/unstable-book/language-features/arbitrary-self-types.html
diff --git a/compiler/rustc_error_codes/src/lib.rs b/compiler/rustc_error_codes/src/lib.rs
index 27a34d6..29f3277 100644
--- a/compiler/rustc_error_codes/src/lib.rs
+++ b/compiler/rustc_error_codes/src/lib.rs
@@ -540,6 +540,7 @@ macro_rules! error_codes {
E0798: 0798,
E0799: 0799,
E0800: 0800,
+E0801: 0801,
);
)
}
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 1c268c8..fa76f86 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -2627,7 +2627,6 @@ fn are_suggestable_generic_args(generic_args: &[GenericArg<'_>]) -> bool {
}
TyKind::Tup(tys) => tys.iter().any(Self::is_suggestable_infer_ty),
TyKind::Ptr(mut_ty) | TyKind::Ref(_, mut_ty) => mut_ty.ty.is_suggestable_infer_ty(),
- TyKind::OpaqueDef(_, generic_args) => are_suggestable_generic_args(generic_args),
TyKind::Path(QPath::TypeRelative(ty, segment)) => {
ty.is_suggestable_infer_ty() || are_suggestable_generic_args(segment.args().args)
}
@@ -2746,19 +2745,8 @@ pub struct BareFnTy<'hir> {
pub struct OpaqueTy<'hir> {
pub hir_id: HirId,
pub def_id: LocalDefId,
- pub generics: &'hir Generics<'hir>,
pub bounds: GenericBounds<'hir>,
- pub origin: OpaqueTyOrigin,
- /// Return-position impl traits (and async futures) must "reify" any late-bound
- /// lifetimes that are captured from the function signature they originate from.
- ///
- /// This is done by generating a new early-bound lifetime parameter local to the
- /// opaque which is instantiated in the function signature with the late-bound
- /// lifetime.
- ///
- /// This mapping associated a captured lifetime (first parameter) with the new
- /// early-bound lifetime that was generated for the opaque.
- pub lifetime_mapping: &'hir [(&'hir Lifetime, LocalDefId)],
+ pub origin: OpaqueTyOrigin<LocalDefId>,
pub span: Span,
}
@@ -2796,33 +2784,35 @@ pub struct PreciseCapturingNonLifetimeArg {
pub res: Res,
}
-#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable_Generic)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
pub enum RpitContext {
Trait,
TraitImpl,
}
/// From whence the opaque type came.
-#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable_Generic)]
-pub enum OpaqueTyOrigin {
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum OpaqueTyOrigin<D> {
/// `-> impl Trait`
FnReturn {
/// The defining function.
- parent: LocalDefId,
+ parent: D,
// Whether this is an RPITIT (return position impl trait in trait)
in_trait_or_impl: Option<RpitContext>,
},
/// `async fn`
AsyncFn {
/// The defining function.
- parent: LocalDefId,
+ parent: D,
// Whether this is an AFIT (async fn in trait)
in_trait_or_impl: Option<RpitContext>,
},
/// type aliases: `type Foo = impl Trait;`
TyAlias {
/// The type alias or associated type parent of the TAIT/ATPIT
- parent: LocalDefId,
+ parent: D,
/// associated types in impl blocks for traits.
in_assoc_ty: bool,
},
@@ -2861,12 +2851,7 @@ pub enum TyKind<'hir> {
/// Type parameters may be stored in each `PathSegment`.
Path(QPath<'hir>),
/// An opaque type definition itself. This is only used for `impl Trait`.
- ///
- /// The generic argument list contains the lifetimes (and in the future
- /// possibly parameters) that are actually bound on the `impl Trait`.
- ///
- /// The last parameter specifies whether this opaque appears in a trait definition.
- OpaqueDef(&'hir OpaqueTy<'hir>, &'hir [GenericArg<'hir>]),
+ OpaqueDef(&'hir OpaqueTy<'hir>),
/// A trait object type `Bound1 + Bound2 + Bound3`
/// where `Bound` is a trait or a lifetime.
TraitObject(&'hir [PolyTraitRef<'hir>], &'hir Lifetime, TraitObjectSyntax),
@@ -3991,7 +3976,6 @@ pub fn generics(self) -> Option<&'hir Generics<'hir>> {
| Node::TraitItem(TraitItem { generics, .. })
| Node::ImplItem(ImplItem { generics, .. }) => Some(generics),
Node::Item(item) => item.kind.generics(),
- Node::OpaqueTy(opaque) => Some(opaque.generics),
_ => None,
}
}
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
index 322f8e2..a453af3 100644
--- a/compiler/rustc_hir/src/intravisit.rs
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -896,9 +896,8 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v>) -> V::Resul
TyKind::Path(ref qpath) => {
try_visit!(visitor.visit_qpath(qpath, typ.hir_id, typ.span));
}
- TyKind::OpaqueDef(opaque, lifetimes) => {
+ TyKind::OpaqueDef(opaque) => {
try_visit!(visitor.visit_opaque_ty(opaque));
- walk_list!(visitor, visit_generic_arg, lifetimes);
}
TyKind::Array(ref ty, ref length) => {
try_visit!(visitor.visit_ty(ty));
@@ -1188,10 +1187,8 @@ pub fn walk_poly_trait_ref<'v, V: Visitor<'v>>(
}
pub fn walk_opaque_ty<'v, V: Visitor<'v>>(visitor: &mut V, opaque: &'v OpaqueTy<'v>) -> V::Result {
- let &OpaqueTy { hir_id, def_id: _, generics, bounds, origin: _, lifetime_mapping: _, span: _ } =
- opaque;
+ let &OpaqueTy { hir_id, def_id: _, bounds, origin: _, span: _ } = opaque;
try_visit!(visitor.visit_id(hir_id));
- try_visit!(walk_generics(visitor, generics));
walk_list!(visitor, visit_param_bound, bounds);
V::Result::output()
}
diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl
index 7191c72..38b11aa 100644
--- a/compiler/rustc_hir_analysis/messages.ftl
+++ b/compiler/rustc_hir_analysis/messages.ftl
@@ -234,6 +234,12 @@
.help = consider moving this inherent impl into the crate defining the type if possible
.span_help = alternatively add `#[rustc_allow_incoherent_impl]` to the relevant impl items
+hir_analysis_invalid_generic_receiver_ty = invalid generic `self` parameter type: `{$receiver_ty}`
+ .note = type of `self` must not be a method generic parameter type
+
+hir_analysis_invalid_generic_receiver_ty_help =
+ use a concrete type such as `self`, `&self`, `&mut self`, `self: Box<Self>`, `self: Rc<Self>`, `self: Arc<Self>`, or `self: Pin<P>` (where P is one of the previous types except `Self`)
+
hir_analysis_invalid_receiver_ty = invalid `self` parameter type: `{$receiver_ty}`
.note = type of `self` must be `Self` or a type that dereferences to it
diff --git a/compiler/rustc_hir_analysis/src/bounds.rs b/compiler/rustc_hir_analysis/src/bounds.rs
index 09ddc6c..9b02651 100644
--- a/compiler/rustc_hir_analysis/src/bounds.rs
+++ b/compiler/rustc_hir_analysis/src/bounds.rs
@@ -84,11 +84,11 @@ pub(crate) fn push_const_bound(
&mut self,
tcx: TyCtxt<'tcx>,
bound_trait_ref: ty::PolyTraitRef<'tcx>,
- host: ty::HostPolarity,
+ constness: ty::BoundConstness,
span: Span,
) {
if tcx.is_const_trait(bound_trait_ref.def_id()) {
- self.clauses.push((bound_trait_ref.to_host_effect_clause(tcx, host), span));
+ self.clauses.push((bound_trait_ref.to_host_effect_clause(tcx, constness), span));
} else {
tcx.dcx().span_delayed_bug(span, "tried to lower {host:?} bound for non-const trait");
}
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
index f830108..476814c 100644
--- a/compiler/rustc_hir_analysis/src/check/check.rs
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -268,7 +268,7 @@ fn check_opaque_meets_bounds<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: LocalDefId,
span: Span,
- origin: &hir::OpaqueTyOrigin,
+ origin: &hir::OpaqueTyOrigin<LocalDefId>,
) -> Result<(), ErrorGuaranteed> {
let defining_use_anchor = match *origin {
hir::OpaqueTyOrigin::FnReturn { parent, .. }
@@ -677,7 +677,7 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) {
DefKind::OpaqueTy => {
check_opaque_precise_captures(tcx, def_id);
- let origin = tcx.opaque_type_origin(def_id);
+ let origin = tcx.local_opaque_ty_origin(def_id);
if let hir::OpaqueTyOrigin::FnReturn { parent: fn_def_id, .. }
| hir::OpaqueTyOrigin::AsyncFn { parent: fn_def_id, .. } = origin
&& let hir::Node::TraitItem(trait_item) = tcx.hir_node_by_def_id(fn_def_id)
@@ -821,8 +821,7 @@ pub(super) fn check_specialization_validity<'tcx>(
let result = opt_result.unwrap_or(Ok(()));
if let Err(parent_impl) = result {
- // FIXME(effects) the associated type from effects could be specialized
- if !tcx.is_impl_trait_in_trait(impl_item) && !tcx.is_effects_desugared_assoc_ty(impl_item) {
+ if !tcx.is_impl_trait_in_trait(impl_item) {
report_forbidden_specialization(tcx, impl_item, parent_impl);
} else {
tcx.dcx().delayed_bug(format!("parent item: {parent_impl:?} not marked as default"));
diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
index db2c44f..0b7d3f8 100644
--- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
+++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
@@ -218,7 +218,7 @@ fn compare_method_predicate_entailment<'tcx>(
tcx.const_conditions(trait_m.def_id).instantiate_own(tcx, trait_to_impl_args),
)
.map(|(trait_ref, _)| {
- trait_ref.to_host_effect_clause(tcx, ty::HostPolarity::Maybe)
+ trait_ref.to_host_effect_clause(tcx, ty::BoundConstness::Maybe)
}),
);
}
@@ -272,7 +272,7 @@ fn compare_method_predicate_entailment<'tcx>(
tcx,
cause,
param_env,
- const_condition.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
+ const_condition.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
));
}
}
@@ -1942,7 +1942,7 @@ fn compare_type_predicate_entailment<'tcx>(
tcx.const_conditions(trait_ty.def_id).instantiate_own(tcx, trait_to_impl_args),
)
.map(|(trait_ref, _)| {
- trait_ref.to_host_effect_clause(tcx, ty::HostPolarity::Maybe)
+ trait_ref.to_host_effect_clause(tcx, ty::BoundConstness::Maybe)
}),
);
}
@@ -1985,7 +1985,7 @@ fn compare_type_predicate_entailment<'tcx>(
tcx,
cause,
param_env,
- const_condition.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
+ const_condition.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
));
}
}
@@ -2042,7 +2042,7 @@ pub(super) fn check_type_bounds<'tcx>(
// A synthetic impl Trait for RPITIT desugaring or assoc type for effects desugaring has no HIR,
// which we currently use to get the span for an impl's associated type. Instead, for these,
// use the def_span for the synthesized associated type.
- let impl_ty_span = if impl_ty.is_impl_trait_in_trait() || impl_ty.is_effects_desugaring {
+ let impl_ty_span = if impl_ty.is_impl_trait_in_trait() {
tcx.def_span(impl_ty_def_id)
} else {
match tcx.hir_node_by_def_id(impl_ty_def_id) {
@@ -2091,7 +2091,7 @@ pub(super) fn check_type_bounds<'tcx>(
tcx,
mk_cause(span),
param_env,
- c.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
+ c.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
)
}),
);
diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
index 12ed7b8..d01c378 100644
--- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -904,7 +904,6 @@ fn check_impl_item<'tcx>(
hir::ImplItemKind::Type(ty) if ty.span != DUMMY_SP => (None, ty.span),
_ => (None, impl_item.span),
};
-
check_associated_item(tcx, impl_item.owner_id.def_id, span, method_sig)
}
@@ -1389,7 +1388,7 @@ fn check_impl<'tcx>(
ObligationCauseCode::WellFormed(None),
),
wfcx.param_env,
- bound.to_host_effect_clause(tcx, ty::HostPolarity::Maybe),
+ bound.to_host_effect_clause(tcx, ty::BoundConstness::Maybe),
))
}
}
@@ -1725,8 +1724,11 @@ fn check_method_receiver<'tcx>(
} else {
None
};
+ let generics = tcx.generics_of(method.def_id);
- if !receiver_is_valid(wfcx, span, receiver_ty, self_ty, arbitrary_self_types_level) {
+ let receiver_validity =
+ receiver_is_valid(wfcx, span, receiver_ty, self_ty, arbitrary_self_types_level, generics);
+ if let Err(receiver_validity_err) = receiver_validity {
return Err(match arbitrary_self_types_level {
// Wherever possible, emit a message advising folks that the features
// `arbitrary_self_types` or `arbitrary_self_types_pointers` might
@@ -1737,7 +1739,9 @@ fn check_method_receiver<'tcx>(
receiver_ty,
self_ty,
Some(ArbitrarySelfTypesLevel::Basic),
- ) =>
+ generics,
+ )
+ .is_ok() =>
{
// Report error; would have worked with `arbitrary_self_types`.
feature_err(
@@ -1759,7 +1763,9 @@ fn check_method_receiver<'tcx>(
receiver_ty,
self_ty,
Some(ArbitrarySelfTypesLevel::WithPointers),
- ) =>
+ generics,
+ )
+ .is_ok() =>
{
// Report error; would have worked with `arbitrary_self_types_pointers`.
feature_err(
@@ -1777,13 +1783,45 @@ fn check_method_receiver<'tcx>(
_ =>
// Report error; would not have worked with `arbitrary_self_types[_pointers]`.
{
- tcx.dcx().emit_err(errors::InvalidReceiverTy { span, receiver_ty })
+ match receiver_validity_err {
+ ReceiverValidityError::DoesNotDeref => {
+ tcx.dcx().emit_err(errors::InvalidReceiverTy { span, receiver_ty })
+ }
+ ReceiverValidityError::MethodGenericParamUsed => {
+ tcx.dcx().emit_err(errors::InvalidGenericReceiverTy { span, receiver_ty })
+ }
+ }
}
});
}
Ok(())
}
+/// Error cases which may be returned from `receiver_is_valid`. These error
+/// cases are generated in that function as they may be unearthed as we explore
+/// the `autoderef` chain, but they're converted to diagnostics in the caller.
+enum ReceiverValidityError {
+ /// The self type does not get to the receiver type by following the
+ /// autoderef chain.
+ DoesNotDeref,
+ /// A type was found which is a method type parameter, and that's not allowed.
+ MethodGenericParamUsed,
+}
+
+/// Confirms that a type is not a type parameter referring to one of the
+/// method's type params.
+fn confirm_type_is_not_a_method_generic_param(
+ ty: Ty<'_>,
+ method_generics: &ty::Generics,
+) -> Result<(), ReceiverValidityError> {
+ if let ty::Param(param) = ty.kind() {
+ if (param.index as usize) >= method_generics.parent_count {
+ return Err(ReceiverValidityError::MethodGenericParamUsed);
+ }
+ }
+ Ok(())
+}
+
/// Returns whether `receiver_ty` would be considered a valid receiver type for `self_ty`. If
/// `arbitrary_self_types` is enabled, `receiver_ty` must transitively deref to `self_ty`, possibly
/// through a `*const/mut T` raw pointer if `arbitrary_self_types_pointers` is also enabled.
@@ -1799,7 +1837,8 @@ fn receiver_is_valid<'tcx>(
receiver_ty: Ty<'tcx>,
self_ty: Ty<'tcx>,
arbitrary_self_types_enabled: Option<ArbitrarySelfTypesLevel>,
-) -> bool {
+ method_generics: &ty::Generics,
+) -> Result<(), ReceiverValidityError> {
let infcx = wfcx.infcx;
let tcx = wfcx.tcx();
let cause =
@@ -1811,9 +1850,11 @@ fn receiver_is_valid<'tcx>(
ocx.eq(&cause, wfcx.param_env, self_ty, receiver_ty)?;
if ocx.select_all_or_error().is_empty() { Ok(()) } else { Err(NoSolution) }
}) {
- return true;
+ return Ok(());
}
+ confirm_type_is_not_a_method_generic_param(receiver_ty, method_generics)?;
+
let mut autoderef = Autoderef::new(infcx, wfcx.param_env, wfcx.body_def_id, span, receiver_ty);
// The `arbitrary_self_types_pointers` feature allows raw pointer receivers like `self: *const Self`.
@@ -1830,6 +1871,8 @@ fn receiver_is_valid<'tcx>(
potential_self_ty, self_ty
);
+ confirm_type_is_not_a_method_generic_param(potential_self_ty, method_generics)?;
+
// Check if the self type unifies. If it does, then commit the result
// since it may have region side-effects.
if let Ok(()) = wfcx.infcx.commit_if_ok(|_| {
@@ -1838,7 +1881,7 @@ fn receiver_is_valid<'tcx>(
if ocx.select_all_or_error().is_empty() { Ok(()) } else { Err(NoSolution) }
}) {
wfcx.register_obligations(autoderef.into_obligations());
- return true;
+ return Ok(());
}
// Without `feature(arbitrary_self_types)`, we require that each step in the
@@ -1865,7 +1908,7 @@ fn receiver_is_valid<'tcx>(
}
debug!("receiver_is_valid: type `{:?}` does not deref to `{:?}`", receiver_ty, self_ty);
- false
+ Err(ReceiverValidityError::DoesNotDeref)
}
fn receiver_is_implemented<'tcx>(
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index cca52c5..3f6198d 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -86,7 +86,7 @@ pub fn provide(providers: &mut Providers) {
impl_trait_header,
coroutine_kind,
coroutine_for_closure,
- is_type_alias_impl_trait,
+ opaque_ty_origin,
rendered_precise_capturing_args,
..*providers
};
@@ -1302,7 +1302,7 @@ fn trait_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::TraitDef {
}
}
-#[instrument(level = "debug", skip(tcx))]
+#[instrument(level = "debug", skip(tcx), ret)]
fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_, ty::PolyFnSig<'_>> {
use rustc_hir::Node::*;
use rustc_hir::*;
@@ -1759,9 +1759,18 @@ fn coroutine_for_closure(tcx: TyCtxt<'_>, def_id: LocalDefId) -> DefId {
def_id.to_def_id()
}
-fn is_type_alias_impl_trait<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> bool {
- let opaque = tcx.hir().expect_opaque_ty(def_id);
- matches!(opaque.origin, hir::OpaqueTyOrigin::TyAlias { .. })
+fn opaque_ty_origin<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> hir::OpaqueTyOrigin<DefId> {
+ match tcx.hir_node_by_def_id(def_id).expect_opaque_ty().origin {
+ hir::OpaqueTyOrigin::FnReturn { parent, in_trait_or_impl } => {
+ hir::OpaqueTyOrigin::FnReturn { parent: parent.to_def_id(), in_trait_or_impl }
+ }
+ hir::OpaqueTyOrigin::AsyncFn { parent, in_trait_or_impl } => {
+ hir::OpaqueTyOrigin::AsyncFn { parent: parent.to_def_id(), in_trait_or_impl }
+ }
+ hir::OpaqueTyOrigin::TyAlias { parent, in_assoc_ty } => {
+ hir::OpaqueTyOrigin::TyAlias { parent: parent.to_def_id(), in_assoc_ty }
+ }
+ }
}
fn rendered_precise_capturing_args<'tcx>(
diff --git a/compiler/rustc_hir_analysis/src/collect/generics_of.rs b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
index 3eec0e1..c31bff2 100644
--- a/compiler/rustc_hir_analysis/src/collect/generics_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
@@ -426,6 +426,21 @@ enum Defaults {
});
}
+ if let Node::OpaqueTy(&hir::OpaqueTy { .. }) = node {
+ assert!(own_params.is_empty());
+
+ let lifetimes = tcx.opaque_captured_lifetimes(def_id);
+ debug!(?lifetimes);
+
+ own_params.extend(lifetimes.iter().map(|&(_, param)| ty::GenericParamDef {
+ name: tcx.item_name(param.to_def_id()),
+ index: next_index(),
+ def_id: param.to_def_id(),
+ pure_wrt_drop: false,
+ kind: ty::GenericParamDefKind::Lifetime,
+ }))
+ }
+
let param_def_id_to_index =
own_params.iter().map(|param| (param.def_id, param.index)).collect();
diff --git a/compiler/rustc_hir_analysis/src/collect/item_bounds.rs b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
index 5c4cecc..0b81f46 100644
--- a/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
+++ b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
@@ -43,7 +43,7 @@ fn associated_type_bounds<'tcx>(
match filter {
PredicateFilter::All
| PredicateFilter::SelfOnly
- | PredicateFilter::SelfThatDefines(_)
+ | PredicateFilter::SelfTraitThatDefines(_)
| PredicateFilter::SelfAndAssociatedTypeBounds => {
icx.lowerer().add_sized_bound(&mut bounds, item_ty, hir_bounds, None, span);
}
@@ -122,7 +122,7 @@ fn remap_gat_vars_and_recurse_into_nested_projections<'tcx>(
PredicateFilter::SelfOnly => {
return None;
}
- PredicateFilter::SelfThatDefines(_)
+ PredicateFilter::SelfTraitThatDefines(_)
| PredicateFilter::SelfConstIfConst
| PredicateFilter::SelfAndAssociatedTypeBounds
| PredicateFilter::ConstIfConst => {
@@ -329,7 +329,7 @@ fn opaque_type_bounds<'tcx>(
match filter {
PredicateFilter::All
| PredicateFilter::SelfOnly
- | PredicateFilter::SelfThatDefines(_)
+ | PredicateFilter::SelfTraitThatDefines(_)
| PredicateFilter::SelfAndAssociatedTypeBounds => {
// Associated types are implicitly sized unless a `?Sized` bound is found
icx.lowerer().add_sized_bound(&mut bounds, item_ty, hir_bounds, None, span);
@@ -379,9 +379,6 @@ pub(super) fn explicit_item_bounds_with_filter(
}
let bounds = match tcx.hir_node_by_def_id(def_id) {
- _ if tcx.is_effects_desugared_assoc_ty(def_id.to_def_id()) => {
- associated_type_bounds(tcx, def_id, &[], tcx.def_span(def_id), filter)
- }
hir::Node::TraitItem(hir::TraitItem {
kind: hir::TraitItemKind::Type(bounds, _),
span,
diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
index 644ff0c..7ce12d4 100644
--- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
@@ -329,13 +329,6 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
// We create bi-directional Outlives predicates between the original
// and the duplicated parameter, to ensure that they do not get out of sync.
if let Node::OpaqueTy(..) = node {
- let opaque_ty_node = tcx.parent_hir_node(hir_id);
- let Node::Ty(&hir::Ty { kind: TyKind::OpaqueDef(_, lifetimes), .. }) = opaque_ty_node
- else {
- bug!("unexpected {opaque_ty_node:?}")
- };
- debug!(?lifetimes);
-
compute_bidirectional_outlives_predicates(tcx, &generics.own_params, &mut predicates);
debug!(?predicates);
}
@@ -564,7 +557,11 @@ pub(super) fn explicit_supertraits_containing_assoc_item<'tcx>(
tcx: TyCtxt<'tcx>,
(trait_def_id, assoc_name): (DefId, Ident),
) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
- implied_predicates_with_filter(tcx, trait_def_id, PredicateFilter::SelfThatDefines(assoc_name))
+ implied_predicates_with_filter(
+ tcx,
+ trait_def_id,
+ PredicateFilter::SelfTraitThatDefines(assoc_name),
+ )
}
pub(super) fn explicit_implied_predicates_of<'tcx>(
@@ -593,7 +590,7 @@ pub(super) fn implied_predicates_with_filter<'tcx>(
let Some(trait_def_id) = trait_def_id.as_local() else {
// if `assoc_name` is None, then the query should've been redirected to an
// external provider
- assert_matches!(filter, PredicateFilter::SelfThatDefines(_));
+ assert_matches!(filter, PredicateFilter::SelfTraitThatDefines(_));
return tcx.explicit_super_predicates_of(trait_def_id);
};
@@ -613,12 +610,8 @@ pub(super) fn implied_predicates_with_filter<'tcx>(
let mut bounds = Bounds::default();
icx.lowerer().lower_bounds(self_param_ty, superbounds, &mut bounds, ty::List::empty(), filter);
- let where_bounds_that_match = icx.probe_ty_param_bounds_in_generics(
- generics,
- item.owner_id.def_id,
- self_param_ty,
- filter,
- );
+ let where_bounds_that_match =
+ icx.probe_ty_param_bounds_in_generics(generics, item.owner_id.def_id, filter);
// Combine the two lists to form the complete set of superbounds:
let implied_bounds =
@@ -659,7 +652,7 @@ pub(super) fn implied_predicates_with_filter<'tcx>(
}
// Make sure when elaborating supertraits, probing for associated types, etc.,
-// we really truly are elaborating clauses that have `Self` as their self type.
+// we really truly are elaborating clauses that have `ty` as their self type.
// This is very important since downstream code relies on this being correct.
pub(super) fn assert_only_contains_predicates_from<'tcx>(
filter: PredicateFilter,
@@ -671,7 +664,7 @@ pub(super) fn assert_only_contains_predicates_from<'tcx>(
}
match filter {
- PredicateFilter::SelfOnly | PredicateFilter::SelfThatDefines(_) => {
+ PredicateFilter::SelfOnly => {
for (clause, _) in bounds {
match clause.kind().skip_binder() {
ty::ClauseKind::Trait(trait_predicate) => {
@@ -711,12 +704,39 @@ pub(super) fn assert_only_contains_predicates_from<'tcx>(
}
}
}
+ PredicateFilter::SelfTraitThatDefines(_) => {
+ for (clause, _) in bounds {
+ match clause.kind().skip_binder() {
+ ty::ClauseKind::Trait(trait_predicate) => {
+ assert_eq!(
+ trait_predicate.self_ty(),
+ ty,
+ "expected `Self` predicate when computing \
+ `{filter:?}` implied bounds: {clause:?}"
+ );
+ }
+
+ ty::ClauseKind::Projection(_)
+ | ty::ClauseKind::TypeOutlives(_)
+ | ty::ClauseKind::RegionOutlives(_)
+ | ty::ClauseKind::ConstArgHasType(_, _)
+ | ty::ClauseKind::WellFormed(_)
+ | ty::ClauseKind::ConstEvaluatable(_)
+ | ty::ClauseKind::HostEffect(..) => {
+ bug!(
+ "unexpected non-`Self` predicate when computing \
+ `{filter:?}` implied bounds: {clause:?}"
+ );
+ }
+ }
+ }
+ }
PredicateFilter::ConstIfConst => {
for (clause, _) in bounds {
match clause.kind().skip_binder() {
ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref: _,
- host: ty::HostPolarity::Maybe,
+ constness: ty::BoundConstness::Maybe,
}) => {}
_ => {
bug!(
@@ -732,8 +752,8 @@ pub(super) fn assert_only_contains_predicates_from<'tcx>(
match clause.kind().skip_binder() {
ty::ClauseKind::HostEffect(pred) => {
assert_eq!(
- pred.host,
- ty::HostPolarity::Maybe,
+ pred.constness,
+ ty::BoundConstness::Maybe,
"expected `~const` predicate when computing `{filter:?}` \
implied bounds: {clause:?}",
);
@@ -764,8 +784,15 @@ pub(super) fn type_param_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
(item_def_id, def_id, assoc_name): (LocalDefId, LocalDefId, Ident),
) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
- use rustc_hir::*;
- use rustc_middle::ty::Ty;
+ match tcx.opt_rpitit_info(item_def_id.to_def_id()) {
+ Some(ty::ImplTraitInTraitData::Trait { opaque_def_id, .. }) => {
+ return tcx.type_param_predicates((opaque_def_id.expect_local(), def_id, assoc_name));
+ }
+ Some(ty::ImplTraitInTraitData::Impl { .. }) => {
+ unreachable!("should not be lowering bounds on RPITIT in impl")
+ }
+ None => {}
+ }
// In the HIR, bounds can derive from two places. Either
// written inline like `<T: Foo>` or in a where-clause like
@@ -773,12 +800,10 @@ pub(super) fn type_param_predicates<'tcx>(
let param_id = tcx.local_def_id_to_hir_id(def_id);
let param_owner = tcx.hir().ty_param_owner(def_id);
- let generics = tcx.generics_of(param_owner);
- let index = generics.param_def_id_to_index[&def_id.to_def_id()];
- let ty = Ty::new_param(tcx, index, tcx.hir().ty_param_name(def_id));
// Don't look for bounds where the type parameter isn't in scope.
let parent = if item_def_id == param_owner {
+ // FIXME: Shouldn't this be unreachable?
None
} else {
tcx.generics_of(item_def_id).parent.map(|def_id| def_id.expect_local())
@@ -798,8 +823,9 @@ pub(super) fn type_param_predicates<'tcx>(
let Some(hir_generics) = hir_node.generics() else {
return result;
};
+
if let Node::Item(item) = hir_node
- && let ItemKind::Trait(..) = item.kind
+ && let hir::ItemKind::Trait(..) = item.kind
// Implied `Self: Trait` and supertrait bounds.
&& param_id == item_hir_id
{
@@ -808,23 +834,34 @@ pub(super) fn type_param_predicates<'tcx>(
}
let icx = ItemCtxt::new(tcx, item_def_id);
- let extra_predicates = extend.into_iter().chain(
- icx.probe_ty_param_bounds_in_generics(
- hir_generics,
- def_id,
- ty,
- PredicateFilter::SelfThatDefines(assoc_name),
- )
- .into_iter()
- .filter(|(predicate, _)| match predicate.kind().skip_binder() {
- ty::ClauseKind::Trait(data) => data.self_ty().is_param(index),
- _ => false,
- }),
+ let extra_predicates = extend.into_iter().chain(icx.probe_ty_param_bounds_in_generics(
+ hir_generics,
+ def_id,
+ PredicateFilter::SelfTraitThatDefines(assoc_name),
+ ));
+
+ let bounds =
+ &*tcx.arena.alloc_from_iter(result.skip_binder().iter().copied().chain(extra_predicates));
+
+ // Double check that the bounds *only* contain `SelfTy: Trait` preds.
+ let self_ty = match tcx.def_kind(def_id) {
+ DefKind::TyParam => Ty::new_param(
+ tcx,
+ tcx.generics_of(item_def_id)
+ .param_def_id_to_index(tcx, def_id.to_def_id())
+ .expect("expected generic param to be owned by item"),
+ tcx.item_name(def_id.to_def_id()),
+ ),
+ DefKind::Trait | DefKind::TraitAlias => tcx.types.self_param,
+ _ => unreachable!(),
+ };
+ assert_only_contains_predicates_from(
+ PredicateFilter::SelfTraitThatDefines(assoc_name),
+ bounds,
+ self_ty,
);
- ty::EarlyBinder::bind(
- tcx.arena.alloc_from_iter(result.skip_binder().iter().copied().chain(extra_predicates)),
- )
+ ty::EarlyBinder::bind(bounds)
}
impl<'tcx> ItemCtxt<'tcx> {
@@ -838,7 +875,6 @@ fn probe_ty_param_bounds_in_generics(
&self,
hir_generics: &'tcx hir::Generics<'tcx>,
param_def_id: LocalDefId,
- ty: Ty<'tcx>,
filter: PredicateFilter,
) -> Vec<(ty::Clause<'tcx>, Span)> {
let mut bounds = Bounds::default();
@@ -848,13 +884,21 @@ fn probe_ty_param_bounds_in_generics(
continue;
};
- let bound_ty = if predicate.is_param_bound(param_def_id.to_def_id()) {
- ty
- } else if matches!(filter, PredicateFilter::All) {
- self.lowerer().lower_ty_maybe_return_type_notation(predicate.bounded_ty)
- } else {
- continue;
- };
+ match filter {
+ _ if predicate.is_param_bound(param_def_id.to_def_id()) => {
+ // Ok
+ }
+ PredicateFilter::All => {
+ // Ok
+ }
+ PredicateFilter::SelfOnly
+ | PredicateFilter::SelfTraitThatDefines(_)
+ | PredicateFilter::SelfConstIfConst
+ | PredicateFilter::SelfAndAssociatedTypeBounds => continue,
+ PredicateFilter::ConstIfConst => unreachable!(),
+ }
+
+ let bound_ty = self.lowerer().lower_ty_maybe_return_type_notation(predicate.bounded_ty);
let bound_vars = self.tcx.late_bound_vars(predicate.hir_id);
self.lowerer().lower_bounds(
@@ -943,7 +987,7 @@ pub(super) fn const_conditions<'tcx>(
bounds.push_const_bound(
tcx,
ty::Binder::dummy(ty::TraitRef::identity(tcx, def_id.to_def_id())),
- ty::HostPolarity::Maybe,
+ ty::BoundConstness::Maybe,
DUMMY_SP,
);
@@ -963,7 +1007,7 @@ pub(super) fn const_conditions<'tcx>(
clause.kind().map_bound(|clause| match clause {
ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref,
- host: ty::HostPolarity::Maybe,
+ constness: ty::BoundConstness::Maybe,
}) => trait_ref,
_ => bug!("converted {clause:?}"),
}),
@@ -1001,7 +1045,7 @@ pub(super) fn implied_const_bounds<'tcx>(
clause.kind().map_bound(|clause| match clause {
ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref,
- host: ty::HostPolarity::Maybe,
+ constness: ty::BoundConstness::Maybe,
}) => trait_ref,
_ => bug!("converted {clause:?}"),
}),
diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
index f7daef3..dc3ef99 100644
--- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
+++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
@@ -6,12 +6,14 @@
//! the types in HIR to identify late-bound lifetimes and assign their Debruijn indices. This file
//! is also responsible for assigning their semantics to implicit lifetimes in trait objects.
-use core::ops::ControlFlow;
+use std::cell::RefCell;
use std::fmt;
+use std::ops::ControlFlow;
use rustc_ast::visit::walk_list;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::sorted_map::SortedMap;
+use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::intravisit::{self, Visitor};
@@ -25,7 +27,7 @@
use rustc_middle::ty::{self, TyCtxt, TypeSuperVisitable, TypeVisitor};
use rustc_middle::{bug, span_bug};
use rustc_span::Span;
-use rustc_span::def_id::{DefId, LocalDefId};
+use rustc_span::def_id::{DefId, LocalDefId, LocalDefIdMap};
use rustc_span::symbol::{Ident, sym};
use tracing::{debug, debug_span, instrument};
@@ -80,6 +82,9 @@ struct NamedVarMap {
// - trait refs
// - bound types (like `T` in `for<'a> T<'a>: Foo`)
late_bound_vars: ItemLocalMap<Vec<ty::BoundVariableKind>>,
+
+ // Maps each opaque type to the list of lifetimes it captures.
+ opaque_captured_lifetimes: LocalDefIdMap<Vec<(ResolvedArg, LocalDefId)>>,
}
struct BoundVarContext<'a, 'tcx> {
@@ -147,6 +152,23 @@ enum Scope<'a> {
s: ScopeRef<'a>,
},
+ /// Remap lifetimes that appear in opaque types to fresh lifetime parameters. Given:
+ /// `fn foo<'a>() -> impl MyTrait<'a> { ... }`
+ ///
+ /// HIR tells us that `'a` refers to the lifetime bound on `foo`.
+ /// However, typeck and borrowck for opaques work based on using a new generic type.
+ /// `type MyAnonTy<'b> = impl MyTrait<'b>;`
+ ///
+ /// This scope collects the mapping `'a -> 'b`.
+ Opaque {
+ /// The opaque type we are traversing.
+ def_id: LocalDefId,
+ /// Mapping from each captured lifetime `'a` to the duplicate generic parameter `'b`.
+ captures: &'a RefCell<FxIndexMap<ResolvedArg, LocalDefId>>,
+
+ s: ScopeRef<'a>,
+ },
+
/// Disallows capturing late-bound vars from parent scopes.
///
/// This is necessary for something like `for<T> [(); { /* references T */ }]:`,
@@ -192,6 +214,12 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
.field("where_bound_origin", where_bound_origin)
.field("s", &"..")
.finish(),
+ Scope::Opaque { captures, def_id, s: _ } => f
+ .debug_struct("Opaque")
+ .field("def_id", def_id)
+ .field("captures", &captures.borrow())
+ .field("s", &"..")
+ .finish(),
Scope::Body { id, s: _ } => {
f.debug_struct("Body").field("id", id).field("s", &"..").finish()
}
@@ -226,6 +254,12 @@ pub(crate) fn provide(providers: &mut Providers) {
is_late_bound_map,
object_lifetime_default,
late_bound_vars_map: |tcx, id| &tcx.resolve_bound_vars(id).late_bound_vars,
+ opaque_captured_lifetimes: |tcx, id| {
+ &tcx.resolve_bound_vars(tcx.local_def_id_to_hir_id(id).owner)
+ .opaque_captured_lifetimes
+ .get(&id)
+ .map_or(&[][..], |x| &x[..])
+ },
..*providers
};
@@ -236,8 +270,11 @@ pub(crate) fn provide(providers: &mut Providers) {
/// `named_variable_map`, `is_late_bound_map`, etc.
#[instrument(level = "debug", skip(tcx))]
fn resolve_bound_vars(tcx: TyCtxt<'_>, local_def_id: hir::OwnerId) -> ResolveBoundVars {
- let mut named_variable_map =
- NamedVarMap { defs: Default::default(), late_bound_vars: Default::default() };
+ let mut named_variable_map = NamedVarMap {
+ defs: Default::default(),
+ late_bound_vars: Default::default(),
+ opaque_captured_lifetimes: Default::default(),
+ };
let mut visitor = BoundVarContext {
tcx,
map: &mut named_variable_map,
@@ -264,13 +301,16 @@ fn resolve_bound_vars(tcx: TyCtxt<'_>, local_def_id: hir::OwnerId) -> ResolveBou
let defs = named_variable_map.defs.into_sorted_stable_ord();
let late_bound_vars = named_variable_map.late_bound_vars.into_sorted_stable_ord();
+ let opaque_captured_lifetimes = named_variable_map.opaque_captured_lifetimes;
let rl = ResolveBoundVars {
defs: SortedMap::from_presorted_elements(defs),
late_bound_vars: SortedMap::from_presorted_elements(late_bound_vars),
+ opaque_captured_lifetimes,
};
debug!(?rl.defs);
debug!(?rl.late_bound_vars);
+ debug!(?rl.opaque_captured_lifetimes);
rl
}
@@ -306,6 +346,26 @@ fn generic_param_def_as_bound_arg(param: &ty::GenericParamDef) -> ty::BoundVaria
}
}
+/// Whether this opaque always captures lifetimes in scope.
+/// Right now, this is all RPITIT and TAITs, and when `lifetime_capture_rules_2024`
+/// is enabled. We don't check the span of the edition, since this is done
+/// on a per-opaque basis to account for nested opaques.
+fn opaque_captures_all_in_scope_lifetimes<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ opaque: &'tcx hir::OpaqueTy<'tcx>,
+) -> bool {
+ match opaque.origin {
+ // If the opaque has the `use<...>` syntax, the user is telling us that they only want
+ // to account for those lifetimes, so do not try to be clever.
+ _ if opaque.bounds.iter().any(|bound| matches!(bound, hir::GenericBound::Use(..))) => false,
+ hir::OpaqueTyOrigin::AsyncFn { .. } | hir::OpaqueTyOrigin::TyAlias { .. } => true,
+ _ if tcx.features().lifetime_capture_rules_2024() || opaque.span.at_least_rust_2024() => {
+ true
+ }
+ hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl, .. } => in_trait_or_impl.is_some(),
+ }
+}
+
impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
/// Returns the binders in scope and the type of `Binder` that should be created for a poly trait ref.
fn poly_trait_ref_binder_info(&mut self) -> (Vec<ty::BoundVariableKind>, BinderScopeType) {
@@ -317,7 +377,9 @@ fn poly_trait_ref_binder_info(&mut self) -> (Vec<ty::BoundVariableKind>, BinderS
break (vec![], BinderScopeType::Normal);
}
- Scope::ObjectLifetimeDefault { s, .. } | Scope::LateBoundary { s, .. } => {
+ Scope::Opaque { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::LateBoundary { s, .. } => {
scope = s;
}
@@ -488,29 +550,102 @@ fn visit_ty(&mut self, t: &'v hir::Ty<'v>) -> Self::Result {
}
}
+ /// Resolve the lifetimes inside the opaque type, and save them into
+ /// `opaque_captured_lifetimes`.
+ ///
+ /// This method has special handling for opaques that capture all lifetimes,
+ /// like async desugaring.
#[instrument(level = "debug", skip(self))]
fn visit_opaque_ty(&mut self, opaque: &'tcx rustc_hir::OpaqueTy<'tcx>) {
- // We want to start our early-bound indices at the end of the parent scope,
- // not including any parent `impl Trait`s.
- let mut bound_vars = FxIndexMap::default();
- debug!(?opaque.generics.params);
- for param in opaque.generics.params {
- let arg = ResolvedArg::early(param);
- bound_vars.insert(param.def_id, arg);
+ let captures = RefCell::new(FxIndexMap::default());
+
+ let capture_all_in_scope_lifetimes =
+ opaque_captures_all_in_scope_lifetimes(self.tcx, opaque);
+ if capture_all_in_scope_lifetimes {
+ let lifetime_ident = |def_id: LocalDefId| {
+ let name = self.tcx.item_name(def_id.to_def_id());
+ let span = self.tcx.def_span(def_id);
+ Ident::new(name, span)
+ };
+
+ // We list scopes outwards, so we see lifetime parameters in reverse
+ // declaration order. In order to stay consistent with what `generics_of` would
+ // give, we reverse the IndexMap after collecting the early captures.
+ let mut late_depth = 0;
+ let mut scope = self.scope;
+ let mut crossed_late_boundary = None;
+ let mut opaque_capture_scopes = vec![(opaque.def_id, &captures)];
+ loop {
+ match *scope {
+ Scope::Binder { ref bound_vars, scope_type, s, .. } => {
+ for (&original_lifetime, &def) in bound_vars.iter().rev() {
+ if let ResolvedArg::LateBound(..) = def
+ && crossed_late_boundary.is_some()
+ {
+ continue;
+ }
+ if let DefKind::LifetimeParam = self.tcx.def_kind(original_lifetime) {
+ let def = def.shifted(late_depth);
+ let ident = lifetime_ident(original_lifetime);
+ self.remap_opaque_captures(&opaque_capture_scopes, def, ident);
+ }
+ }
+ match scope_type {
+ BinderScopeType::Normal => late_depth += 1,
+ BinderScopeType::Concatenating => {}
+ }
+ scope = s;
+ }
+
+ Scope::Root { mut opt_parent_item } => {
+ while let Some(parent_item) = opt_parent_item {
+ let parent_generics = self.tcx.generics_of(parent_item);
+ for param in parent_generics.own_params.iter().rev() {
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ let def = ResolvedArg::EarlyBound(param.def_id.expect_local());
+ let ident = lifetime_ident(param.def_id.expect_local());
+ self.remap_opaque_captures(&opaque_capture_scopes, def, ident);
+ }
+ }
+ opt_parent_item = parent_generics.parent.and_then(DefId::as_local);
+ }
+ break;
+ }
+
+ Scope::Opaque { captures, def_id, s } => {
+ opaque_capture_scopes.push((def_id, captures));
+ late_depth = 0;
+ scope = s;
+ }
+
+ Scope::Body { .. } => {
+ bug!("{:?}", scope)
+ }
+
+ Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+
+ Scope::LateBoundary { s, what, .. } => {
+ crossed_late_boundary = Some(what);
+ scope = s;
+ }
+ }
+ }
+ captures.borrow_mut().reverse();
}
- let hir_id = self.tcx.local_def_id_to_hir_id(opaque.def_id);
- let scope = Scope::Binder {
- hir_id,
- bound_vars,
- s: self.scope,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
+ let scope = Scope::Opaque { captures: &captures, def_id: opaque.def_id, s: self.scope };
self.with(scope, |this| {
let scope = Scope::TraitRefBoundary { s: this.scope };
this.with(scope, |this| intravisit::walk_opaque_ty(this, opaque))
- })
+ });
+
+ let captures = captures.into_inner().into_iter().collect();
+ debug!(?captures);
+ self.map.opaque_captured_lifetimes.insert(opaque.def_id, captures);
}
#[instrument(level = "debug", skip(self))]
@@ -685,67 +820,6 @@ fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
};
self.with(scope, |this| this.visit_ty(mt.ty));
}
- hir::TyKind::OpaqueDef(opaque_ty, lifetimes) => {
- self.visit_opaque_ty(opaque_ty);
-
- // Resolve the lifetimes in the bounds to the lifetime defs in the generics.
- // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
- // `type MyAnonTy<'b> = impl MyTrait<'b>;`
- // ^ ^ this gets resolved in the scope of
- // the opaque_ty generics
-
- // Resolve the lifetimes that are applied to the opaque type.
- // These are resolved in the current scope.
- // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
- // `fn foo<'a>() -> MyAnonTy<'a> { ... }`
- // ^ ^this gets resolved in the current scope
- for lifetime in lifetimes {
- let hir::GenericArg::Lifetime(lifetime) = lifetime else { continue };
- self.visit_lifetime(lifetime);
-
- // Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
- // and ban them. Type variables instantiated inside binders aren't
- // well-supported at the moment, so this doesn't work.
- // In the future, this should be fixed and this error should be removed.
- let def = self.map.defs.get(&lifetime.hir_id.local_id).copied();
- let Some(ResolvedArg::LateBound(_, _, lifetime_def_id)) = def else { continue };
- let lifetime_hir_id = self.tcx.local_def_id_to_hir_id(lifetime_def_id);
-
- let bad_place = match self.tcx.hir_node(self.tcx.parent_hir_id(lifetime_hir_id))
- {
- // Opaques do not declare their own lifetimes, so if a lifetime comes from an opaque
- // it must be a reified late-bound lifetime from a trait goal.
- hir::Node::OpaqueTy(_) => "higher-ranked lifetime from outer `impl Trait`",
- // Other items are fine.
- hir::Node::Item(_) | hir::Node::TraitItem(_) | hir::Node::ImplItem(_) => {
- continue;
- }
- hir::Node::Ty(hir::Ty { kind: hir::TyKind::BareFn(_), .. }) => {
- "higher-ranked lifetime from function pointer"
- }
- hir::Node::Ty(hir::Ty { kind: hir::TyKind::TraitObject(..), .. }) => {
- "higher-ranked lifetime from `dyn` type"
- }
- _ => "higher-ranked lifetime",
- };
-
- let (span, label) = if lifetime.ident.span == self.tcx.def_span(lifetime_def_id)
- {
- (opaque_ty.span, Some(opaque_ty.span))
- } else {
- (lifetime.ident.span, None)
- };
-
- // Ensure that the parent of the def is an item, not HRTB
- self.tcx.dcx().emit_err(errors::OpaqueCapturesHigherRankedLifetime {
- span,
- label,
- decl_span: self.tcx.def_span(lifetime_def_id),
- bad_place,
- });
- self.uninsert_lifetime_on_error(lifetime, def.unwrap());
- }
- }
_ => intravisit::walk_ty(self, ty),
}
}
@@ -1129,6 +1203,7 @@ fn resolve_lifetime_ref(
let mut scope = self.scope;
let mut outermost_body = None;
let mut crossed_late_boundary = None;
+ let mut opaque_capture_scopes = vec![];
let result = loop {
match *scope {
Scope::Body { id, s } => {
@@ -1204,6 +1279,12 @@ fn resolve_lifetime_ref(
scope = s;
}
+ Scope::Opaque { captures, def_id, s } => {
+ opaque_capture_scopes.push((def_id, captures));
+ late_depth = 0;
+ scope = s;
+ }
+
Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. } => {
@@ -1218,6 +1299,8 @@ fn resolve_lifetime_ref(
};
if let Some(mut def) = result {
+ def = self.remap_opaque_captures(&opaque_capture_scopes, def, lifetime_ref.ident);
+
if let ResolvedArg::EarlyBound(..) = def {
// Do not free early-bound regions, only late-bound ones.
} else if let ResolvedArg::LateBound(_, _, param_def_id) = def
@@ -1291,6 +1374,7 @@ fn resolve_lifetime_ref(
Scope::Root { .. } => break,
Scope::Binder { s, .. }
| Scope::Body { s, .. }
+ | Scope::Opaque { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@@ -1306,6 +1390,79 @@ fn resolve_lifetime_ref(
);
}
+ /// Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
+ /// and ban them. Type variables instantiated inside binders aren't
+ /// well-supported at the moment, so this doesn't work.
+ /// In the future, this should be fixed and this error should be removed.
+ fn check_lifetime_is_capturable(
+ &self,
+ opaque_def_id: LocalDefId,
+ lifetime: ResolvedArg,
+ capture_span: Span,
+ ) -> Result<(), ErrorGuaranteed> {
+ let ResolvedArg::LateBound(_, _, lifetime_def_id) = lifetime else { return Ok(()) };
+ let lifetime_hir_id = self.tcx.local_def_id_to_hir_id(lifetime_def_id);
+ let bad_place = match self.tcx.hir_node(self.tcx.parent_hir_id(lifetime_hir_id)) {
+ // Opaques do not declare their own lifetimes, so if a lifetime comes from an opaque
+ // it must be a reified late-bound lifetime from a trait goal.
+ hir::Node::OpaqueTy(_) => "higher-ranked lifetime from outer `impl Trait`",
+ // Other items are fine.
+ hir::Node::Item(_) | hir::Node::TraitItem(_) | hir::Node::ImplItem(_) => return Ok(()),
+ hir::Node::Ty(hir::Ty { kind: hir::TyKind::BareFn(_), .. }) => {
+ "higher-ranked lifetime from function pointer"
+ }
+ hir::Node::Ty(hir::Ty { kind: hir::TyKind::TraitObject(..), .. }) => {
+ "higher-ranked lifetime from `dyn` type"
+ }
+ _ => "higher-ranked lifetime",
+ };
+
+ let decl_span = self.tcx.def_span(lifetime_def_id);
+ let (span, label) = if capture_span != decl_span {
+ (capture_span, None)
+ } else {
+ let opaque_span = self.tcx.def_span(opaque_def_id);
+ (opaque_span, Some(opaque_span))
+ };
+
+ // Ensure that the parent of the def is an item, not HRTB
+ let guar = self.tcx.dcx().emit_err(errors::OpaqueCapturesHigherRankedLifetime {
+ span,
+ label,
+ decl_span,
+ bad_place,
+ });
+ Err(guar)
+ }
+
+ #[instrument(level = "trace", skip(self, opaque_capture_scopes), ret)]
+ fn remap_opaque_captures(
+ &self,
+ opaque_capture_scopes: &Vec<(LocalDefId, &RefCell<FxIndexMap<ResolvedArg, LocalDefId>>)>,
+ mut lifetime: ResolvedArg,
+ ident: Ident,
+ ) -> ResolvedArg {
+ if let Some(&(opaque_def_id, _)) = opaque_capture_scopes.last() {
+ if let Err(guar) =
+ self.check_lifetime_is_capturable(opaque_def_id, lifetime, ident.span)
+ {
+ lifetime = ResolvedArg::Error(guar);
+ }
+ }
+
+ for &(opaque_def_id, captures) in opaque_capture_scopes.iter().rev() {
+ let mut captures = captures.borrow_mut();
+ let remapped = *captures.entry(lifetime).or_insert_with(|| {
+ let feed = self.tcx.create_def(opaque_def_id, ident.name, DefKind::LifetimeParam);
+ feed.def_span(ident.span);
+ feed.def_ident_span(Some(ident.span));
+ feed.def_id()
+ });
+ lifetime = ResolvedArg::EarlyBound(remapped);
+ }
+ lifetime
+ }
+
fn resolve_type_ref(&mut self, param_def_id: LocalDefId, hir_id: HirId) {
// Walk up the scope chain, tracking the number of fn scopes
// that we pass through, until we find a lifetime with the
@@ -1345,6 +1502,7 @@ fn resolve_type_ref(&mut self, param_def_id: LocalDefId, hir_id: HirId) {
}
Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Opaque { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. } => {
scope = s;
@@ -1425,6 +1583,7 @@ fn resolve_type_ref(&mut self, param_def_id: LocalDefId, hir_id: HirId) {
Scope::Root { .. } => break,
Scope::Binder { s, .. }
| Scope::Body { s, .. }
+ | Scope::Opaque { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@@ -1501,6 +1660,7 @@ fn visit_segment_args(
Scope::Binder { s, .. }
| Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Opaque { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::LateBoundary { s, .. } => {
@@ -1786,7 +1946,8 @@ fn visit_fn_like_elision(
fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
let mut late_depth = 0;
let mut scope = self.scope;
- let lifetime = loop {
+ let mut opaque_capture_scopes = vec![];
+ let mut lifetime = loop {
match *scope {
Scope::Binder { s, scope_type, .. } => {
match scope_type {
@@ -1800,7 +1961,15 @@ fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime)
Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
- Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l,
+ Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => {
+ break l.shifted(late_depth);
+ }
+
+ Scope::Opaque { captures, def_id, s } => {
+ opaque_capture_scopes.push((def_id, captures));
+ late_depth = 0;
+ scope = s;
+ }
Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
@@ -1809,7 +1978,10 @@ fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime)
}
}
};
- self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));
+
+ lifetime = self.remap_opaque_captures(&opaque_capture_scopes, lifetime, lifetime_ref.ident);
+
+ self.insert_lifetime(lifetime_ref, lifetime);
}
#[instrument(level = "debug", skip(self))]
@@ -1818,18 +1990,6 @@ fn insert_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime, def: ResolvedAr
self.map.defs.insert(lifetime_ref.hir_id.local_id, def);
}
- /// Sometimes we resolve a lifetime, but later find that it is an
- /// error (esp. around impl trait). In that case, we remove the
- /// entry into `map.defs` so as not to confuse later code.
- fn uninsert_lifetime_on_error(
- &mut self,
- lifetime_ref: &'tcx hir::Lifetime,
- bad_def: ResolvedArg,
- ) {
- let old_value = self.map.defs.remove(&lifetime_ref.hir_id.local_id);
- assert_eq!(old_value, Some(bad_def));
- }
-
// When we have a return type notation type in a where clause, like
// `where <T as Trait>::method(..): Send`, we need to introduce new bound
// vars to the existing where clause's binder, to represent the lifetimes
@@ -2013,18 +2173,22 @@ fn is_late_bound_map(
tcx: TyCtxt<'_>,
owner_id: hir::OwnerId,
) -> Option<&FxIndexSet<hir::ItemLocalId>> {
- let decl = tcx.hir().fn_decl_by_hir_id(owner_id.into())?;
+ let sig = tcx.hir().fn_sig_by_hir_id(owner_id.into())?;
let generics = tcx.hir().get_generics(owner_id.def_id)?;
let mut late_bound = FxIndexSet::default();
let mut constrained_by_input = ConstrainedCollector { regions: Default::default(), tcx };
- for arg_ty in decl.inputs {
+ for arg_ty in sig.decl.inputs {
constrained_by_input.visit_ty(arg_ty);
}
- let mut appears_in_output = AllCollector::default();
- intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
+ let mut appears_in_output =
+ AllCollector { tcx, has_fully_capturing_opaque: false, regions: Default::default() };
+ intravisit::walk_fn_ret_ty(&mut appears_in_output, &sig.decl.output);
+ if appears_in_output.has_fully_capturing_opaque {
+ appears_in_output.regions.extend(generics.params.iter().map(|param| param.def_id));
+ }
debug!(?constrained_by_input.regions);
@@ -2032,7 +2196,8 @@ fn is_late_bound_map(
//
// Subtle point: because we disallow nested bindings, we can just
// ignore binders here and scrape up all names we see.
- let mut appears_in_where_clause = AllCollector::default();
+ let mut appears_in_where_clause =
+ AllCollector { tcx, has_fully_capturing_opaque: true, regions: Default::default() };
appears_in_where_clause.visit_generics(generics);
debug!(?appears_in_where_clause.regions);
@@ -2198,17 +2363,26 @@ fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
}
}
- #[derive(Default)]
- struct AllCollector {
+ struct AllCollector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ has_fully_capturing_opaque: bool,
regions: FxHashSet<LocalDefId>,
}
- impl<'v> Visitor<'v> for AllCollector {
+ impl<'v> Visitor<'v> for AllCollector<'v> {
fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
if let hir::LifetimeName::Param(def_id) = lifetime_ref.res {
self.regions.insert(def_id);
}
}
+
+ fn visit_opaque_ty(&mut self, opaque: &'v hir::OpaqueTy<'v>) {
+ if !self.has_fully_capturing_opaque {
+ self.has_fully_capturing_opaque =
+ opaque_captures_all_in_scope_lifetimes(self.tcx, opaque);
+ }
+ intravisit::walk_opaque_ty(self, opaque);
+ }
}
}
diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs
index 7fa9dfe..a92a5e4 100644
--- a/compiler/rustc_hir_analysis/src/errors.rs
+++ b/compiler/rustc_hir_analysis/src/errors.rs
@@ -1624,6 +1624,16 @@ pub(crate) struct InvalidReceiverTy<'tcx> {
}
#[derive(Diagnostic)]
+#[diag(hir_analysis_invalid_generic_receiver_ty, code = E0801)]
+#[note]
+#[help(hir_analysis_invalid_generic_receiver_ty_help)]
+pub(crate) struct InvalidGenericReceiverTy<'tcx> {
+ #[primary_span]
+ pub span: Span,
+ pub receiver_ty: Ty<'tcx>,
+}
+
+#[derive(Diagnostic)]
#[diag(hir_analysis_cmse_inputs_stack_spill, code = E0798)]
#[note]
pub(crate) struct CmseInputsStackSpill {
diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs
index a570908..85ba883 100644
--- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs
+++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs
@@ -152,9 +152,9 @@ pub(crate) fn lower_bounds<'hir, I: IntoIterator<Item = &'hir hir::GenericBound<
'tcx: 'hir,
{
for hir_bound in hir_bounds {
- // In order to avoid cycles, when we're lowering `SelfThatDefines`,
+ // In order to avoid cycles, when we're lowering `SelfTraitThatDefines`,
// we skip over any traits that don't define the given associated type.
- if let PredicateFilter::SelfThatDefines(assoc_name) = predicate_filter {
+ if let PredicateFilter::SelfTraitThatDefines(assoc_name) = predicate_filter {
if let Some(trait_ref) = hir_bound.trait_ref()
&& let Some(trait_did) = trait_ref.trait_def_id()
&& self.tcx().trait_may_define_assoc_item(trait_did, assoc_name)
@@ -168,12 +168,6 @@ pub(crate) fn lower_bounds<'hir, I: IntoIterator<Item = &'hir hir::GenericBound<
match hir_bound {
hir::GenericBound::Trait(poly_trait_ref) => {
let hir::TraitBoundModifiers { constness, polarity } = poly_trait_ref.modifiers;
- let polarity = match polarity {
- rustc_ast::BoundPolarity::Positive => ty::PredicatePolarity::Positive,
- rustc_ast::BoundPolarity::Negative(_) => ty::PredicatePolarity::Negative,
- rustc_ast::BoundPolarity::Maybe(_) => continue,
- };
-
let _ = self.lower_poly_trait_ref(
&poly_trait_ref.trait_ref,
poly_trait_ref.span,
@@ -395,7 +389,6 @@ pub(super) fn lower_assoc_item_constraint(
match predicate_filter {
PredicateFilter::All
| PredicateFilter::SelfOnly
- | PredicateFilter::SelfThatDefines(_)
| PredicateFilter::SelfAndAssociatedTypeBounds => {
bounds.push_projection_bound(
tcx,
@@ -406,6 +399,8 @@ pub(super) fn lower_assoc_item_constraint(
constraint.span,
);
}
+ // SelfTraitThatDefines is only interested in trait predicates.
+ PredicateFilter::SelfTraitThatDefines(_) => {}
// `ConstIfConst` is only interested in `~const` bounds.
PredicateFilter::ConstIfConst | PredicateFilter::SelfConstIfConst => {}
}
@@ -432,7 +427,7 @@ pub(super) fn lower_assoc_item_constraint(
);
}
PredicateFilter::SelfOnly
- | PredicateFilter::SelfThatDefines(_)
+ | PredicateFilter::SelfTraitThatDefines(_)
| PredicateFilter::SelfConstIfConst => {}
}
}
diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/dyn_compatibility.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/dyn_compatibility.rs
index f2ee4b0..5e27ace 100644
--- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/dyn_compatibility.rs
+++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/dyn_compatibility.rs
@@ -51,7 +51,7 @@ pub(super) fn lower_trait_object_ty(
&trait_bound.trait_ref,
trait_bound.span,
hir::BoundConstness::Never,
- ty::PredicatePolarity::Positive,
+ hir::BoundPolarity::Positive,
dummy_self,
&mut bounds,
PredicateFilter::SelfOnly,
@@ -140,9 +140,7 @@ pub(super) fn lower_trait_object_ty(
tcx.associated_items(pred.def_id())
.in_definition_order()
.filter(|item| item.kind == ty::AssocKind::Type)
- .filter(|item| {
- !item.is_impl_trait_in_trait() && !item.is_effects_desugaring
- })
+ .filter(|item| !item.is_impl_trait_in_trait())
.map(|item| item.def_id),
);
}
diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs
index 2d0c3ec..fb23ad1 100644
--- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs
+++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs
@@ -20,6 +20,7 @@
pub mod generics;
mod lint;
+use std::assert_matches::assert_matches;
use std::slice;
use rustc_ast::TraitObjectSyntax;
@@ -75,7 +76,7 @@ pub enum PredicateFilter {
/// Only traits that reference `Self: ..` and define an associated type
/// with the given ident are implied by the trait. This mode exists to
/// side-step query cycles when lowering associated types.
- SelfThatDefines(Ident),
+ SelfTraitThatDefines(Ident),
/// Only traits that reference `Self: ..` and their associated type bounds.
/// For example, given `Self: Tr<A: B>`, this would expand to `Self: Tr`
@@ -294,13 +295,23 @@ pub fn lower_lifetime(
lifetime: &hir::Lifetime,
reason: RegionInferReason<'_>,
) -> ty::Region<'tcx> {
+ if let Some(resolved) = self.tcx().named_bound_var(lifetime.hir_id) {
+ self.lower_resolved_lifetime(resolved)
+ } else {
+ self.re_infer(lifetime.ident.span, reason)
+ }
+ }
+
+ /// Lower a lifetime from the HIR to our internal notion of a lifetime called a *region*.
+ #[instrument(level = "debug", skip(self), ret)]
+ pub fn lower_resolved_lifetime(&self, resolved: rbv::ResolvedArg) -> ty::Region<'tcx> {
let tcx = self.tcx();
let lifetime_name = |def_id| tcx.hir().name(tcx.local_def_id_to_hir_id(def_id));
- match tcx.named_bound_var(lifetime.hir_id) {
- Some(rbv::ResolvedArg::StaticLifetime) => tcx.lifetimes.re_static,
+ match resolved {
+ rbv::ResolvedArg::StaticLifetime => tcx.lifetimes.re_static,
- Some(rbv::ResolvedArg::LateBound(debruijn, index, def_id)) => {
+ rbv::ResolvedArg::LateBound(debruijn, index, def_id) => {
let name = lifetime_name(def_id);
let br = ty::BoundRegion {
var: ty::BoundVar::from_u32(index),
@@ -309,7 +320,7 @@ pub fn lower_lifetime(
ty::Region::new_bound(tcx, debruijn, br)
}
- Some(rbv::ResolvedArg::EarlyBound(def_id)) => {
+ rbv::ResolvedArg::EarlyBound(def_id) => {
let name = tcx.hir().ty_param_name(def_id);
let item_def_id = tcx.hir().ty_param_owner(def_id);
let generics = tcx.generics_of(item_def_id);
@@ -317,7 +328,7 @@ pub fn lower_lifetime(
ty::Region::new_early_param(tcx, ty::EarlyParamRegion { index, name })
}
- Some(rbv::ResolvedArg::Free(scope, id)) => {
+ rbv::ResolvedArg::Free(scope, id) => {
let name = lifetime_name(id);
ty::Region::new_late_param(
tcx,
@@ -328,9 +339,7 @@ pub fn lower_lifetime(
// (*) -- not late-bound, won't change
}
- Some(rbv::ResolvedArg::Error(guar)) => ty::Region::new_error(tcx, guar),
-
- None => self.re_infer(lifetime.ident.span, reason),
+ rbv::ResolvedArg::Error(guar) => ty::Region::new_error(tcx, guar),
}
}
@@ -659,7 +668,7 @@ pub(crate) fn lower_poly_trait_ref(
trait_ref: &hir::TraitRef<'tcx>,
span: Span,
constness: hir::BoundConstness,
- polarity: ty::PredicatePolarity,
+ polarity: hir::BoundPolarity,
self_ty: Ty<'tcx>,
bounds: &mut Bounds<'tcx>,
predicate_filter: PredicateFilter,
@@ -681,15 +690,6 @@ pub(crate) fn lower_poly_trait_ref(
Some(self_ty),
);
- if let hir::BoundConstness::Always(span) | hir::BoundConstness::Maybe(span) = constness
- && !self.tcx().is_const_trait(trait_def_id)
- {
- self.dcx().emit_err(crate::errors::ConstBoundForNonConstTrait {
- span,
- modifier: constness.as_str(),
- });
- }
-
let tcx = self.tcx();
let bound_vars = tcx.late_bound_vars(trait_ref.hir_ref_id);
debug!(?bound_vars);
@@ -699,10 +699,43 @@ pub(crate) fn lower_poly_trait_ref(
bound_vars,
);
+ let polarity = match polarity {
+ rustc_ast::BoundPolarity::Positive => ty::PredicatePolarity::Positive,
+ rustc_ast::BoundPolarity::Negative(_) => ty::PredicatePolarity::Negative,
+ rustc_ast::BoundPolarity::Maybe(_) => {
+ // Validate associated type at least. We may want to reject these
+ // outright in the future...
+ for constraint in trait_segment.args().constraints {
+ let _ = self.lower_assoc_item_constraint(
+ trait_ref.hir_ref_id,
+ poly_trait_ref,
+ constraint,
+ &mut Default::default(),
+ &mut Default::default(),
+ constraint.span,
+ predicate_filter,
+ );
+ }
+ return arg_count;
+ }
+ };
+
+ if let hir::BoundConstness::Always(span) | hir::BoundConstness::Maybe(span) = constness
+ && !self.tcx().is_const_trait(trait_def_id)
+ {
+ self.dcx().emit_err(crate::errors::ConstBoundForNonConstTrait {
+ span,
+ modifier: constness.as_str(),
+ });
+ }
+
match predicate_filter {
+ // This is only concerned with trait predicates.
+ PredicateFilter::SelfTraitThatDefines(..) => {
+ bounds.push_trait_bound(tcx, poly_trait_ref, span, polarity);
+ }
PredicateFilter::All
| PredicateFilter::SelfOnly
- | PredicateFilter::SelfThatDefines(..)
| PredicateFilter::SelfAndAssociatedTypeBounds => {
debug!(?poly_trait_ref);
bounds.push_trait_bound(tcx, poly_trait_ref, span, polarity);
@@ -713,7 +746,7 @@ pub(crate) fn lower_poly_trait_ref(
bounds.push_const_bound(
tcx,
poly_trait_ref,
- ty::HostPolarity::Const,
+ ty::BoundConstness::Const,
span,
);
}
@@ -736,7 +769,12 @@ pub(crate) fn lower_poly_trait_ref(
PredicateFilter::ConstIfConst | PredicateFilter::SelfConstIfConst => match constness {
hir::BoundConstness::Maybe(span) => {
if polarity == ty::PredicatePolarity::Positive {
- bounds.push_const_bound(tcx, poly_trait_ref, ty::HostPolarity::Maybe, span);
+ bounds.push_const_bound(
+ tcx,
+ poly_trait_ref,
+ ty::BoundConstness::Maybe,
+ span,
+ );
}
}
hir::BoundConstness::Always(_) | hir::BoundConstness::Never => {}
@@ -749,11 +787,11 @@ pub(crate) fn lower_poly_trait_ref(
// since we should have emitted an error for them earlier, and they
// would not be well-formed!
if polarity != ty::PredicatePolarity::Positive {
- assert!(
- self.dcx().has_errors().is_some(),
+ self.dcx().span_delayed_bug(
+ constraint.span,
"negative trait bounds should not have assoc item constraints",
);
- continue;
+ break;
}
// Specify type to assert that error was already reported in `Err` case.
@@ -1798,7 +1836,7 @@ pub fn lower_path(
match path.res {
Res::Def(DefKind::OpaqueTy, did) => {
// Check for desugared `impl Trait`.
- assert!(tcx.is_type_alias_impl_trait(did));
+ assert_matches!(tcx.opaque_ty_origin(did), hir::OpaqueTyOrigin::TyAlias { .. });
let item_segment = path.segments.split_last().unwrap();
let _ = self
.prohibit_generic_args(item_segment.1.iter(), GenericsArgsErrExtend::OpaqueTy);
@@ -2094,13 +2132,11 @@ pub fn lower_ty(&self, hir_ty: &hir::Ty<'tcx>) -> Ty<'tcx> {
let opt_self_ty = maybe_qself.as_ref().map(|qself| self.lower_ty(qself));
self.lower_path(opt_self_ty, path, hir_ty.hir_id, false)
}
- &hir::TyKind::OpaqueDef(opaque_ty, lifetimes) => {
- let local_def_id = opaque_ty.def_id;
-
+ &hir::TyKind::OpaqueDef(opaque_ty) => {
// If this is an RPITIT and we are using the new RPITIT lowering scheme, we
// generate the def_id of an associated type for the trait and return as
// type a projection.
- match opaque_ty.origin {
+ let in_trait = match opaque_ty.origin {
hir::OpaqueTyOrigin::FnReturn {
in_trait_or_impl: Some(hir::RpitContext::Trait),
..
@@ -2108,11 +2144,7 @@ pub fn lower_ty(&self, hir_ty: &hir::Ty<'tcx>) -> Ty<'tcx> {
| hir::OpaqueTyOrigin::AsyncFn {
in_trait_or_impl: Some(hir::RpitContext::Trait),
..
- } => self.lower_opaque_ty(
- tcx.associated_type_for_impl_trait_in_trait(local_def_id).to_def_id(),
- lifetimes,
- true,
- ),
+ } => true,
hir::OpaqueTyOrigin::FnReturn {
in_trait_or_impl: None | Some(hir::RpitContext::TraitImpl),
..
@@ -2121,10 +2153,10 @@ pub fn lower_ty(&self, hir_ty: &hir::Ty<'tcx>) -> Ty<'tcx> {
in_trait_or_impl: None | Some(hir::RpitContext::TraitImpl),
..
}
- | hir::OpaqueTyOrigin::TyAlias { .. } => {
- self.lower_opaque_ty(local_def_id.to_def_id(), lifetimes, false)
- }
- }
+ | hir::OpaqueTyOrigin::TyAlias { .. } => false,
+ };
+
+ self.lower_opaque_ty(opaque_ty.def_id, in_trait)
}
// If we encounter a type relative path with RTN generics, then it must have
// *not* gone through `lower_ty_maybe_return_type_notation`, and therefore
@@ -2264,40 +2296,34 @@ pub fn lower_ty(&self, hir_ty: &hir::Ty<'tcx>) -> Ty<'tcx> {
}
/// Lower an opaque type (i.e., an existential impl-Trait type) from the HIR.
- #[instrument(level = "debug", skip_all, ret)]
- fn lower_opaque_ty(
- &self,
- def_id: DefId,
- lifetimes: &[hir::GenericArg<'_>],
- in_trait: bool,
- ) -> Ty<'tcx> {
- debug!(?def_id, ?lifetimes);
+ #[instrument(level = "debug", skip(self), ret)]
+ fn lower_opaque_ty(&self, def_id: LocalDefId, in_trait: bool) -> Ty<'tcx> {
let tcx = self.tcx();
+ let lifetimes = tcx.opaque_captured_lifetimes(def_id);
+ debug!(?lifetimes);
+
+ // If this is an RPITIT and we are using the new RPITIT lowering scheme, we
+ // generate the def_id of an associated type for the trait and return as
+ // type a projection.
+ let def_id = if in_trait {
+ tcx.associated_type_for_impl_trait_in_trait(def_id).to_def_id()
+ } else {
+ def_id.to_def_id()
+ };
+
let generics = tcx.generics_of(def_id);
debug!(?generics);
+ // We use `generics.count() - lifetimes.len()` here instead of `generics.parent_count`
+ // since return-position impl trait in trait squashes all of the generics from its source fn
+ // into its own generics, so the opaque's "own" params isn't always just lifetimes.
+ let offset = generics.count() - lifetimes.len();
+
let args = ty::GenericArgs::for_item(tcx, def_id, |param, _| {
- // We use `generics.count() - lifetimes.len()` here instead of `generics.parent_count`
- // since return-position impl trait in trait squashes all of the generics from its source fn
- // into its own generics, so the opaque's "own" params isn't always just lifetimes.
- if let Some(i) = (param.index as usize).checked_sub(generics.count() - lifetimes.len())
- {
- // Resolve our own lifetime parameters.
- let GenericParamDefKind::Lifetime { .. } = param.kind else {
- span_bug!(
- tcx.def_span(param.def_id),
- "only expected lifetime for opaque's own generics, got {:?}",
- param
- );
- };
- let hir::GenericArg::Lifetime(lifetime) = &lifetimes[i] else {
- bug!(
- "expected lifetime argument for param {param:?}, found {:?}",
- &lifetimes[i]
- )
- };
- self.lower_lifetime(lifetime, RegionInferReason::Param(¶m)).into()
+ if let Some(i) = (param.index as usize).checked_sub(offset) {
+ let (lifetime, _) = lifetimes[i];
+ self.lower_resolved_lifetime(lifetime).into()
} else {
tcx.mk_param_from_def(param)
}
diff --git a/compiler/rustc_hir_pretty/Cargo.toml b/compiler/rustc_hir_pretty/Cargo.toml
index aacf41b..9af1fb8 100644
--- a/compiler/rustc_hir_pretty/Cargo.toml
+++ b/compiler/rustc_hir_pretty/Cargo.toml
@@ -5,9 +5,9 @@
[dependencies]
# tidy-alphabetical-start
+rustc_abi = { path = "../rustc_abi" }
rustc_ast = { path = "../rustc_ast" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
rustc_hir = { path = "../rustc_hir" }
rustc_span = { path = "../rustc_span" }
-rustc_target = { path = "../rustc_target" }
# tidy-alphabetical-end
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index 61214b9..0a3aa8f 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -9,6 +9,7 @@
use std::cell::Cell;
use std::vec;
+use rustc_abi::ExternAbi;
use rustc_ast::util::parser::{self, AssocOp, Fixity};
use rustc_ast_pretty::pp::Breaks::{Consistent, Inconsistent};
use rustc_ast_pretty::pp::{self, Breaks};
@@ -20,7 +21,6 @@
use rustc_span::FileName;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{Ident, Symbol, kw};
-use rustc_target::spec::abi::Abi;
use {rustc_ast as ast, rustc_hir as hir};
pub fn id_to_string(map: &dyn rustc_hir::intravisit::Map<'_>, hir_id: HirId) -> String {
@@ -659,8 +659,6 @@ fn print_trait_ref(&mut self, t: &hir::TraitRef<'_>) {
fn print_opaque_ty(&mut self, o: &hir::OpaqueTy<'_>) {
self.head("opaque");
- self.print_generic_params(o.generics.params);
- self.print_where_clause(o.generics);
self.word("{");
self.print_bounds("impl", o.bounds);
self.word("}");
@@ -2242,7 +2240,7 @@ fn print_fn_output(&mut self, decl: &hir::FnDecl<'_>) {
fn print_ty_fn(
&mut self,
- abi: Abi,
+ abi: ExternAbi,
safety: hir::Safety,
decl: &hir::FnDecl<'_>,
name: Option<Symbol>,
@@ -2278,7 +2276,7 @@ fn print_fn_header_info(&mut self, header: hir::FnHeader) {
self.print_safety(header.safety);
- if header.abi != Abi::Rust {
+ if header.abi != ExternAbi::Rust {
self.word_nbsp("extern");
self.word_nbsp(header.abi.to_string());
}
diff --git a/compiler/rustc_hir_typeck/src/_match.rs b/compiler/rustc_hir_typeck/src/_match.rs
index 3372cae..1774772 100644
--- a/compiler/rustc_hir_typeck/src/_match.rs
+++ b/compiler/rustc_hir_typeck/src/_match.rs
@@ -601,7 +601,7 @@ pub(crate) fn return_position_impl_trait_from_match_expectation(
_ => return None,
};
let hir::OpaqueTyOrigin::FnReturn { parent: parent_def_id, .. } =
- self.tcx.opaque_type_origin(def_id)
+ self.tcx.local_opaque_ty_origin(def_id)
else {
return None;
};
diff --git a/compiler/rustc_hir_typeck/src/autoderef.rs b/compiler/rustc_hir_typeck/src/autoderef.rs
index c3e095b..7af2662 100644
--- a/compiler/rustc_hir_typeck/src/autoderef.rs
+++ b/compiler/rustc_hir_typeck/src/autoderef.rs
@@ -50,8 +50,9 @@ pub(crate) fn adjust_steps_as_infer_ok(
self.try_overloaded_deref(autoderef.span(), source).and_then(
|InferOk { value: method, obligations: o }| {
obligations.extend(o);
- if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() {
- Some(OverloadedDeref { region, mutbl, span: autoderef.span() })
+ // FIXME: we should assert the sig is &T here... there's no reason for this to be fallible.
+ if let ty::Ref(_, _, mutbl) = *method.sig.output().kind() {
+ Some(OverloadedDeref { mutbl, span: autoderef.span() })
} else {
None
}
diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs
index 9cf1ea3..f9a21a9 100644
--- a/compiler/rustc_hir_typeck/src/callee.rs
+++ b/compiler/rustc_hir_typeck/src/callee.rs
@@ -307,7 +307,7 @@ fn try_overloaded_call_traits(
if borrow {
// Check for &self vs &mut self in the method signature. Since this is either
// the Fn or FnMut trait, it should be one of those.
- let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() else {
+ let ty::Ref(_, _, mutbl) = method.sig.inputs()[0].kind() else {
bug!("Expected `FnMut`/`Fn` to take receiver by-ref/by-mut")
};
@@ -317,7 +317,7 @@ fn try_overloaded_call_traits(
let mutbl = AutoBorrowMutability::new(*mutbl, AllowTwoPhase::No);
autoref = Some(Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ kind: Adjust::Borrow(AutoBorrow::Ref(mutbl)),
target: method.sig.inputs()[0],
});
}
@@ -851,11 +851,16 @@ pub(super) fn enforce_context_effects(
return;
}
+ // If we have `rustc_do_not_const_check`, do not check `~const` bounds.
+ if self.tcx.has_attr(self.body_id, sym::rustc_do_not_const_check) {
+ return;
+ }
+
let host = match self.tcx.hir().body_const_context(self.body_id) {
Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => {
- ty::HostPolarity::Const
+ ty::BoundConstness::Const
}
- Some(hir::ConstContext::ConstFn) => ty::HostPolarity::Maybe,
+ Some(hir::ConstContext::ConstFn) => ty::BoundConstness::Maybe,
None => return,
};
diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs
index 6fa958d..87798ca 100644
--- a/compiler/rustc_hir_typeck/src/coercion.rs
+++ b/compiler/rustc_hir_typeck/src/coercion.rs
@@ -113,7 +113,7 @@ fn identity(_: Ty<'_>) -> Vec<Adjustment<'_>> {
vec![]
}
-fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>> {
+fn simple<'tcx>(kind: Adjust) -> impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>> {
move |target| vec![Adjustment { kind, target }]
}
@@ -484,14 +484,11 @@ fn coerce_borrowed_pointer(
// Now apply the autoref. We have to extract the region out of
// the final ref type we got.
- let ty::Ref(r_borrow, _, _) = ty.kind() else {
+ let ty::Ref(..) = ty.kind() else {
span_bug!(span, "expected a ref type, got {:?}", ty);
};
let mutbl = AutoBorrowMutability::new(mutbl_b, self.allow_two_phase);
- adjustments.push(Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*r_borrow, mutbl)),
- target: ty,
- });
+ adjustments.push(Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(mutbl)), target: ty });
debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}", ty, adjustments);
@@ -547,7 +544,7 @@ fn coerce_unsized(&self, mut source: Ty<'tcx>, mut target: Ty<'tcx>) -> CoerceRe
let mutbl = AutoBorrowMutability::new(mutbl_b, AllowTwoPhase::No);
Some((Adjustment { kind: Adjust::Deref(None), target: ty_a }, Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
+ kind: Adjust::Borrow(AutoBorrow::Ref(mutbl)),
target: Ty::new_ref(self.tcx, r_borrow, ty_a, mutbl_b),
}))
}
@@ -827,7 +824,7 @@ fn coerce_pin(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
};
let (pin, a_region, a_ty, mut_a) = extract_pin_mut(a)?;
- let (_, b_region, _b_ty, mut_b) = extract_pin_mut(b)?;
+ let (_, _, _b_ty, mut_b) = extract_pin_mut(b)?;
coerce_mutbls(mut_a, mut_b)?;
@@ -841,7 +838,7 @@ fn coerce_pin(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
// To complete the reborrow, we need to make sure we can unify the inner types, and if so we
// add the adjustments.
self.unify_and(a, b, |_inner_ty| {
- vec![Adjustment { kind: Adjust::ReborrowPin(b_region, mut_b), target: b }]
+ vec![Adjustment { kind: Adjust::ReborrowPin(mut_b), target: b }]
})
}
@@ -1321,7 +1318,7 @@ fn try_find_coercion_lub<E>(
let noop = match self.typeck_results.borrow().expr_adjustments(expr) {
&[
Adjustment { kind: Adjust::Deref(_), .. },
- Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(mutbl_adj)), .. },
] => {
match *self.node_ty(expr.hir_id).kind() {
ty::Ref(_, _, mt_orig) => {
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
index cceaaba..4f579b0 100644
--- a/compiler/rustc_hir_typeck/src/errors.rs
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -169,19 +169,34 @@ pub(crate) struct MissingParenthesesInRange {
pub(crate) enum NeverTypeFallbackFlowingIntoUnsafe {
#[help]
#[diag(hir_typeck_never_type_fallback_flowing_into_unsafe_call)]
- Call,
+ Call {
+ #[subdiagnostic]
+ sugg: SuggestAnnotations,
+ },
#[help]
#[diag(hir_typeck_never_type_fallback_flowing_into_unsafe_method)]
- Method,
+ Method {
+ #[subdiagnostic]
+ sugg: SuggestAnnotations,
+ },
#[help]
#[diag(hir_typeck_never_type_fallback_flowing_into_unsafe_path)]
- Path,
+ Path {
+ #[subdiagnostic]
+ sugg: SuggestAnnotations,
+ },
#[help]
#[diag(hir_typeck_never_type_fallback_flowing_into_unsafe_union_field)]
- UnionField,
+ UnionField {
+ #[subdiagnostic]
+ sugg: SuggestAnnotations,
+ },
#[help]
#[diag(hir_typeck_never_type_fallback_flowing_into_unsafe_deref)]
- Deref,
+ Deref {
+ #[subdiagnostic]
+ sugg: SuggestAnnotations,
+ },
}
#[derive(LintDiagnostic)]
@@ -191,6 +206,64 @@ pub(crate) struct DependencyOnUnitNeverTypeFallback<'tcx> {
#[note]
pub obligation_span: Span,
pub obligation: ty::Predicate<'tcx>,
+ #[subdiagnostic]
+ pub sugg: SuggestAnnotations,
+}
+
+#[derive(Clone)]
+pub(crate) enum SuggestAnnotation {
+ Unit(Span),
+ Path(Span),
+ Local(Span),
+ Turbo(Span, usize, usize),
+}
+
+#[derive(Clone)]
+pub(crate) struct SuggestAnnotations {
+ pub suggestions: Vec<SuggestAnnotation>,
+}
+impl Subdiagnostic for SuggestAnnotations {
+ fn add_to_diag_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>(
+ self,
+ diag: &mut Diag<'_, G>,
+ _: &F,
+ ) {
+ if self.suggestions.is_empty() {
+ return;
+ }
+
+ let mut suggestions = vec![];
+ for suggestion in self.suggestions {
+ match suggestion {
+ SuggestAnnotation::Unit(span) => {
+ suggestions.push((span, "()".to_string()));
+ }
+ SuggestAnnotation::Path(span) => {
+ suggestions.push((span.shrink_to_lo(), "<() as ".to_string()));
+ suggestions.push((span.shrink_to_hi(), ">".to_string()));
+ }
+ SuggestAnnotation::Local(span) => {
+ suggestions.push((span, ": ()".to_string()));
+ }
+ SuggestAnnotation::Turbo(span, n_args, idx) => suggestions.push((
+ span,
+ format!(
+ "::<{}>",
+ (0..n_args)
+ .map(|i| if i == idx { "()" } else { "_" })
+ .collect::<Vec<_>>()
+ .join(", "),
+ ),
+ )),
+ }
+ }
+
+ diag.multipart_suggestion_verbose(
+ "use `()` annotations to avoid fallback changes",
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ }
}
#[derive(Subdiagnostic)]
diff --git a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
index bb5f351..041ccfc 100644
--- a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
+++ b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
@@ -781,7 +781,7 @@ fn walk_adjustment(&self, expr: &hir::Expr<'_>) -> Result<(), Cx::Error> {
self.walk_autoref(expr, &place_with_id, autoref);
}
- adjustment::Adjust::ReborrowPin(_, mutbl) => {
+ adjustment::Adjust::ReborrowPin(mutbl) => {
// Reborrowing a Pin is like a combinations of a deref and a borrow, so we do
// both.
let bk = match mutbl {
@@ -804,7 +804,7 @@ fn walk_autoref(
&self,
expr: &hir::Expr<'_>,
base_place: &PlaceWithHirId<'tcx>,
- autoref: &adjustment::AutoBorrow<'tcx>,
+ autoref: &adjustment::AutoBorrow,
) {
debug!(
"walk_autoref(expr.hir_id={} base_place={:?} autoref={:?})",
@@ -812,7 +812,7 @@ fn walk_autoref(
);
match *autoref {
- adjustment::AutoBorrow::Ref(_, m) => {
+ adjustment::AutoBorrow::Ref(m) => {
self.delegate.borrow_mut().borrow(
base_place,
base_place.hir_id,
@@ -1283,7 +1283,12 @@ fn cat_expr_adjusted_with<F>(
adjustment::Adjust::Deref(overloaded) => {
// Equivalent to *expr or something similar.
let base = if let Some(deref) = overloaded {
- let ref_ty = Ty::new_ref(self.cx.tcx(), deref.region, target, deref.mutbl);
+ let ref_ty = Ty::new_ref(
+ self.cx.tcx(),
+ self.cx.tcx().lifetimes.re_erased,
+ target,
+ deref.mutbl,
+ );
self.cat_rvalue(expr.hir_id, ref_ty)
} else {
previous()?
diff --git a/compiler/rustc_hir_typeck/src/fallback.rs b/compiler/rustc_hir_typeck/src/fallback.rs
index 68776c5..8d8573c 100644
--- a/compiler/rustc_hir_typeck/src/fallback.rs
+++ b/compiler/rustc_hir_typeck/src/fallback.rs
@@ -1,11 +1,15 @@
use std::cell::OnceCell;
+use std::ops::ControlFlow;
+use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::graph::iterate::DepthFirstSearch;
use rustc_data_structures::graph::vec_graph::VecGraph;
use rustc_data_structures::graph::{self};
use rustc_data_structures::unord::{UnordBag, UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_hir::HirId;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::Visitor;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable};
use rustc_session::lint;
@@ -14,7 +18,7 @@
use rustc_trait_selection::traits::{ObligationCause, ObligationCtxt};
use tracing::debug;
-use crate::{FnCtxt, TypeckRootCtxt, errors};
+use crate::{FnCtxt, errors};
#[derive(Copy, Clone)]
pub(crate) enum DivergingFallbackBehavior {
@@ -321,7 +325,11 @@ fn calculate_diverging_fallback(
let mut diverging_fallback = UnordMap::with_capacity(diverging_vids.len());
let unsafe_infer_vars = OnceCell::new();
- self.lint_obligations_broken_by_never_type_fallback_change(behavior, &diverging_vids);
+ self.lint_obligations_broken_by_never_type_fallback_change(
+ behavior,
+ &diverging_vids,
+ &coercion_graph,
+ );
for &diverging_vid in &diverging_vids {
let diverging_ty = Ty::new_var(self.tcx, diverging_vid);
@@ -419,7 +427,7 @@ fn lint_never_type_fallback_flowing_into_unsafe_code(
root_vid: ty::TyVid,
) {
let unsafe_infer_vars = unsafe_infer_vars.get_or_init(|| {
- let unsafe_infer_vars = compute_unsafe_infer_vars(self.root_ctxt, self.body_id);
+ let unsafe_infer_vars = compute_unsafe_infer_vars(self, self.body_id);
debug!(?unsafe_infer_vars);
unsafe_infer_vars
});
@@ -429,19 +437,31 @@ fn lint_never_type_fallback_flowing_into_unsafe_code(
.filter_map(|x| unsafe_infer_vars.get(&x).copied())
.collect::<Vec<_>>();
+ let sugg = self.try_to_suggest_annotations(&[root_vid], coercion_graph);
+
for (hir_id, span, reason) in affected_unsafe_infer_vars {
self.tcx.emit_node_span_lint(
lint::builtin::NEVER_TYPE_FALLBACK_FLOWING_INTO_UNSAFE,
hir_id,
span,
match reason {
- UnsafeUseReason::Call => errors::NeverTypeFallbackFlowingIntoUnsafe::Call,
- UnsafeUseReason::Method => errors::NeverTypeFallbackFlowingIntoUnsafe::Method,
- UnsafeUseReason::Path => errors::NeverTypeFallbackFlowingIntoUnsafe::Path,
- UnsafeUseReason::UnionField => {
- errors::NeverTypeFallbackFlowingIntoUnsafe::UnionField
+ UnsafeUseReason::Call => {
+ errors::NeverTypeFallbackFlowingIntoUnsafe::Call { sugg: sugg.clone() }
}
- UnsafeUseReason::Deref => errors::NeverTypeFallbackFlowingIntoUnsafe::Deref,
+ UnsafeUseReason::Method => {
+ errors::NeverTypeFallbackFlowingIntoUnsafe::Method { sugg: sugg.clone() }
+ }
+ UnsafeUseReason::Path => {
+ errors::NeverTypeFallbackFlowingIntoUnsafe::Path { sugg: sugg.clone() }
+ }
+ UnsafeUseReason::UnionField => {
+ errors::NeverTypeFallbackFlowingIntoUnsafe::UnionField {
+ sugg: sugg.clone(),
+ }
+ }
+ UnsafeUseReason::Deref => {
+ errors::NeverTypeFallbackFlowingIntoUnsafe::Deref { sugg: sugg.clone() }
+ }
},
);
}
@@ -451,6 +471,7 @@ fn lint_obligations_broken_by_never_type_fallback_change(
&self,
behavior: DivergingFallbackBehavior,
diverging_vids: &[ty::TyVid],
+ coercions: &VecGraph<ty::TyVid, true>,
) {
let DivergingFallbackBehavior::ToUnit = behavior else { return };
@@ -478,13 +499,14 @@ fn lint_obligations_broken_by_never_type_fallback_change(
};
// If we have no errors with `fallback = ()`, but *do* have errors with `fallback = !`,
- // then this code will be broken by the never type fallback change.qba
+ // then this code will be broken by the never type fallback change.
let unit_errors = remaining_errors_if_fallback_to(self.tcx.types.unit);
if unit_errors.is_empty()
&& let mut never_errors = remaining_errors_if_fallback_to(self.tcx.types.never)
&& let [ref mut never_error, ..] = never_errors.as_mut_slice()
{
self.adjust_fulfillment_error_for_expr_obligation(never_error);
+ let sugg = self.try_to_suggest_annotations(diverging_vids, coercions);
self.tcx.emit_node_span_lint(
lint::builtin::DEPENDENCY_ON_UNIT_NEVER_TYPE_FALLBACK,
self.tcx.local_def_id_to_hir_id(self.body_id),
@@ -492,6 +514,7 @@ fn lint_obligations_broken_by_never_type_fallback_change(
errors::DependencyOnUnitNeverTypeFallback {
obligation_span: never_error.obligation.cause.span,
obligation: never_error.obligation.predicate,
+ sugg,
},
)
}
@@ -541,6 +564,153 @@ fn create_coercion_graph(&self) -> VecGraph<ty::TyVid, true> {
fn root_vid(&self, ty: Ty<'tcx>) -> Option<ty::TyVid> {
Some(self.root_var(self.shallow_resolve(ty).ty_vid()?))
}
+
+ /// Given a set of diverging vids and coercions, walk the HIR to gather a
+ /// set of suggestions which can be applied to preserve fallback to unit.
+ fn try_to_suggest_annotations(
+ &self,
+ diverging_vids: &[ty::TyVid],
+ coercions: &VecGraph<ty::TyVid, true>,
+ ) -> errors::SuggestAnnotations {
+ let body =
+ self.tcx.hir().maybe_body_owned_by(self.body_id).expect("body id must have an owner");
+ // For each diverging var, look through the HIR for a place to give it
+ // a type annotation. We do this per var because we only really need one
+ // suggestion to influence a var to be `()`.
+ let suggestions = diverging_vids
+ .iter()
+ .copied()
+ .filter_map(|vid| {
+ let reachable_vids =
+ graph::depth_first_search_as_undirected(coercions, vid).collect();
+ AnnotateUnitFallbackVisitor { reachable_vids, fcx: self }
+ .visit_expr(body.value)
+ .break_value()
+ })
+ .collect();
+ errors::SuggestAnnotations { suggestions }
+ }
+}
+
+/// Try to walk the HIR to find a place to insert a useful suggestion
+/// to preserve fallback to `()` in 2024.
+struct AnnotateUnitFallbackVisitor<'a, 'tcx> {
+ reachable_vids: FxHashSet<ty::TyVid>,
+ fcx: &'a FnCtxt<'a, 'tcx>,
+}
+impl<'tcx> AnnotateUnitFallbackVisitor<'_, 'tcx> {
+ // For a given path segment, if it's missing a turbofish, try to suggest adding
+ // one so we can constrain an argument to `()`. To keep the suggestion simple,
+ // we want to simply suggest `_` for all the other args. This (for now) only
+ // works when there are only type variables (and region variables, since we can
+ // elide them)...
+ fn suggest_for_segment(
+ &self,
+ arg_segment: &'tcx hir::PathSegment<'tcx>,
+ def_id: DefId,
+ id: HirId,
+ ) -> ControlFlow<errors::SuggestAnnotation> {
+ if arg_segment.args.is_none()
+ && let Some(all_args) = self.fcx.typeck_results.borrow().node_args_opt(id)
+ && let generics = self.fcx.tcx.generics_of(def_id)
+ && let args = &all_args[generics.parent_count..]
+ // We can't turbofish consts :(
+ && args.iter().all(|arg| matches!(arg.unpack(), ty::GenericArgKind::Type(_) | ty::GenericArgKind::Lifetime(_)))
+ {
+ let n_tys = args
+ .iter()
+ .filter(|arg| matches!(arg.unpack(), ty::GenericArgKind::Type(_)))
+ .count();
+ for (idx, arg) in args.iter().enumerate() {
+ if let Some(ty) = arg.as_type()
+ && let Some(vid) = self.fcx.root_vid(ty)
+ && self.reachable_vids.contains(&vid)
+ {
+ return ControlFlow::Break(errors::SuggestAnnotation::Turbo(
+ arg_segment.ident.span.shrink_to_hi(),
+ n_tys,
+ idx,
+ ));
+ }
+ }
+ }
+ ControlFlow::Continue(())
+ }
+}
+impl<'tcx> Visitor<'tcx> for AnnotateUnitFallbackVisitor<'_, 'tcx> {
+ type Result = ControlFlow<errors::SuggestAnnotation>;
+
+ fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) -> Self::Result {
+ // Try to replace `_` with `()`.
+ if let hir::TyKind::Infer = hir_ty.kind
+ && let ty = self.fcx.typeck_results.borrow().node_type(hir_ty.hir_id)
+ && let Some(vid) = self.fcx.root_vid(ty)
+ && self.reachable_vids.contains(&vid)
+ {
+ return ControlFlow::Break(errors::SuggestAnnotation::Unit(hir_ty.span));
+ }
+ hir::intravisit::walk_ty(self, hir_ty)
+ }
+
+ fn visit_qpath(
+ &mut self,
+ qpath: &'tcx rustc_hir::QPath<'tcx>,
+ id: HirId,
+ _span: Span,
+ ) -> Self::Result {
+ let arg_segment = match qpath {
+ hir::QPath::Resolved(_, path) => {
+ path.segments.last().expect("paths should have a segment")
+ }
+ hir::QPath::TypeRelative(_, segment) => segment,
+ hir::QPath::LangItem(..) => {
+ return hir::intravisit::walk_qpath(self, qpath, id);
+ }
+ };
+ // Alternatively, try to turbofish `::<_, (), _>`.
+ if let Some(def_id) = self.fcx.typeck_results.borrow().qpath_res(qpath, id).opt_def_id() {
+ self.suggest_for_segment(arg_segment, def_id, id)?;
+ }
+ hir::intravisit::walk_qpath(self, qpath, id)
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) -> Self::Result {
+ // Try to suggest adding an explicit qself `()` to a trait method path.
+ // i.e. changing `Default::default()` to `<() as Default>::default()`.
+ if let hir::ExprKind::Path(hir::QPath::Resolved(None, path)) = expr.kind
+ && let Res::Def(DefKind::AssocFn, def_id) = path.res
+ && self.fcx.tcx.trait_of_item(def_id).is_some()
+ && let self_ty = self.fcx.typeck_results.borrow().node_args(expr.hir_id).type_at(0)
+ && let Some(vid) = self.fcx.root_vid(self_ty)
+ && self.reachable_vids.contains(&vid)
+ && let [.., trait_segment, _method_segment] = path.segments
+ {
+ let span = path.span.shrink_to_lo().to(trait_segment.ident.span);
+ return ControlFlow::Break(errors::SuggestAnnotation::Path(span));
+ }
+ // Or else, try suggesting turbofishing the method args.
+ if let hir::ExprKind::MethodCall(segment, ..) = expr.kind
+ && let Some(def_id) =
+ self.fcx.typeck_results.borrow().type_dependent_def_id(expr.hir_id)
+ {
+ self.suggest_for_segment(segment, def_id, expr.hir_id)?;
+ }
+ hir::intravisit::walk_expr(self, expr)
+ }
+
+ fn visit_local(&mut self, local: &'tcx hir::LetStmt<'tcx>) -> Self::Result {
+ // For a local, try suggest annotating the type if it's missing.
+ if let None = local.ty
+ && let ty = self.fcx.typeck_results.borrow().node_type(local.hir_id)
+ && let Some(vid) = self.fcx.root_vid(ty)
+ && self.reachable_vids.contains(&vid)
+ {
+ return ControlFlow::Break(errors::SuggestAnnotation::Local(
+ local.pat.span.shrink_to_hi(),
+ ));
+ }
+ hir::intravisit::walk_local(self, local)
+ }
}
#[derive(Debug, Copy, Clone)]
@@ -569,27 +739,26 @@ pub(crate) enum UnsafeUseReason {
///
/// `compute_unsafe_infer_vars` will return `{ id(?X) -> (hir_id, span, Call) }`
fn compute_unsafe_infer_vars<'a, 'tcx>(
- root_ctxt: &'a TypeckRootCtxt<'tcx>,
+ fcx: &'a FnCtxt<'a, 'tcx>,
body_id: LocalDefId,
) -> UnordMap<ty::TyVid, (HirId, Span, UnsafeUseReason)> {
- let body =
- root_ctxt.tcx.hir().maybe_body_owned_by(body_id).expect("body id must have an owner");
+ let body = fcx.tcx.hir().maybe_body_owned_by(body_id).expect("body id must have an owner");
let mut res = UnordMap::default();
struct UnsafeInferVarsVisitor<'a, 'tcx> {
- root_ctxt: &'a TypeckRootCtxt<'tcx>,
+ fcx: &'a FnCtxt<'a, 'tcx>,
res: &'a mut UnordMap<ty::TyVid, (HirId, Span, UnsafeUseReason)>,
}
impl Visitor<'_> for UnsafeInferVarsVisitor<'_, '_> {
fn visit_expr(&mut self, ex: &'_ hir::Expr<'_>) {
- let typeck_results = self.root_ctxt.typeck_results.borrow();
+ let typeck_results = self.fcx.typeck_results.borrow();
match ex.kind {
hir::ExprKind::MethodCall(..) => {
if let Some(def_id) = typeck_results.type_dependent_def_id(ex.hir_id)
- && let method_ty = self.root_ctxt.tcx.type_of(def_id).instantiate_identity()
- && let sig = method_ty.fn_sig(self.root_ctxt.tcx)
+ && let method_ty = self.fcx.tcx.type_of(def_id).instantiate_identity()
+ && let sig = method_ty.fn_sig(self.fcx.tcx)
&& let hir::Safety::Unsafe = sig.safety()
{
let mut collector = InferVarCollector {
@@ -609,7 +778,7 @@ fn visit_expr(&mut self, ex: &'_ hir::Expr<'_>) {
let func_ty = typeck_results.expr_ty(func);
if func_ty.is_fn()
- && let sig = func_ty.fn_sig(self.root_ctxt.tcx)
+ && let sig = func_ty.fn_sig(self.fcx.tcx)
&& let hir::Safety::Unsafe = sig.safety()
{
let mut collector = InferVarCollector {
@@ -640,7 +809,7 @@ fn visit_expr(&mut self, ex: &'_ hir::Expr<'_>) {
// If this path refers to an unsafe function, collect inference variables which may affect it.
// `is_fn` excludes closures, but those can't be unsafe.
if ty.is_fn()
- && let sig = ty.fn_sig(self.root_ctxt.tcx)
+ && let sig = ty.fn_sig(self.fcx.tcx)
&& let hir::Safety::Unsafe = sig.safety()
{
let mut collector = InferVarCollector {
@@ -698,7 +867,7 @@ fn visit_ty(&mut self, t: Ty<'tcx>) {
}
}
- UnsafeInferVarsVisitor { root_ctxt, res: &mut res }.visit_expr(&body.value);
+ UnsafeInferVarsVisitor { fcx, res: &mut res }.visit_expr(&body.value);
debug!(?res, "collected the following unsafe vars for {body_id:?}");
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
index 0fc566c..a1a7837 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
@@ -264,7 +264,7 @@ pub(crate) fn apply_adjustments(&self, expr: &hir::Expr<'_>, adj: Vec<Adjustment
let autoborrow_mut = adj.iter().any(|adj| {
matches!(adj, &Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(_, AutoBorrowMutability::Mut { .. })),
+ kind: Adjust::Borrow(AutoBorrow::Ref(AutoBorrowMutability::Mut { .. })),
..
})
});
diff --git a/compiler/rustc_hir_typeck/src/method/confirm.rs b/compiler/rustc_hir_typeck/src/method/confirm.rs
index f2b55d3..3754fd0 100644
--- a/compiler/rustc_hir_typeck/src/method/confirm.rs
+++ b/compiler/rustc_hir_typeck/src/method/confirm.rs
@@ -200,10 +200,8 @@ fn adjust_self_ty(
// for two-phase borrows.
let mutbl = AutoBorrowMutability::new(mutbl, AllowTwoPhase::Yes);
- adjustments.push(Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
- target,
- });
+ adjustments
+ .push(Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(mutbl)), target });
if unsize {
let unsized_ty = if let ty::Array(elem_ty, _) = base_ty.kind() {
@@ -250,7 +248,7 @@ fn adjust_self_ty(
_ => bug!("Cannot adjust receiver type for reborrowing pin of {target:?}"),
};
- adjustments.push(Adjustment { kind: Adjust::ReborrowPin(region, mutbl), target });
+ adjustments.push(Adjustment { kind: Adjust::ReborrowPin(mutbl), target });
}
None => {}
}
@@ -533,9 +531,6 @@ fn unify_receivers(
self.register_predicates(obligations);
}
Err(terr) => {
- // FIXME(arbitrary_self_types): We probably should limit the
- // situations where this can occur by adding additional restrictions
- // to the feature, like the self type can't reference method args.
if self.tcx.features().arbitrary_self_types() {
self.err_ctxt()
.report_mismatched_types(
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
index 569fdea..eb5581f 100644
--- a/compiler/rustc_hir_typeck/src/method/probe.rs
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -1365,7 +1365,6 @@ pub(crate) fn differs_from(&self, other: &Self) -> bool {
trait_item_def_id: _,
fn_has_self_parameter: _,
opt_rpitit_info: _,
- is_effects_desugaring: _,
},
kind: _,
import_ids: _,
diff --git a/compiler/rustc_hir_typeck/src/op.rs b/compiler/rustc_hir_typeck/src/op.rs
index 57ac7f7..9c1459e 100644
--- a/compiler/rustc_hir_typeck/src/op.rs
+++ b/compiler/rustc_hir_typeck/src/op.rs
@@ -256,23 +256,23 @@ fn check_overloaded_binop(
Ok(method) => {
let by_ref_binop = !op.node.is_by_value();
if is_assign == IsAssign::Yes || by_ref_binop {
- if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() {
+ if let ty::Ref(_, _, mutbl) = method.sig.inputs()[0].kind() {
let mutbl = AutoBorrowMutability::new(*mutbl, AllowTwoPhase::Yes);
let autoref = Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ kind: Adjust::Borrow(AutoBorrow::Ref(mutbl)),
target: method.sig.inputs()[0],
};
self.apply_adjustments(lhs_expr, vec![autoref]);
}
}
if by_ref_binop {
- if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].kind() {
+ if let ty::Ref(_, _, mutbl) = method.sig.inputs()[1].kind() {
// Allow two-phase borrows for binops in initial deployment
// since they desugar to methods
let mutbl = AutoBorrowMutability::new(*mutbl, AllowTwoPhase::Yes);
let autoref = Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ kind: Adjust::Borrow(AutoBorrow::Ref(mutbl)),
target: method.sig.inputs()[1],
};
// HACK(eddyb) Bypass checks due to reborrows being in
diff --git a/compiler/rustc_hir_typeck/src/place_op.rs b/compiler/rustc_hir_typeck/src/place_op.rs
index 5dd5172..d5c7fe5 100644
--- a/compiler/rustc_hir_typeck/src/place_op.rs
+++ b/compiler/rustc_hir_typeck/src/place_op.rs
@@ -29,9 +29,9 @@ pub(super) fn lookup_derefing(
let ok = self.try_overloaded_deref(expr.span, oprnd_ty)?;
let method = self.register_infer_ok_obligations(ok);
- if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
+ if let ty::Ref(_, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
self.apply_adjustments(oprnd_expr, vec![Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)),
+ kind: Adjust::Borrow(AutoBorrow::Ref(AutoBorrowMutability::Not)),
target: method.sig.inputs()[0],
}]);
} else {
@@ -158,7 +158,7 @@ fn try_index_step(
let mut adjustments = self.adjust_steps(autoderef);
if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
adjustments.push(Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)),
+ kind: Adjust::Borrow(AutoBorrow::Ref(AutoBorrowMutability::Not)),
target: Ty::new_imm_ref(self.tcx, *region, adjusted_ty),
});
} else {
@@ -289,9 +289,13 @@ pub(crate) fn convert_place_derefs_to_mutable(&self, expr: &hir::Expr<'_>) {
)
{
let method = self.register_infer_ok_obligations(ok);
- if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() {
- *deref = OverloadedDeref { region, mutbl, span: deref.span };
- }
+ let ty::Ref(_, _, mutbl) = *method.sig.output().kind() else {
+ span_bug!(
+ self.tcx.def_span(method.def_id),
+ "expected DerefMut to return a &mut"
+ );
+ };
+ *deref = OverloadedDeref { mutbl, span: deref.span };
// If this is a union field, also throw an error for `DerefMut` of `ManuallyDrop` (see RFC 2514).
// This helps avoid accidental drops.
if inside_union
@@ -390,7 +394,7 @@ fn convert_place_op_to_mutable(
// not the case today.
allow_two_phase_borrow: AllowTwoPhase::No,
};
- adjustment.kind = Adjust::Borrow(AutoBorrow::Ref(*region, mutbl));
+ adjustment.kind = Adjust::Borrow(AutoBorrow::Ref(mutbl));
adjustment.target = Ty::new_ref(self.tcx, *region, source, mutbl.into());
}
source = adjustment.target;
diff --git a/compiler/rustc_infer/src/infer/opaque_types/mod.rs b/compiler/rustc_infer/src/infer/opaque_types/mod.rs
index 498d25a..d65ed72 100644
--- a/compiler/rustc_infer/src/infer/opaque_types/mod.rs
+++ b/compiler/rustc_infer/src/infer/opaque_types/mod.rs
@@ -155,7 +155,10 @@ pub fn handle_opaque_type(
// however in `fn fut() -> impl Future<Output = i32> { async { 42 } }`, where
// it is of no concern, so we only check for TAITs.
if self.can_define_opaque_ty(b_def_id)
- && self.tcx.is_type_alias_impl_trait(b_def_id)
+ && matches!(
+ self.tcx.opaque_ty_origin(b_def_id),
+ hir::OpaqueTyOrigin::TyAlias { .. }
+ )
{
self.dcx().emit_err(OpaqueHiddenTypeDiag {
span,
@@ -429,7 +432,6 @@ fn visit_ty(&mut self, ty: Ty<'tcx>) {
upvar.visit_with(self);
}
- // FIXME(async_closures): Is this the right signature to visit here?
args.as_coroutine_closure().signature_parts_ty().visit_with(self);
}
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index d3762e7..35bba14 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -832,8 +832,6 @@ macro_rules! tracked {
tracked!(polonius, Polonius::Legacy);
tracked!(precise_enum_drop_elaboration, false);
tracked!(print_fuel, Some("abc".to_string()));
- tracked!(profile, true);
- tracked!(profile_emit, Some(PathBuf::from("abc")));
tracked!(profile_sample_use, Some(PathBuf::from("abc")));
tracked!(profiler_runtime, "abc".to_string());
tracked!(regparm, Some(3));
diff --git a/compiler/rustc_lint/messages.ftl b/compiler/rustc_lint/messages.ftl
index 3fa4329..9187f6c 100644
--- a/compiler/rustc_lint/messages.ftl
+++ b/compiler/rustc_lint/messages.ftl
@@ -268,7 +268,7 @@
lint_extern_without_abi = extern declarations without an explicit ABI are deprecated
.label = ABI should be specified here
- .help = the default ABI is {$default_abi}
+ .suggestion = explicitly specify the {$default_abi} ABI
lint_for_loops_over_fallibles =
for loop over {$article} `{$ref_prefix}{$ty}`. This is more readably written as an `if let` statement
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index 77682ea..02d22ee 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -16,6 +16,7 @@
use std::fmt::Write;
use ast::token::TokenKind;
+use rustc_abi::BackendRepr;
use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_ast::visit::{FnCtxt, FnKind};
use rustc_ast::{self as ast, *};
@@ -40,7 +41,6 @@
use rustc_span::source_map::Spanned;
use rustc_span::symbol::{Ident, Symbol, kw, sym};
use rustc_span::{BytePos, InnerSpan, Span};
-use rustc_target::abi::Abi;
use rustc_target::asm::InlineAsmArch;
use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt};
use rustc_trait_selection::traits::misc::type_allowed_to_implement_copy;
@@ -2468,7 +2468,9 @@ fn variant_find_init_error<'tcx>(
// Check if this ADT has a constrained layout (like `NonNull` and friends).
if let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(ty)) {
- if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &layout.abi {
+ if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+ &layout.backend_repr
+ {
let range = scalar.valid_range(cx);
let msg = if !range.contains(0) {
"must be non-null"
diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs
index abe4e3e..394ea79 100644
--- a/compiler/rustc_lint/src/foreign_modules.rs
+++ b/compiler/rustc_lint/src/foreign_modules.rs
@@ -217,7 +217,7 @@ fn structurally_same_type<'tcx>(
// `extern` blocks cannot be generic, so we'll always get a layout here.
let a_layout = tcx.layout_of(param_env.and(a)).unwrap();
let b_layout = tcx.layout_of(param_env.and(b)).unwrap();
- assert_eq!(a_layout.abi, b_layout.abi);
+ assert_eq!(a_layout.backend_repr, b_layout.backend_repr);
assert_eq!(a_layout.size, b_layout.size);
assert_eq!(a_layout.align, b_layout.align);
}
diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs
index 000f4b6..38e5257 100644
--- a/compiler/rustc_lint/src/lints.rs
+++ b/compiler/rustc_lint/src/lints.rs
@@ -2738,11 +2738,9 @@ pub(crate) struct PatternsInFnsWithoutBodySub {
#[derive(LintDiagnostic)]
#[diag(lint_extern_without_abi)]
-#[help]
pub(crate) struct MissingAbi {
- #[label]
+ #[suggestion(code = "extern \"{default_abi}\"", applicability = "machine-applicable")]
pub span: Span,
-
pub default_abi: &'static str,
}
diff --git a/compiler/rustc_lint/src/non_local_def.rs b/compiler/rustc_lint/src/non_local_def.rs
index 3c31b87..3c33b2d 100644
--- a/compiler/rustc_lint/src/non_local_def.rs
+++ b/compiler/rustc_lint/src/non_local_def.rs
@@ -151,9 +151,15 @@ fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'tcx>) {
// };
// };
// ```
+ //
+ // It isn't possible to mix a impl in a module with const-anon, but an item can
+ // be put inside a module and referenced by a impl so we also have to treat the
+ // item parent as transparent to module and for consistency we have to do the same
+ // for impl, otherwise the item-def and impl-def won't have the same parent.
let outermost_impl_parent = peel_parent_while(cx.tcx, parent, |tcx, did| {
- tcx.def_kind(did) == DefKind::Const
- && tcx.opt_item_name(did) == Some(kw::Underscore)
+ tcx.def_kind(did) == DefKind::Mod
+ || (tcx.def_kind(did) == DefKind::Const
+ && tcx.opt_item_name(did) == Some(kw::Underscore))
});
// 2. We check if any of the paths reference a the `impl`-parent.
diff --git a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
index 16a1a5a..5de0d4b 100644
--- a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
+++ b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
@@ -69,7 +69,7 @@
impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
fn check_ty(&mut self, cx: &LateContext<'tcx>, ty: &'tcx hir::Ty<'tcx>) {
- let hir::TyKind::OpaqueDef(opaque, _) = &ty.kind else {
+ let hir::TyKind::OpaqueDef(opaque) = &ty.kind else {
return;
};
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 0751d35..48dd8e3 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -1,6 +1,7 @@
use std::iter;
use std::ops::ControlFlow;
+use rustc_abi::{BackendRepr, TagEncoding, Variants, WrappingRange};
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::DiagMessage;
use rustc_hir::{Expr, ExprKind};
@@ -13,7 +14,6 @@
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol, source_map};
-use rustc_target::abi::{Abi, TagEncoding, Variants, WrappingRange};
use rustc_target::spec::abi::Abi as SpecAbi;
use tracing::debug;
use {rustc_ast as ast, rustc_hir as hir};
@@ -204,7 +204,10 @@ fn is_nan(cx: &LateContext<'_>, expr: &hir::Expr<'_>) -> bool {
return false;
};
- matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::f32_nan | sym::f64_nan))
+ matches!(
+ cx.tcx.get_diagnostic_name(def_id),
+ Some(sym::f16_nan | sym::f32_nan | sym::f64_nan | sym::f128_nan)
+ )
}
_ => false,
}
@@ -776,8 +779,8 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
bug!("should be able to compute the layout of non-polymorphic type");
}
- let field_ty_abi = &field_ty_layout.ok()?.abi;
- if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
+ let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
+ if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
match field_ty_scalar.valid_range(&tcx) {
WrappingRange { start: 0, end }
if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index bbb290c..b50a95e 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -1610,7 +1610,7 @@ fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
}
for adj in cx.typeck_results().expr_adjustments(e) {
- if let adjustment::Adjust::Borrow(adjustment::AutoBorrow::Ref(_, m)) = adj.kind {
+ if let adjustment::Adjust::Borrow(adjustment::AutoBorrow::Ref(m)) = adj.kind {
match m {
adjustment::AutoBorrowMutability::Not => {
cx.emit_span_lint(UNUSED_ALLOCATION, e.span, UnusedAllocationDiag);
diff --git a/compiler/rustc_lint_defs/Cargo.toml b/compiler/rustc_lint_defs/Cargo.toml
index eb2a184..450885e7 100644
--- a/compiler/rustc_lint_defs/Cargo.toml
+++ b/compiler/rustc_lint_defs/Cargo.toml
@@ -5,6 +5,7 @@
[dependencies]
# tidy-alphabetical-start
+rustc_abi = { path = "../rustc_abi" }
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_error_messages = { path = "../rustc_error_messages" }
@@ -12,6 +13,5 @@
rustc_macros = { path = "../rustc_macros" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_span = { path = "../rustc_span" }
-rustc_target = { path = "../rustc_target" }
serde = { version = "1.0.125", features = ["derive"] }
# tidy-alphabetical-end
diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs
index 601784f..0caf6ef 100644
--- a/compiler/rustc_lint_defs/src/lib.rs
+++ b/compiler/rustc_lint_defs/src/lib.rs
@@ -2,6 +2,7 @@
#![warn(unreachable_pub)]
// tidy-alphabetical-end
+use rustc_abi::ExternAbi;
use rustc_ast::node_id::NodeId;
use rustc_ast::{AttrId, Attribute};
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
@@ -15,7 +16,6 @@
pub use rustc_span::edition::Edition;
use rustc_span::symbol::{Ident, MacroRulesNormalizedIdent};
use rustc_span::{Span, Symbol, sym};
-use rustc_target::spec::abi::Abi;
use serde::{Deserialize, Serialize};
pub use self::Level::*;
@@ -602,7 +602,7 @@ pub enum BuiltinLintDiag {
path: String,
since_kind: DeprecatedSinceKind,
},
- MissingAbi(Span, Abi),
+ MissingAbi(Span, ExternAbi),
UnusedDocComment(Span),
UnusedBuiltinAttribute {
attr_name: Symbol,
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 3e906f8..3b7dc6d 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -42,7 +42,6 @@
#if LLVM_VERSION_GE(19, 0)
#include "llvm/Support/PGOOptions.h"
#endif
-#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
@@ -714,9 +713,8 @@
bool SLPVectorize, bool LoopVectorize, bool DisableSimplifyLibCalls,
bool EmitLifetimeMarkers, LLVMRustSanitizerOptions *SanitizerOptions,
const char *PGOGenPath, const char *PGOUsePath, bool InstrumentCoverage,
- const char *InstrProfileOutput, bool InstrumentGCOV,
- const char *PGOSampleUsePath, bool DebugInfoForProfiling,
- void *LlvmSelfProfiler,
+ const char *InstrProfileOutput, const char *PGOSampleUsePath,
+ bool DebugInfoForProfiling, void *LlvmSelfProfiler,
LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
LLVMRustSelfProfileAfterPassCallback AfterPassCallback,
const char *ExtraPasses, size_t ExtraPassesLen, const char *LLVMPlugins,
@@ -847,13 +845,6 @@
});
}
- if (InstrumentGCOV) {
- PipelineStartEPCallbacks.push_back(
- [](ModulePassManager &MPM, OptimizationLevel Level) {
- MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
- });
- }
-
if (InstrumentCoverage) {
PipelineStartEPCallbacks.push_back(
[InstrProfileOutput](ModulePassManager &MPM, OptimizationLevel Level) {
diff --git a/compiler/rustc_metadata/Cargo.toml b/compiler/rustc_metadata/Cargo.toml
index 3b0151b..cece700 100644
--- a/compiler/rustc_metadata/Cargo.toml
+++ b/compiler/rustc_metadata/Cargo.toml
@@ -27,7 +27,6 @@
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
rustc_type_ir = { path = "../rustc_type_ir" }
-snap = "1"
tempfile = "3.2"
tracing = "0.1"
# tidy-alphabetical-end
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
index 1662391..d2be6ae 100644
--- a/compiler/rustc_metadata/src/creader.rs
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -778,9 +778,7 @@ fn inject_panic_runtime(&mut self, krate: &ast::Crate) {
fn inject_profiler_runtime(&mut self, krate: &ast::Crate) {
if self.sess.opts.unstable_opts.no_profiler_runtime
- || !(self.sess.instrument_coverage()
- || self.sess.opts.unstable_opts.profile
- || self.sess.opts.cg.profile_generate.enabled())
+ || !(self.sess.instrument_coverage() || self.sess.opts.cg.profile_generate.enabled())
{
return;
}
diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs
index a4a69ae..f924ed4 100644
--- a/compiler/rustc_metadata/src/locator.rs
+++ b/compiler/rustc_metadata/src/locator.rs
@@ -213,7 +213,7 @@
//! metadata::locator or metadata::creader for all the juicy details!
use std::borrow::Cow;
-use std::io::{Read, Result as IoResult, Write};
+use std::io::{Result as IoResult, Write};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::{cmp, fmt};
@@ -232,7 +232,6 @@
use rustc_span::Span;
use rustc_span::symbol::Symbol;
use rustc_target::spec::{Target, TargetTriple};
-use snap::read::FrameDecoder;
use tracing::{debug, info};
use crate::creader::{Library, MetadataLoader};
@@ -792,7 +791,6 @@ fn get_metadata_section<'p>(
CrateFlavor::Dylib => {
let buf =
loader.get_dylib_metadata(target, filename).map_err(MetadataError::LoadFailure)?;
- // The header is uncompressed
let header_len = METADATA_HEADER.len();
// header + u64 length of data
let data_start = header_len + 8;
@@ -806,7 +804,7 @@ fn get_metadata_section<'p>(
)));
}
- // Length of the compressed stream - this allows linkers to pad the section if they want
+ // Length of the metadata - this allows linkers to pad the section if they want
let Ok(len_bytes) =
<[u8; 8]>::try_from(&buf[header_len..cmp::min(data_start, buf.len())])
else {
@@ -814,29 +812,10 @@ fn get_metadata_section<'p>(
"invalid metadata length found".to_string(),
));
};
- let compressed_len = u64::from_le_bytes(len_bytes) as usize;
+ let metadata_len = u64::from_le_bytes(len_bytes) as usize;
// Header is okay -> inflate the actual metadata
- let compressed_bytes = buf.slice(|buf| &buf[data_start..(data_start + compressed_len)]);
- if &compressed_bytes[..cmp::min(METADATA_HEADER.len(), compressed_bytes.len())]
- == METADATA_HEADER
- {
- // The metadata was not actually compressed.
- compressed_bytes
- } else {
- debug!("inflating {} bytes of compressed metadata", compressed_bytes.len());
- // Assume the decompressed data will be at least the size of the compressed data, so we
- // don't have to grow the buffer as much.
- let mut inflated = Vec::with_capacity(compressed_bytes.len());
- FrameDecoder::new(&*compressed_bytes).read_to_end(&mut inflated).map_err(|_| {
- MetadataError::LoadFailure(format!(
- "failed to decompress metadata: {}",
- filename.display()
- ))
- })?;
-
- slice_owned(inflated, Deref::deref)
- }
+ buf.slice(|buf| &buf[data_start..(data_start + metadata_len)])
}
CrateFlavor::Rmeta => {
// mmap the file, because only a small fraction of it is read.
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
index 4b40649..ebfd3c0 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -1317,9 +1317,7 @@ fn get_associated_item_or_field_def_ids(
}
fn get_associated_item(self, id: DefIndex, sess: &'a Session) -> ty::AssocItem {
- let name = if self.root.tables.opt_rpitit_info.get(self, id).is_some()
- || self.root.tables.is_effects_desugaring.get(self, id)
- {
+ let name = if self.root.tables.opt_rpitit_info.get(self, id).is_some() {
kw::Empty
} else {
self.item_name(id)
@@ -1342,7 +1340,6 @@ fn get_associated_item(self, id: DefIndex, sess: &'a Session) -> ty::AssocItem {
container,
fn_has_self_parameter: has_self,
opt_rpitit_info,
- is_effects_desugaring: self.root.tables.is_effects_desugaring.get(self, id),
}
}
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
index 926eb4f..f06f2fd 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -316,10 +316,7 @@ fn into_args(self) -> (DefId, SimplifiedType) {
})
.unwrap_or_default()
}
- is_type_alias_impl_trait => {
- debug_assert_eq!(tcx.def_kind(def_id), DefKind::OpaqueTy);
- cdata.root.tables.is_type_alias_impl_trait.get(cdata, def_id.index)
- }
+ opaque_ty_origin => { table }
assumed_wf_types_for_rpitit => { table }
collect_return_position_impl_trait_in_trait_tys => {
Ok(cdata
diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs
index b5ac302..7277039 100644
--- a/compiler/rustc_metadata/src/rmeta/encoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/encoder.rs
@@ -1188,7 +1188,7 @@ fn should_encode_type(tcx: TyCtxt<'_>, def_id: LocalDefId, def_kind: DefKind) ->
| DefKind::SyntheticCoroutineBody => true,
DefKind::OpaqueTy => {
- let origin = tcx.opaque_type_origin(def_id);
+ let origin = tcx.local_opaque_ty_origin(def_id);
if let hir::OpaqueTyOrigin::FnReturn { parent, .. }
| hir::OpaqueTyOrigin::AsyncFn { parent, .. } = origin
&& let hir::Node::TraitItem(trait_item) = tcx.hir_node_by_def_id(parent)
@@ -1530,9 +1530,7 @@ fn encode_def_ids(&mut self) {
if let DefKind::OpaqueTy = def_kind {
self.encode_explicit_item_bounds(def_id);
self.encode_explicit_item_super_predicates(def_id);
- self.tables
- .is_type_alias_impl_trait
- .set(def_id.index, self.tcx.is_type_alias_impl_trait(def_id));
+ record!(self.tables.opaque_ty_origin[def_id] <- self.tcx.opaque_ty_origin(def_id));
self.encode_precise_capturing_args(def_id);
}
if tcx.impl_method_has_trait_impl_trait_tys(def_id)
@@ -1677,9 +1675,6 @@ fn encode_info_for_assoc_item(&mut self, def_id: DefId) {
self.encode_precise_capturing_args(def_id);
}
}
- if item.is_effects_desugaring {
- self.tables.is_effects_desugaring.set(def_id.index, true);
- }
}
fn encode_precise_capturing_args(&mut self, def_id: DefId) {
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
index f184404..ab87876 100644
--- a/compiler/rustc_metadata/src/rmeta/mod.rs
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -378,7 +378,6 @@ fn encode(&self, buf: &mut FileEncoder) -> LazyTables {
- defaulted:
intrinsic: Table<DefIndex, Option<LazyValue<ty::IntrinsicDef>>>,
is_macro_rules: Table<DefIndex, bool>,
- is_type_alias_impl_trait: Table<DefIndex, bool>,
type_alias_is_lazy: Table<DefIndex, bool>,
attr_flags: Table<DefIndex, AttrFlags>,
// The u64 is the crate-local part of the DefPathHash. All hashes in this crate have the same
@@ -396,7 +395,6 @@ fn encode(&self, buf: &mut FileEncoder) -> LazyTables {
inherent_impls: Table<DefIndex, LazyArray<DefIndex>>,
associated_types_for_impl_traits_in_associated_fn: Table<DefIndex, LazyArray<DefId>>,
opt_rpitit_info: Table<DefIndex, Option<LazyValue<ty::ImplTraitInTraitData>>>,
- is_effects_desugaring: Table<DefIndex, bool>,
unused_generic_params: Table<DefIndex, UnusedGenericParams>,
// Reexported names are not associated with individual `DefId`s,
// e.g. a glob import can introduce a lot of names, all with the same `DefId`.
@@ -469,6 +467,7 @@ fn encode(&self, buf: &mut FileEncoder) -> LazyTables {
doc_link_resolutions: Table<DefIndex, LazyValue<DocLinkResMap>>,
doc_link_traits_in_scope: Table<DefIndex, LazyArray<DefId>>,
assumed_wf_types_for_rpitit: Table<DefIndex, LazyArray<(Ty<'static>, Span)>>,
+ opaque_ty_origin: Table<DefIndex, LazyValue<hir::OpaqueTyOrigin<DefId>>>,
}
#[derive(TyEncodable, TyDecodable)]
diff --git a/compiler/rustc_middle/src/middle/resolve_bound_vars.rs b/compiler/rustc_middle/src/middle/resolve_bound_vars.rs
index 13e35cd..111ac99 100644
--- a/compiler/rustc_middle/src/middle/resolve_bound_vars.rs
+++ b/compiler/rustc_middle/src/middle/resolve_bound_vars.rs
@@ -3,7 +3,7 @@
use rustc_data_structures::sorted_map::SortedMap;
use rustc_errors::ErrorGuaranteed;
use rustc_hir::ItemLocalId;
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalDefIdMap};
use rustc_macros::{Decodable, Encodable, HashStable, TyDecodable, TyEncodable};
use crate::ty;
@@ -54,4 +54,6 @@ pub struct ResolveBoundVars {
pub defs: SortedMap<ItemLocalId, ResolvedArg>,
pub late_bound_vars: SortedMap<ItemLocalId, Vec<ty::BoundVariableKind>>,
+
+ pub opaque_captured_lifetimes: LocalDefIdMap<Vec<(ResolvedArg, LocalDefId)>>,
}
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index cd148ae..978abab 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -39,7 +39,7 @@
use crate::ty::print::{FmtPrinter, Printer, pretty_print_const, with_no_trimmed_paths};
use crate::ty::visit::TypeVisitableExt;
use crate::ty::{
- self, AdtDef, GenericArg, GenericArgsRef, Instance, InstanceKind, List, Ty, TyCtxt,
+ self, AdtDef, GenericArg, GenericArgsRef, Instance, InstanceKind, List, Ty, TyCtxt, TypingMode,
UserTypeAnnotationIndex,
};
@@ -452,6 +452,15 @@ pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'
self.basic_blocks.as_mut()
}
+ pub fn typing_mode(&self, _tcx: TyCtxt<'tcx>) -> TypingMode<'tcx> {
+ match self.phase {
+ // FIXME(#132279): the MIR is quite clearly inside of a body, so we
+ // should instead reveal opaques defined by that body here.
+ MirPhase::Built | MirPhase::Analysis(_) => TypingMode::non_body_analysis(),
+ MirPhase::Runtime(_) => TypingMode::PostAnalysis,
+ }
+ }
+
#[inline]
pub fn local_kind(&self, local: Local) -> LocalKind {
let index = local.as_usize();
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index e690bf7..80ae5a7 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -762,32 +762,34 @@ fn write_basic_block<'tcx, F>(
// Terminator at the bottom.
extra_data(PassWhere::BeforeLocation(current_location), w)?;
- let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind);
- if options.include_extra_comments {
- writeln!(
- w,
- "{:A$} // {}{}",
- indented_terminator,
- if tcx.sess.verbose_internals() {
- format!("{current_location:?}: ")
- } else {
- String::new()
- },
- comment(tcx, data.terminator().source_info),
- A = ALIGN,
- )?;
- } else {
- writeln!(w, "{indented_terminator}")?;
- }
+ if data.terminator.is_some() {
+ let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind);
+ if options.include_extra_comments {
+ writeln!(
+ w,
+ "{:A$} // {}{}",
+ indented_terminator,
+ if tcx.sess.verbose_internals() {
+ format!("{current_location:?}: ")
+ } else {
+ String::new()
+ },
+ comment(tcx, data.terminator().source_info),
+ A = ALIGN,
+ )?;
+ } else {
+ writeln!(w, "{indented_terminator}")?;
+ }
- write_extra(
- tcx,
- w,
- |visitor| {
- visitor.visit_terminator(data.terminator(), current_location);
- },
- options,
- )?;
+ write_extra(
+ tcx,
+ w,
+ |visitor| {
+ visitor.visit_terminator(data.terminator(), current_location);
+ },
+ options,
+ )?;
+ }
extra_data(PassWhere::AfterLocation(current_location), w)?;
extra_data(PassWhere::AfterTerminator(block), w)?;
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index c610fac..85beb6e 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -19,9 +19,8 @@
use super::{BasicBlock, Const, Local, UserTypeProjection};
use crate::mir::coverage::CoverageKind;
-use crate::traits::Reveal;
use crate::ty::adjustment::PointerCoercion;
-use crate::ty::{self, GenericArgsRef, List, Region, Ty, UserTypeAnnotationIndex};
+use crate::ty::{self, GenericArgsRef, List, Region, Ty, TyCtxt, UserTypeAnnotationIndex};
/// Represents the "flavors" of MIR.
///
@@ -102,10 +101,10 @@ pub fn name(&self) -> &'static str {
}
}
- pub fn reveal(&self) -> Reveal {
- match *self {
- MirPhase::Built | MirPhase::Analysis(_) => Reveal::UserFacing,
- MirPhase::Runtime(_) => Reveal::All,
+ pub fn param_env<'tcx>(&self, tcx: TyCtxt<'tcx>, body_def_id: DefId) -> ty::ParamEnv<'tcx> {
+ match self {
+ MirPhase::Built | MirPhase::Analysis(_) => tcx.param_env(body_def_id),
+ MirPhase::Runtime(_) => tcx.param_env_reveal_all_normalized(body_def_id),
}
}
}
diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs
index 5f8427b..8cfc7f1 100644
--- a/compiler/rustc_middle/src/query/erase.rs
+++ b/compiler/rustc_middle/src/query/erase.rs
@@ -280,6 +280,7 @@ impl EraseType for $ty {
rustc_hir::IsAsync,
rustc_hir::ItemLocalId,
rustc_hir::LangItem,
+ rustc_hir::OpaqueTyOrigin<rustc_hir::def_id::DefId>,
rustc_hir::OwnerId,
rustc_hir::Upvar,
rustc_index::bit_set::FiniteBitSet<u32>,
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index d7a60a8..088b5d4 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -260,11 +260,10 @@
separate_provide_extern
}
- query is_type_alias_impl_trait(key: DefId) -> bool
+ query opaque_ty_origin(key: DefId) -> hir::OpaqueTyOrigin<DefId>
{
- desc { "determine whether the opaque is a type-alias impl trait" }
+ desc { "determine where the opaque originates from" }
separate_provide_extern
- feedable
}
query unsizing_params_for_adt(key: DefId) -> &'tcx rustc_index::bit_set::BitSet<u32>
@@ -1781,6 +1780,23 @@
-> &'tcx SortedMap<ItemLocalId, Vec<ty::BoundVariableKind>> {
desc { |tcx| "looking up late bound vars inside `{}`", tcx.def_path_str(owner_id) }
}
+ /// For an opaque type, return the list of (captured lifetime, inner generic param).
+ /// ```ignore (illustrative)
+ /// fn foo<'a: 'a, 'b, T>(&'b u8) -> impl Into<Self> + 'b { ... }
+ /// ```
+ ///
+ /// We would return `[('a, '_a), ('b, '_b)]`, with `'a` early-bound and `'b` late-bound.
+ ///
+ /// After hir_ty_lowering, we get:
+ /// ```ignore (pseudo-code)
+ /// opaque foo::<'a>::opaque<'_a, '_b>: Into<Foo<'_a>> + '_b;
+ /// ^^^^^^^^ inner generic params
+ /// fn foo<'a>: for<'b> fn(&'b u8) -> foo::<'a>::opaque::<'a, 'b>
+ /// ^^^^^^ captured lifetimes
+ /// ```
+ query opaque_captured_lifetimes(def_id: LocalDefId) -> &'tcx [(ResolvedArg, LocalDefId)] {
+ desc { |tcx| "listing captured lifetimes for opaque `{}`", tcx.def_path_str(def_id) }
+ }
/// Computes the visibility of the provided `def_id`.
///
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
index 71833ee..c56038d 100644
--- a/compiler/rustc_middle/src/ty/adjustment.rs
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -82,7 +82,7 @@ pub enum PointerCoercion {
/// `Box<[i32]>` is an `Adjust::Unsize` with the target `Box<[i32]>`.
#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Adjustment<'tcx> {
- pub kind: Adjust<'tcx>,
+ pub kind: Adjust,
pub target: Ty<'tcx>,
}
@@ -93,20 +93,20 @@ pub fn is_region_borrow(&self) -> bool {
}
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
-pub enum Adjust<'tcx> {
+pub enum Adjust {
/// Go from ! to any type.
NeverToAny,
/// Dereference once, producing a place.
- Deref(Option<OverloadedDeref<'tcx>>),
+ Deref(Option<OverloadedDeref>),
/// Take the address and produce either a `&` or `*` pointer.
- Borrow(AutoBorrow<'tcx>),
+ Borrow(AutoBorrow),
Pointer(PointerCoercion),
/// Take a pinned reference and reborrow as a `Pin<&mut T>` or `Pin<&T>`.
- ReborrowPin(ty::Region<'tcx>, hir::Mutability),
+ ReborrowPin(hir::Mutability),
}
/// An overloaded autoderef step, representing a `Deref(Mut)::deref(_mut)`
@@ -115,17 +115,16 @@ pub enum Adjust<'tcx> {
/// being those shared by both the receiver and the returned reference.
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
-pub struct OverloadedDeref<'tcx> {
- pub region: ty::Region<'tcx>,
+pub struct OverloadedDeref {
pub mutbl: hir::Mutability,
/// The `Span` associated with the field access or method call
/// that triggered this overloaded deref.
pub span: Span,
}
-impl<'tcx> OverloadedDeref<'tcx> {
+impl OverloadedDeref {
/// Get the zst function item type for this method call.
- pub fn method_call(&self, tcx: TyCtxt<'tcx>, source: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn method_call<'tcx>(&self, tcx: TyCtxt<'tcx>, source: Ty<'tcx>) -> Ty<'tcx> {
let trait_def_id = match self.mutbl {
hir::Mutability::Not => tcx.require_lang_item(LangItem::Deref, None),
hir::Mutability::Mut => tcx.require_lang_item(LangItem::DerefMut, None),
@@ -187,9 +186,9 @@ fn from(m: AutoBorrowMutability) -> Self {
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
-pub enum AutoBorrow<'tcx> {
+pub enum AutoBorrow {
/// Converts from T to &T.
- Ref(ty::Region<'tcx>, AutoBorrowMutability),
+ Ref(AutoBorrowMutability),
/// Converts from T to *T.
RawPtr(hir::Mutability),
diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs
index db56e00..3137fe9 100644
--- a/compiler/rustc_middle/src/ty/assoc.rs
+++ b/compiler/rustc_middle/src/ty/assoc.rs
@@ -34,8 +34,6 @@ pub struct AssocItem {
/// return-position `impl Trait` in trait desugaring. The `ImplTraitInTraitData`
/// provides additional information about its source.
pub opt_rpitit_info: Option<ty::ImplTraitInTraitData>,
-
- pub is_effects_desugaring: bool,
}
impl AssocItem {
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 5cbbc80..9abad6d 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -55,7 +55,7 @@
use rustc_type_ir::lang_items::TraitSolverLangItem;
pub use rustc_type_ir::lift::Lift;
use rustc_type_ir::{CollectAndApply, Interner, TypeFlags, WithCachedTypeInfo, search_graph};
-use tracing::{debug, trace};
+use tracing::{debug, instrument};
use crate::arena::Arena;
use crate::dep_graph::{DepGraph, DepKindStruct};
@@ -76,8 +76,8 @@
};
use crate::ty::predicate::ExistentialPredicateStableCmpExt as _;
use crate::ty::{
- self, AdtDef, AdtDefData, AdtKind, Binder, Clause, Clauses, Const, GenericArg, GenericArgs,
- GenericArgsRef, GenericParamDefKind, HostPolarity, ImplPolarity, List, ListWithCachedTypeInfo,
+ self, AdtDef, AdtDefData, AdtKind, Binder, BoundConstness, Clause, Clauses, Const, GenericArg,
+ GenericArgs, GenericArgsRef, GenericParamDefKind, ImplPolarity, List, ListWithCachedTypeInfo,
ParamConst, ParamTy, Pattern, PatternKind, PolyExistentialPredicate, PolyFnSig, Predicate,
PredicateKind, PredicatePolarity, Region, RegionKind, ReprOptions, TraitObjectVisitor, Ty,
TyKind, TyVid, Visibility,
@@ -2103,11 +2103,9 @@ pub fn local_visibility(self, def_id: LocalDefId) -> Visibility {
}
/// Returns the origin of the opaque type `def_id`.
- #[track_caller]
- pub fn opaque_type_origin(self, def_id: LocalDefId) -> hir::OpaqueTyOrigin {
- let origin = self.hir().expect_opaque_ty(def_id).origin;
- trace!("opaque_type_origin({def_id:?}) => {origin:?}");
- origin
+ #[instrument(skip(self), level = "trace", ret)]
+ pub fn local_opaque_ty_origin(self, def_id: LocalDefId) -> hir::OpaqueTyOrigin<LocalDefId> {
+ self.hir().expect_opaque_ty(def_id).origin
}
}
@@ -2205,7 +2203,7 @@ fn lift_to_interner(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
nop_slice_lift! {ty::ValTree<'a> => ty::ValTree<'tcx>}
TrivialLiftImpls! {
- ImplPolarity, PredicatePolarity, Promoted, HostPolarity,
+ ImplPolarity, PredicatePolarity, Promoted, BoundConstness,
}
macro_rules! sty_debug_print {
@@ -3060,7 +3058,7 @@ pub fn map_opaque_lifetime_to_parent_lifetime(
loop {
let parent = self.local_parent(opaque_lifetime_param_def_id);
- let hir::OpaqueTy { lifetime_mapping, .. } = self.hir().expect_opaque_ty(parent);
+ let lifetime_mapping = self.opaque_captured_lifetimes(parent);
let Some((lifetime, _)) = lifetime_mapping
.iter()
@@ -3069,8 +3067,8 @@ pub fn map_opaque_lifetime_to_parent_lifetime(
bug!("duplicated lifetime param should be present");
};
- match self.named_bound_var(lifetime.hir_id) {
- Some(resolve_bound_vars::ResolvedArg::EarlyBound(ebv)) => {
+ match *lifetime {
+ resolve_bound_vars::ResolvedArg::EarlyBound(ebv) => {
let new_parent = self.local_parent(ebv);
// If we map to another opaque, then it should be a parent
@@ -3089,7 +3087,7 @@ pub fn map_opaque_lifetime_to_parent_lifetime(
name: self.item_name(ebv.to_def_id()),
});
}
- Some(resolve_bound_vars::ResolvedArg::LateBound(_, _, lbv)) => {
+ resolve_bound_vars::ResolvedArg::LateBound(_, _, lbv) => {
let new_parent = self.local_parent(lbv);
return ty::Region::new_late_param(
self,
@@ -3100,13 +3098,13 @@ pub fn map_opaque_lifetime_to_parent_lifetime(
),
);
}
- Some(resolve_bound_vars::ResolvedArg::Error(guar)) => {
+ resolve_bound_vars::ResolvedArg::Error(guar) => {
return ty::Region::new_error(self, guar);
}
_ => {
return ty::Region::new_error_with_message(
self,
- lifetime.ident.span,
+ self.def_span(opaque_lifetime_param_def_id),
"cannot resolve lifetime",
);
}
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
index 704a197..04d0318 100644
--- a/compiler/rustc_middle/src/ty/flags.rs
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -267,7 +267,7 @@ fn add_predicate_atom(&mut self, atom: ty::PredicateKind<'_>) {
}
ty::PredicateKind::Clause(ty::ClauseKind::HostEffect(ty::HostEffectPredicate {
trait_ref,
- host: _,
+ constness: _,
})) => {
self.add_args(trait_ref.args);
}
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 2c7a3ff..0560ffe 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -4,8 +4,9 @@
use rustc_abi::Primitive::{self, Float, Int, Pointer};
use rustc_abi::{
- Abi, AddressSpace, Align, FieldsShape, HasDataLayout, Integer, LayoutCalculator, LayoutData,
- PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, Variants,
+ AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutCalculator,
+ LayoutData, PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
+ Variants,
};
use rustc_error_messages::DiagMessage;
use rustc_errors::{
@@ -757,7 +758,7 @@ fn ty_and_layout_for_variant(
Some(fields) => FieldsShape::Union(fields),
None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
},
- abi: Abi::Uninhabited,
+ backend_repr: BackendRepr::Uninhabited,
largest_niche: None,
align: tcx.data_layout.i8_align,
size: Size::ZERO,
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index bd32e58..dac81a6 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -1625,16 +1625,6 @@ pub fn opt_rpitit_info(self, def_id: DefId) -> Option<ImplTraitInTraitData> {
}
}
- /// Whether the `def_id` is an associated type that was desugared from a
- /// `#[const_trait]` or `impl_const`.
- pub fn is_effects_desugared_assoc_ty(self, def_id: DefId) -> bool {
- if let DefKind::AssocTy = self.def_kind(def_id) {
- self.associated_item(def_id).is_effects_desugaring
- } else {
- false
- }
- }
-
pub fn find_field_index(self, ident: Ident, variant: &VariantDef) -> Option<FieldIdx> {
variant.fields.iter_enumerated().find_map(|(i, field)| {
self.hygienic_eq(ident, field.ident(self), variant.def_id).then_some(i)
diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs
index 43bdce5..7c280bc 100644
--- a/compiler/rustc_middle/src/ty/parameterized.rs
+++ b/compiler/rustc_middle/src/ty/parameterized.rs
@@ -94,6 +94,7 @@ impl $crate::ty::ParameterizedOverTcx for $ty {
rustc_hir::def_id::DefId,
rustc_hir::def_id::DefIndex,
rustc_hir::definitions::DefKey,
+ rustc_hir::OpaqueTyOrigin<rustc_hir::def_id::DefId>,
rustc_index::bit_set::BitSet<u32>,
rustc_index::bit_set::FiniteBitSet<u32>,
rustc_session::cstore::ForeignModule,
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 0248aad..2480cee 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -1959,7 +1959,7 @@ fn pretty_print_bound_constness(
ty::BoundConstness::Const => {
p!("const ");
}
- ty::BoundConstness::ConstIfConst => {
+ ty::BoundConstness::Maybe => {
p!("~const ");
}
}
@@ -3076,9 +3076,9 @@ macro_rules! define_print_and_forward_display {
}
ty::HostEffectPredicate<'tcx> {
- let constness = match self.host {
- ty::HostPolarity::Const => { "const" }
- ty::HostPolarity::Maybe => { "~const" }
+ let constness = match self.constness {
+ ty::BoundConstness::Const => { "const" }
+ ty::BoundConstness::Maybe => { "~const" }
};
p!(print(self.trait_ref.self_ty()), ": {constness} ");
p!(print(self.trait_ref.print_trait_sugared()))
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index a8327eb..f54afdb 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -1354,6 +1354,7 @@ pub fn builtin_index(self) -> Option<Ty<'tcx>> {
}
}
+ #[tracing::instrument(level = "trace", skip(tcx))]
pub fn fn_sig(self, tcx: TyCtxt<'tcx>) -> PolyFnSig<'tcx> {
match self.kind() {
FnDef(def_id, args) => tcx.fn_sig(*def_id).instantiate(tcx, args),
diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml
index 529e9cc..1190472 100644
--- a/compiler/rustc_mir_build/Cargo.toml
+++ b/compiler/rustc_mir_build/Cargo.toml
@@ -7,6 +7,8 @@
# tidy-alphabetical-start
either = "1.5.0"
itertools = "0.12"
+
+rustc_abi = { path = "../rustc_abi" }
rustc_apfloat = "0.2.0"
rustc_arena = { path = "../rustc_arena" }
rustc_ast = { path = "../rustc_ast" }
@@ -22,7 +24,6 @@
rustc_pattern_analysis = { path = "../rustc_pattern_analysis" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
-rustc_target = { path = "../rustc_target" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
tracing = "0.1"
# tidy-alphabetical-end
diff --git a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
index 9e3af89..07964e3 100644
--- a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
+++ b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
@@ -1,3 +1,4 @@
+use rustc_abi::{FieldIdx, VariantIdx};
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::mir::*;
@@ -6,7 +7,6 @@
use rustc_middle::ty::cast::mir_cast_kind;
use rustc_span::Span;
use rustc_span::source_map::Spanned;
-use rustc_target::abi::{FieldIdx, VariantIdx};
use super::{PResult, ParseCtxt, parse_by_kind};
use crate::build::custom::ParseError;
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
index ae164cf..3f2e3b9 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -1,5 +1,6 @@
//! See docs in build/expr/mod.rs
+use rustc_abi::Size;
use rustc_ast as ast;
use rustc_hir::LangItem;
use rustc_middle::mir::interpret::{
@@ -11,7 +12,6 @@
self, CanonicalUserType, CanonicalUserTypeAnnotation, Ty, TyCtxt, UserTypeAnnotationIndex,
};
use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::Size;
use tracing::{instrument, trace};
use crate::build::{Builder, parse_float_into_constval};
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
index c7298e3..9f6e073 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_place.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -3,6 +3,7 @@
use std::assert_matches::assert_matches;
use std::iter;
+use rustc_abi::{FIRST_VARIANT, FieldIdx, VariantIdx};
use rustc_hir::def_id::LocalDefId;
use rustc_middle::hir::place::{Projection as HirProjection, ProjectionKind as HirProjectionKind};
use rustc_middle::middle::region;
@@ -12,7 +13,6 @@
use rustc_middle::ty::{self, AdtDef, CanonicalUserTypeAnnotation, Ty, Variance};
use rustc_middle::{bug, span_bug};
use rustc_span::Span;
-use rustc_target::abi::{FIRST_VARIANT, FieldIdx, VariantIdx};
use tracing::{debug, instrument, trace};
use crate::build::ForGuard::{OutsideGuard, RefWithinGuard};
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index fd949a5..1985dd3 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -1,5 +1,6 @@
//! See docs in `build/expr/mod.rs`.
+use rustc_abi::{BackendRepr, FieldIdx, Primitive};
use rustc_hir::lang_items::LangItem;
use rustc_index::{Idx, IndexVec};
use rustc_middle::bug;
@@ -13,7 +14,6 @@
use rustc_middle::ty::{self, Ty, UpvarArgs};
use rustc_span::source_map::Spanned;
use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{Abi, FieldIdx, Primitive};
use tracing::debug;
use crate::build::expr::as_place::PlaceBase;
@@ -207,7 +207,7 @@ pub(crate) fn as_rvalue(
);
let (op, ty) = (Operand::Move(discr), discr_ty);
- if let Abi::Scalar(scalar) = layout.unwrap().abi
+ if let BackendRepr::Scalar(scalar) = layout.unwrap().backend_repr
&& !scalar.is_always_valid(&this.tcx)
&& let Primitive::Int(int_width, _signed) = scalar.primitive()
{
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index 51ead57..a62d4e9 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -5,6 +5,7 @@
//! This also includes code for pattern bindings in `let` statements and
//! function parameters.
+use rustc_abi::VariantIdx;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir::{BindingMode, ByRef};
@@ -15,7 +16,6 @@
use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty};
use rustc_span::symbol::Symbol;
use rustc_span::{BytePos, Pos, Span};
-use rustc_target::abi::VariantIdx;
use tracing::{debug, instrument};
use crate::build::ForGuard::{self, OutsideGuard, RefWithinGuard};
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index e15ea4d..46be2ae 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -1,4 +1,5 @@
use itertools::Itertools;
+use rustc_abi::{ExternAbi, FieldIdx};
use rustc_apfloat::Float;
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_ast::attr;
@@ -20,8 +21,6 @@
use rustc_middle::{bug, span_bug};
use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol};
-use rustc_target::abi::FieldIdx;
-use rustc_target::spec::abi::Abi;
use super::lints;
use crate::build::expr::as_place::PlaceBuilder;
@@ -467,7 +466,7 @@ fn construct_fn<'tcx>(
if let DefKind::Closure = tcx.def_kind(fn_def) {
// HACK(eddyb) Avoid having RustCall on closures,
// as it adds unnecessary (and wrong) auto-tupling.
- abi = Abi::Rust;
+ abi = ExternAbi::Rust;
}
let arguments = &thir.params;
@@ -540,7 +539,7 @@ fn construct_fn<'tcx>(
let mut body = builder.finish();
- body.spread_arg = if abi == Abi::RustCall {
+ body.spread_arg = if abi == ExternAbi::RustCall {
// RustCall pseudo-ABI untuples the last argument.
Some(Local::new(arguments.len()))
} else {
@@ -792,12 +791,6 @@ fn new(
}
fn finish(self) -> Body<'tcx> {
- for (index, block) in self.cfg.basic_blocks.iter().enumerate() {
- if block.terminator.is_none() {
- span_bug!(self.fn_span, "no terminator on block {:?}", index);
- }
- }
-
let mut body = Body::new(
MirSource::item(self.def_id.to_def_id()),
self.cfg.basic_blocks,
@@ -811,6 +804,23 @@ fn finish(self) -> Body<'tcx> {
None,
);
body.coverage_info_hi = self.coverage_info.map(|b| b.into_done());
+
+ for (index, block) in body.basic_blocks.iter().enumerate() {
+ if block.terminator.is_none() {
+ use rustc_middle::mir::pretty;
+ let options = pretty::PrettyPrintMirOptions::from_cli(self.tcx);
+ pretty::write_mir_fn(
+ self.tcx,
+ &body,
+ &mut |_, _| Ok(()),
+ &mut std::io::stdout(),
+ options,
+ )
+ .unwrap();
+ span_bug!(self.fn_span, "no terminator on block {:?}", index);
+ }
+ }
+
body
}
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
index d20e5fc..0481f71 100644
--- a/compiler/rustc_mir_build/src/thir/cx/expr.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -1,4 +1,5 @@
use itertools::Itertools;
+use rustc_abi::{FIRST_VARIANT, FieldIdx};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
@@ -18,7 +19,6 @@
};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, sym};
-use rustc_target::abi::{FIRST_VARIANT, FieldIdx};
use tracing::{debug, info, instrument, trace};
use crate::errors;
@@ -140,7 +140,7 @@ fn apply_adjustment(
expr = Expr {
temp_lifetime,
- ty: Ty::new_ref(self.tcx, deref.region, expr.ty, deref.mutbl),
+ ty: Ty::new_ref(self.tcx, self.tcx.lifetimes.re_erased, expr.ty, deref.mutbl),
span,
kind: ExprKind::Borrow {
borrow_kind: deref.mutbl.to_borrow_kind(),
@@ -152,14 +152,14 @@ fn apply_adjustment(
self.overloaded_place(hir_expr, adjustment.target, Some(call), expr, deref.span)
}
- Adjust::Borrow(AutoBorrow::Ref(_, m)) => ExprKind::Borrow {
+ Adjust::Borrow(AutoBorrow::Ref(m)) => ExprKind::Borrow {
borrow_kind: m.to_borrow_kind(),
arg: self.thir.exprs.push(expr),
},
Adjust::Borrow(AutoBorrow::RawPtr(mutability)) => {
ExprKind::RawBorrow { mutability, arg: self.thir.exprs.push(expr) }
}
- Adjust::ReborrowPin(region, mutbl) => {
+ Adjust::ReborrowPin(mutbl) => {
debug!("apply ReborrowPin adjustment");
// Rewrite `$expr` as `Pin { __pointer: &(mut)? *($expr).__pointer }`
@@ -197,7 +197,8 @@ fn apply_adjustment(
hir::Mutability::Mut => BorrowKind::Mut { kind: mir::MutBorrowKind::Default },
hir::Mutability::Not => BorrowKind::Shared,
};
- let new_pin_target = Ty::new_ref(self.tcx, region, ptr_target_ty, mutbl);
+ let new_pin_target =
+ Ty::new_ref(self.tcx, self.tcx.lifetimes.re_erased, ptr_target_ty, mutbl);
let expr = self.thir.exprs.push(Expr {
temp_lifetime,
ty: new_pin_target,
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index c89f526..983853d 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -1,4 +1,5 @@
use either::Either;
+use rustc_abi::{FieldIdx, VariantIdx};
use rustc_apfloat::Float;
use rustc_hir as hir;
use rustc_index::Idx;
@@ -9,7 +10,6 @@
use rustc_middle::thir::{FieldPat, Pat, PatKind};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt, TypingMode, ValTree};
use rustc_span::Span;
-use rustc_target::abi::{FieldIdx, VariantIdx};
use rustc_trait_selection::traits::ObligationCause;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
use tracing::{debug, instrument, trace};
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index 56e5156..ec852ad 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -5,6 +5,7 @@
use std::cmp::Ordering;
+use rustc_abi::{FieldIdx, Integer};
use rustc_errors::codes::*;
use rustc_hir::def::{CtorOf, DefKind, Res};
use rustc_hir::pat_util::EnumerateAndAdjustIterator;
@@ -20,7 +21,6 @@
use rustc_middle::{bug, span_bug};
use rustc_span::def_id::LocalDefId;
use rustc_span::{ErrorGuaranteed, Span};
-use rustc_target::abi::{FieldIdx, Integer};
use tracing::{debug, instrument};
pub(crate) use self::check_match::check_match;
diff --git a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
index 96a70f4..bac75b9 100644
--- a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
@@ -32,8 +32,11 @@ pub(crate) struct Formatter<'mir, 'tcx, A>
where
A: Analysis<'tcx>,
{
- body: &'mir Body<'tcx>,
- results: RefCell<Option<Results<'tcx, A>>>,
+ // The `RefCell` is used because `<Formatter as Labeller>::node_label`
+ // takes `&self`, but it needs to modify the cursor. This is also the
+ // reason for the `Formatter`/`BlockFormatter` split; `BlockFormatter` has
+ // the operations that involve the mutation, i.e. within the `borrow_mut`.
+ cursor: RefCell<ResultsCursor<'mir, 'tcx, A>>,
style: OutputStyle,
reachable: BitSet<BasicBlock>,
}
@@ -48,11 +51,15 @@ pub(crate) fn new(
style: OutputStyle,
) -> Self {
let reachable = mir::traversal::reachable_as_bitset(body);
- Formatter { body, results: Some(results).into(), style, reachable }
+ Formatter { cursor: results.into_results_cursor(body).into(), style, reachable }
+ }
+
+ fn body(&self) -> &'mir Body<'tcx> {
+ self.cursor.borrow().body()
}
pub(crate) fn into_results(self) -> Results<'tcx, A> {
- self.results.into_inner().unwrap()
+ self.cursor.into_inner().into_results()
}
}
@@ -81,7 +88,7 @@ impl<'tcx, A> dot::Labeller<'_> for Formatter<'_, 'tcx, A>
type Edge = CfgEdge;
fn graph_id(&self) -> dot::Id<'_> {
- let name = graphviz_safe_def_name(self.body.source.def_id());
+ let name = graphviz_safe_def_name(self.body().source.def_id());
dot::Id::new(format!("graph_for_def_id_{name}")).unwrap()
}
@@ -90,20 +97,11 @@ fn node_id(&self, n: &Self::Node) -> dot::Id<'_> {
}
fn node_label(&self, block: &Self::Node) -> dot::LabelText<'_> {
- let mut label = Vec::new();
- self.results.replace_with(|results| {
- // `Formatter::result` is a `RefCell<Option<_>>` so we can replace
- // the value with `None`, move it into the results cursor, move it
- // back out, and return it to the refcell wrapped in `Some`.
- let mut fmt = BlockFormatter {
- results: results.take().unwrap().into_results_cursor(self.body),
- style: self.style,
- bg: Background::Light,
- };
+ let mut cursor = self.cursor.borrow_mut();
+ let mut fmt =
+ BlockFormatter { cursor: &mut cursor, style: self.style, bg: Background::Light };
+ let label = fmt.write_node_label(*block).unwrap();
- fmt.write_node_label(&mut label, *block).unwrap();
- Some(fmt.results.into_results())
- });
dot::LabelText::html(String::from_utf8(label).unwrap())
}
@@ -112,12 +110,12 @@ fn node_shape(&self, _n: &Self::Node) -> Option<dot::LabelText<'_>> {
}
fn edge_label(&self, e: &Self::Edge) -> dot::LabelText<'_> {
- let label = &self.body[e.source].terminator().kind.fmt_successor_labels()[e.index];
+ let label = &self.body()[e.source].terminator().kind.fmt_successor_labels()[e.index];
dot::LabelText::label(label.clone())
}
}
-impl<'mir, 'tcx, A> dot::GraphWalk<'mir> for Formatter<'mir, 'tcx, A>
+impl<'tcx, A> dot::GraphWalk<'_> for Formatter<'_, 'tcx, A>
where
A: Analysis<'tcx>,
{
@@ -125,7 +123,7 @@ impl<'mir, 'tcx, A> dot::GraphWalk<'mir> for Formatter<'mir, 'tcx, A>
type Edge = CfgEdge;
fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
- self.body
+ self.body()
.basic_blocks
.indices()
.filter(|&idx| self.reachable.contains(idx))
@@ -134,10 +132,10 @@ fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
}
fn edges(&self) -> dot::Edges<'_, Self::Edge> {
- self.body
- .basic_blocks
+ let body = self.body();
+ body.basic_blocks
.indices()
- .flat_map(|bb| dataflow_successors(self.body, bb))
+ .flat_map(|bb| dataflow_successors(body, bb))
.collect::<Vec<_>>()
.into()
}
@@ -147,20 +145,20 @@ fn source(&self, edge: &Self::Edge) -> Self::Node {
}
fn target(&self, edge: &Self::Edge) -> Self::Node {
- self.body[edge.source].terminator().successors().nth(edge.index).unwrap()
+ self.body()[edge.source].terminator().successors().nth(edge.index).unwrap()
}
}
-struct BlockFormatter<'mir, 'tcx, A>
+struct BlockFormatter<'a, 'mir, 'tcx, A>
where
A: Analysis<'tcx>,
{
- results: ResultsCursor<'mir, 'tcx, A>,
+ cursor: &'a mut ResultsCursor<'mir, 'tcx, A>,
bg: Background,
style: OutputStyle,
}
-impl<'mir, 'tcx, A> BlockFormatter<'mir, 'tcx, A>
+impl<'tcx, A> BlockFormatter<'_, '_, 'tcx, A>
where
A: Analysis<'tcx>,
A::Domain: DebugWithContext<A>,
@@ -173,7 +171,9 @@ fn toggle_background(&mut self) -> Background {
bg
}
- fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io::Result<()> {
+ fn write_node_label(&mut self, block: BasicBlock) -> io::Result<Vec<u8>> {
+ use std::io::Write;
+
// Sample output:
// +-+-----------------------------------------------+
// A | bb4 |
@@ -200,6 +200,9 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
// attributes. Make sure to test the output before trying to remove the redundancy.
// Notably, `align` was found to have no effect when applied only to <table>.
+ let mut v = vec![];
+ let w = &mut v;
+
let table_fmt = concat!(
" border=\"1\"",
" cellborder=\"1\"",
@@ -219,8 +222,8 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
// C: State at start of block
self.bg = Background::Light;
- self.results.seek_to_block_start(block);
- let block_start_state = self.results.get().clone();
+ self.cursor.seek_to_block_start(block);
+ let block_start_state = self.cursor.get().clone();
self.write_row_with_full_state(w, "", "(on start)")?;
// D + E: Statement and terminator transfer functions
@@ -228,12 +231,12 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
// F: State at end of block
- let terminator = self.results.body()[block].terminator();
+ let terminator = self.cursor.body()[block].terminator();
// Write the full dataflow state immediately after the terminator if it differs from the
// state at block entry.
- self.results.seek_to_block_end(block);
- if self.results.get() != &block_start_state || A::Direction::IS_BACKWARD {
+ self.cursor.seek_to_block_end(block);
+ if self.cursor.get() != &block_start_state || A::Direction::IS_BACKWARD {
let after_terminator_name = match terminator.kind {
mir::TerminatorKind::Call { target: Some(_), .. } => "(on unwind)",
_ => "(on end)",
@@ -250,8 +253,8 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
match terminator.kind {
mir::TerminatorKind::Call { destination, .. } => {
self.write_row(w, "", "(on successful return)", |this, w, fmt| {
- let state_on_unwind = this.results.get().clone();
- this.results.apply_custom_effect(|analysis, state| {
+ let state_on_unwind = this.cursor.get().clone();
+ this.cursor.apply_custom_effect(|analysis, state| {
analysis.apply_call_return_effect(
state,
block,
@@ -265,9 +268,9 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
colspan = this.style.num_state_columns(),
fmt = fmt,
diff = diff_pretty(
- this.results.get(),
+ this.cursor.get(),
&state_on_unwind,
- this.results.analysis()
+ this.cursor.analysis()
),
)
})?;
@@ -275,8 +278,8 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
mir::TerminatorKind::Yield { resume, resume_arg, .. } => {
self.write_row(w, "", "(on yield resume)", |this, w, fmt| {
- let state_on_coroutine_drop = this.results.get().clone();
- this.results.apply_custom_effect(|analysis, state| {
+ let state_on_coroutine_drop = this.cursor.get().clone();
+ this.cursor.apply_custom_effect(|analysis, state| {
analysis.apply_call_return_effect(
state,
resume,
@@ -290,9 +293,9 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
colspan = this.style.num_state_columns(),
fmt = fmt,
diff = diff_pretty(
- this.results.get(),
+ this.cursor.get(),
&state_on_coroutine_drop,
- this.results.analysis()
+ this.cursor.analysis()
),
)
})?;
@@ -302,8 +305,8 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
if !targets.is_empty() =>
{
self.write_row(w, "", "(on successful return)", |this, w, fmt| {
- let state_on_unwind = this.results.get().clone();
- this.results.apply_custom_effect(|analysis, state| {
+ let state_on_unwind = this.cursor.get().clone();
+ this.cursor.apply_custom_effect(|analysis, state| {
analysis.apply_call_return_effect(
state,
block,
@@ -317,9 +320,9 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
colspan = this.style.num_state_columns(),
fmt = fmt,
diff = diff_pretty(
- this.results.get(),
+ this.cursor.get(),
&state_on_unwind,
- this.results.analysis()
+ this.cursor.analysis()
),
)
})?;
@@ -328,7 +331,9 @@ fn write_node_label(&mut self, w: &mut impl io::Write, block: BasicBlock) -> io:
_ => {}
};
- write!(w, "</table>")
+ write!(w, "</table>")?;
+
+ Ok(v)
}
fn write_block_header_simple(
@@ -407,9 +412,9 @@ fn write_statements_and_terminator(
block: BasicBlock,
) -> io::Result<()> {
let diffs = StateDiffCollector::run(
- self.results.body(),
+ self.cursor.body(),
block,
- self.results.mut_results(),
+ self.cursor.mut_results(),
self.style,
);
@@ -420,7 +425,7 @@ fn write_statements_and_terminator(
if A::Direction::IS_FORWARD { it.next().unwrap() } else { it.next_back().unwrap() }
};
- for (i, statement) in self.results.body()[block].statements.iter().enumerate() {
+ for (i, statement) in self.cursor.body()[block].statements.iter().enumerate() {
let statement_str = format!("{statement:?}");
let index_str = format!("{i}");
@@ -442,7 +447,7 @@ fn write_statements_and_terminator(
assert!(diffs_after.is_empty());
assert!(diffs_before.as_ref().map_or(true, ExactSizeIterator::is_empty));
- let terminator = self.results.body()[block].terminator();
+ let terminator = self.cursor.body()[block].terminator();
let mut terminator_str = String::new();
terminator.kind.fmt_head(&mut terminator_str).unwrap();
@@ -492,8 +497,8 @@ fn write_row_with_full_state(
mir: &str,
) -> io::Result<()> {
self.write_row(w, i, mir, |this, w, fmt| {
- let state = this.results.get();
- let analysis = this.results.analysis();
+ let state = this.cursor.get();
+ let analysis = this.cursor.analysis();
// FIXME: The full state vector can be quite long. It would be nice to split on commas
// and use some text wrapping algorithm.
diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs
index f7d4a08..af2d514 100644
--- a/compiler/rustc_mir_dataflow/src/value_analysis.rs
+++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs
@@ -1,38 +1,3 @@
-//! This module provides a framework on top of the normal MIR dataflow framework to simplify the
-//! implementation of analyses that track information about the values stored in certain places.
-//! We are using the term "place" here to refer to a `mir::Place` (a place expression) instead of
-//! an `interpret::Place` (a memory location).
-//!
-//! The default methods of [`ValueAnalysis`] (prefixed with `super_` instead of `handle_`)
-//! provide some behavior that should be valid for all abstract domains that are based only on the
-//! value stored in a certain place. On top of these default rules, an implementation should
-//! override some of the `handle_` methods. For an example, see `ConstAnalysis`.
-//!
-//! An implementation must also provide a [`Map`]. Before the analysis begins, all places that
-//! should be tracked during the analysis must be registered. During the analysis, no new places
-//! can be registered. The [`State`] can be queried to retrieve the abstract value stored for a
-//! certain place by passing the map.
-//!
-//! This framework is currently experimental. Originally, it supported shared references and enum
-//! variants. However, it was discovered that both of these were unsound, and especially references
-//! had subtle but serious issues. In the future, they could be added back in, but we should clarify
-//! the rules for optimizations that rely on the aliasing model first.
-//!
-//!
-//! # Notes
-//!
-//! - The bottom state denotes uninitialized memory. Because we are only doing a sound approximation
-//! of the actual execution, we can also use this state for places where access would be UB.
-//!
-//! - The assignment logic in `State::insert_place_idx` assumes that the places are non-overlapping,
-//! or identical. Note that this refers to place expressions, not memory locations.
-//!
-//! - Currently, places that have their reference taken cannot be tracked. Although this would be
-//! possible, it has to rely on some aliasing model, which we are not ready to commit to yet.
-//! Because of that, we can assume that the only way to change the value behind a tracked place is
-//! by direct assignment.
-
-use std::assert_matches::assert_matches;
use std::fmt::{Debug, Formatter};
use std::ops::Range;
@@ -42,359 +7,14 @@
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_index::IndexVec;
use rustc_index::bit_set::BitSet;
-use rustc_middle::bug;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};
use tracing::debug;
-use crate::fmt::DebugWithContext;
+use crate::JoinSemiLattice;
use crate::lattice::{HasBottom, HasTop};
-use crate::{Analysis, JoinSemiLattice, SwitchIntEdgeEffects};
-
-pub trait ValueAnalysis<'tcx> {
- /// For each place of interest, the analysis tracks a value of the given type.
- type Value: Clone + JoinSemiLattice + HasBottom + HasTop + Debug;
-
- const NAME: &'static str;
-
- fn map(&self) -> &Map<'tcx>;
-
- fn handle_statement(&self, statement: &Statement<'tcx>, state: &mut State<Self::Value>) {
- self.super_statement(statement, state)
- }
-
- fn super_statement(&self, statement: &Statement<'tcx>, state: &mut State<Self::Value>) {
- match &statement.kind {
- StatementKind::Assign(box (place, rvalue)) => {
- self.handle_assign(*place, rvalue, state);
- }
- StatementKind::SetDiscriminant { box place, variant_index } => {
- self.handle_set_discriminant(*place, *variant_index, state);
- }
- StatementKind::Intrinsic(box intrinsic) => {
- self.handle_intrinsic(intrinsic, state);
- }
- StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
- // StorageLive leaves the local in an uninitialized state.
- // StorageDead makes it UB to access the local afterwards.
- state.flood_with(Place::from(*local).as_ref(), self.map(), Self::Value::BOTTOM);
- }
- StatementKind::Deinit(box place) => {
- // Deinit makes the place uninitialized.
- state.flood_with(place.as_ref(), self.map(), Self::Value::BOTTOM);
- }
- StatementKind::Retag(..) => {
- // We don't track references.
- }
- StatementKind::ConstEvalCounter
- | StatementKind::Nop
- | StatementKind::FakeRead(..)
- | StatementKind::PlaceMention(..)
- | StatementKind::Coverage(..)
- | StatementKind::AscribeUserType(..) => (),
- }
- }
-
- fn handle_set_discriminant(
- &self,
- place: Place<'tcx>,
- variant_index: VariantIdx,
- state: &mut State<Self::Value>,
- ) {
- self.super_set_discriminant(place, variant_index, state)
- }
-
- fn super_set_discriminant(
- &self,
- place: Place<'tcx>,
- _variant_index: VariantIdx,
- state: &mut State<Self::Value>,
- ) {
- state.flood_discr(place.as_ref(), self.map());
- }
-
- fn handle_intrinsic(
- &self,
- intrinsic: &NonDivergingIntrinsic<'tcx>,
- state: &mut State<Self::Value>,
- ) {
- self.super_intrinsic(intrinsic, state);
- }
-
- fn super_intrinsic(
- &self,
- intrinsic: &NonDivergingIntrinsic<'tcx>,
- _state: &mut State<Self::Value>,
- ) {
- match intrinsic {
- NonDivergingIntrinsic::Assume(..) => {
- // Could use this, but ignoring it is sound.
- }
- NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping {
- dst: _,
- src: _,
- count: _,
- }) => {
- // This statement represents `*dst = *src`, `count` times.
- }
- }
- }
-
- fn handle_assign(
- &self,
- target: Place<'tcx>,
- rvalue: &Rvalue<'tcx>,
- state: &mut State<Self::Value>,
- ) {
- self.super_assign(target, rvalue, state)
- }
-
- fn super_assign(
- &self,
- target: Place<'tcx>,
- rvalue: &Rvalue<'tcx>,
- state: &mut State<Self::Value>,
- ) {
- let result = self.handle_rvalue(rvalue, state);
- state.assign(target.as_ref(), result, self.map());
- }
-
- fn handle_rvalue(
- &self,
- rvalue: &Rvalue<'tcx>,
- state: &mut State<Self::Value>,
- ) -> ValueOrPlace<Self::Value> {
- self.super_rvalue(rvalue, state)
- }
-
- fn super_rvalue(
- &self,
- rvalue: &Rvalue<'tcx>,
- state: &mut State<Self::Value>,
- ) -> ValueOrPlace<Self::Value> {
- match rvalue {
- Rvalue::Use(operand) => self.handle_operand(operand, state),
- Rvalue::CopyForDeref(place) => self.handle_operand(&Operand::Copy(*place), state),
- Rvalue::Ref(..) | Rvalue::RawPtr(..) => {
- // We don't track such places.
- ValueOrPlace::TOP
- }
- Rvalue::Repeat(..)
- | Rvalue::ThreadLocalRef(..)
- | Rvalue::Len(..)
- | Rvalue::Cast(..)
- | Rvalue::BinaryOp(..)
- | Rvalue::NullaryOp(..)
- | Rvalue::UnaryOp(..)
- | Rvalue::Discriminant(..)
- | Rvalue::Aggregate(..)
- | Rvalue::ShallowInitBox(..) => {
- // No modification is possible through these r-values.
- ValueOrPlace::TOP
- }
- }
- }
-
- fn handle_operand(
- &self,
- operand: &Operand<'tcx>,
- state: &mut State<Self::Value>,
- ) -> ValueOrPlace<Self::Value> {
- self.super_operand(operand, state)
- }
-
- fn super_operand(
- &self,
- operand: &Operand<'tcx>,
- state: &mut State<Self::Value>,
- ) -> ValueOrPlace<Self::Value> {
- match operand {
- Operand::Constant(box constant) => {
- ValueOrPlace::Value(self.handle_constant(constant, state))
- }
- Operand::Copy(place) | Operand::Move(place) => {
- // On move, we would ideally flood the place with bottom. But with the current
- // framework this is not possible (similar to `InterpCx::eval_operand`).
- self.map()
- .find(place.as_ref())
- .map(ValueOrPlace::Place)
- .unwrap_or(ValueOrPlace::TOP)
- }
- }
- }
-
- fn handle_constant(
- &self,
- constant: &ConstOperand<'tcx>,
- state: &mut State<Self::Value>,
- ) -> Self::Value {
- self.super_constant(constant, state)
- }
-
- fn super_constant(
- &self,
- _constant: &ConstOperand<'tcx>,
- _state: &mut State<Self::Value>,
- ) -> Self::Value {
- Self::Value::TOP
- }
-
- /// The effect of a successful function call return should not be
- /// applied here, see [`Analysis::apply_terminator_effect`].
- fn handle_terminator<'mir>(
- &self,
- terminator: &'mir Terminator<'tcx>,
- state: &mut State<Self::Value>,
- ) -> TerminatorEdges<'mir, 'tcx> {
- self.super_terminator(terminator, state)
- }
-
- fn super_terminator<'mir>(
- &self,
- terminator: &'mir Terminator<'tcx>,
- state: &mut State<Self::Value>,
- ) -> TerminatorEdges<'mir, 'tcx> {
- match &terminator.kind {
- TerminatorKind::Call { .. } | TerminatorKind::InlineAsm { .. } => {
- // Effect is applied by `handle_call_return`.
- }
- TerminatorKind::Drop { place, .. } => {
- state.flood_with(place.as_ref(), self.map(), Self::Value::BOTTOM);
- }
- TerminatorKind::Yield { .. } => {
- // They would have an effect, but are not allowed in this phase.
- bug!("encountered disallowed terminator");
- }
- TerminatorKind::SwitchInt { discr, targets } => {
- return self.handle_switch_int(discr, targets, state);
- }
- TerminatorKind::TailCall { .. } => {
- // FIXME(explicit_tail_calls): determine if we need to do something here (probably not)
- }
- TerminatorKind::Goto { .. }
- | TerminatorKind::UnwindResume
- | TerminatorKind::UnwindTerminate(_)
- | TerminatorKind::Return
- | TerminatorKind::Unreachable
- | TerminatorKind::Assert { .. }
- | TerminatorKind::CoroutineDrop
- | TerminatorKind::FalseEdge { .. }
- | TerminatorKind::FalseUnwind { .. } => {
- // These terminators have no effect on the analysis.
- }
- }
- terminator.edges()
- }
-
- fn handle_call_return(
- &self,
- return_places: CallReturnPlaces<'_, 'tcx>,
- state: &mut State<Self::Value>,
- ) {
- self.super_call_return(return_places, state)
- }
-
- fn super_call_return(
- &self,
- return_places: CallReturnPlaces<'_, 'tcx>,
- state: &mut State<Self::Value>,
- ) {
- return_places.for_each(|place| {
- state.flood(place.as_ref(), self.map());
- })
- }
-
- fn handle_switch_int<'mir>(
- &self,
- discr: &'mir Operand<'tcx>,
- targets: &'mir SwitchTargets,
- state: &mut State<Self::Value>,
- ) -> TerminatorEdges<'mir, 'tcx> {
- self.super_switch_int(discr, targets, state)
- }
-
- fn super_switch_int<'mir>(
- &self,
- discr: &'mir Operand<'tcx>,
- targets: &'mir SwitchTargets,
- _state: &mut State<Self::Value>,
- ) -> TerminatorEdges<'mir, 'tcx> {
- TerminatorEdges::SwitchInt { discr, targets }
- }
-
- fn wrap(self) -> ValueAnalysisWrapper<Self>
- where
- Self: Sized,
- {
- ValueAnalysisWrapper(self)
- }
-}
-
-pub struct ValueAnalysisWrapper<T>(pub T);
-
-impl<'tcx, T: ValueAnalysis<'tcx>> Analysis<'tcx> for ValueAnalysisWrapper<T> {
- type Domain = State<T::Value>;
-
- const NAME: &'static str = T::NAME;
-
- fn bottom_value(&self, _body: &Body<'tcx>) -> Self::Domain {
- State::Unreachable
- }
-
- fn initialize_start_block(&self, body: &Body<'tcx>, state: &mut Self::Domain) {
- // The initial state maps all tracked places of argument projections to ⊤ and the rest to ⊥.
- assert_matches!(state, State::Unreachable);
- *state = State::new_reachable();
- for arg in body.args_iter() {
- state.flood(PlaceRef { local: arg, projection: &[] }, self.0.map());
- }
- }
-
- fn apply_statement_effect(
- &mut self,
- state: &mut Self::Domain,
- statement: &Statement<'tcx>,
- _location: Location,
- ) {
- if state.is_reachable() {
- self.0.handle_statement(statement, state);
- }
- }
-
- fn apply_terminator_effect<'mir>(
- &mut self,
- state: &mut Self::Domain,
- terminator: &'mir Terminator<'tcx>,
- _location: Location,
- ) -> TerminatorEdges<'mir, 'tcx> {
- if state.is_reachable() {
- self.0.handle_terminator(terminator, state)
- } else {
- TerminatorEdges::None
- }
- }
-
- fn apply_call_return_effect(
- &mut self,
- state: &mut Self::Domain,
- _block: BasicBlock,
- return_places: CallReturnPlaces<'_, 'tcx>,
- ) {
- if state.is_reachable() {
- self.0.handle_call_return(return_places, state)
- }
- }
-
- fn apply_switch_int_edge_effects(
- &mut self,
- _block: BasicBlock,
- _discr: &Operand<'tcx>,
- _apply_edge_effects: &mut impl SwitchIntEdgeEffects<Self::Domain>,
- ) {
- }
-}
rustc_index::newtype_index!(
/// This index uniquely identifies a place.
@@ -464,7 +84,7 @@ fn join(&mut self, other: &Self) -> bool {
}
}
-/// The dataflow state for an instance of [`ValueAnalysis`].
+/// Dataflow state.
///
/// Every instance specifies a lattice that represents the possible values of a single tracked
/// place. If we call this lattice `V` and set of tracked places `P`, then a [`State`] is an
@@ -514,7 +134,7 @@ pub fn all_bottom(&self) -> bool {
}
}
- fn is_reachable(&self) -> bool {
+ pub fn is_reachable(&self) -> bool {
matches!(self, State::Reachable(_))
}
@@ -858,7 +478,7 @@ fn register(
// Allocate a value slot if it doesn't have one, and the user requested one.
assert!(place_info.value_index.is_none());
if let Ok(layout) = tcx.layout_of(param_env.and(place_info.ty))
- && layout.abi.is_scalar()
+ && layout.backend_repr.is_scalar()
{
place_info.value_index = Some(self.value_count.into());
self.value_count += 1;
@@ -1317,34 +937,6 @@ fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location:
collector.result
}
-/// This is used to visualize the dataflow analysis.
-impl<'tcx, T> DebugWithContext<ValueAnalysisWrapper<T>> for State<T::Value>
-where
- T: ValueAnalysis<'tcx>,
- T::Value: Debug,
-{
- fn fmt_with(&self, ctxt: &ValueAnalysisWrapper<T>, f: &mut Formatter<'_>) -> std::fmt::Result {
- match self {
- State::Reachable(values) => debug_with_context(values, None, ctxt.0.map(), f),
- State::Unreachable => write!(f, "unreachable"),
- }
- }
-
- fn fmt_diff_with(
- &self,
- old: &Self,
- ctxt: &ValueAnalysisWrapper<T>,
- f: &mut Formatter<'_>,
- ) -> std::fmt::Result {
- match (self, old) {
- (State::Reachable(this), State::Reachable(old)) => {
- debug_with_context(this, Some(old), ctxt.0.map(), f)
- }
- _ => Ok(()), // Consider printing something here.
- }
- }
-}
-
fn debug_with_context_rec<V: Debug + Eq + HasBottom>(
place: PlaceIndex,
place_str: &str,
@@ -1391,7 +983,7 @@ fn debug_with_context_rec<V: Debug + Eq + HasBottom>(
Ok(())
}
-fn debug_with_context<V: Debug + Eq + HasBottom>(
+pub fn debug_with_context<V: Debug + Eq + HasBottom>(
new: &StateData<V>,
old: Option<&StateData<V>>,
map: &Map<'_>,
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 0726346..dd85d06 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -2,6 +2,10 @@
//!
//! Currently, this pass only propagates scalar values.
+use std::assert_matches::assert_matches;
+use std::fmt::Formatter;
+
+use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
use rustc_const_eval::interpret::{
ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok,
@@ -14,13 +18,13 @@
use rustc_middle::mir::*;
use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_mir_dataflow::lattice::FlatSet;
+use rustc_mir_dataflow::fmt::DebugWithContext;
+use rustc_mir_dataflow::lattice::{FlatSet, HasBottom};
use rustc_mir_dataflow::value_analysis::{
- Map, PlaceIndex, State, TrackElem, ValueAnalysis, ValueAnalysisWrapper, ValueOrPlace,
+ Map, PlaceIndex, State, TrackElem, ValueOrPlace, debug_with_context,
};
use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor};
use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
use tracing::{debug, debug_span, instrument};
// These constants are somewhat random guesses and have not been optimized.
@@ -58,8 +62,8 @@ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// Perform the actual dataflow analysis.
let analysis = ConstAnalysis::new(tcx, body, map);
- let mut results = debug_span!("analyze")
- .in_scope(|| analysis.wrap().iterate_to_fixpoint(tcx, body, None));
+ let mut results =
+ debug_span!("analyze").in_scope(|| analysis.iterate_to_fixpoint(tcx, body, None));
// Collect results and patch the body afterwards.
let mut visitor = Collector::new(tcx, &body.local_decls);
@@ -69,6 +73,10 @@ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
}
}
+// Note: Currently, places that have their reference taken cannot be tracked. Although this would
+// be possible, it has to rely on some aliasing model, which we are not ready to commit to yet.
+// Because of that, we can assume that the only way to change the value behind a tracked place is
+// by direct assignment.
struct ConstAnalysis<'a, 'tcx> {
map: Map<'tcx>,
tcx: TyCtxt<'tcx>,
@@ -77,20 +85,198 @@ struct ConstAnalysis<'a, 'tcx> {
param_env: ty::ParamEnv<'tcx>,
}
-impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
- type Value = FlatSet<Scalar>;
+impl<'tcx> Analysis<'tcx> for ConstAnalysis<'_, 'tcx> {
+ type Domain = State<FlatSet<Scalar>>;
const NAME: &'static str = "ConstAnalysis";
- fn map(&self) -> &Map<'tcx> {
- &self.map
+ // The bottom state denotes uninitialized memory. Because we are only doing a sound
+ // approximation of the actual execution, we can also use this state for places where access
+ // would be UB.
+ fn bottom_value(&self, _body: &Body<'tcx>) -> Self::Domain {
+ State::Unreachable
+ }
+
+ fn initialize_start_block(&self, body: &Body<'tcx>, state: &mut Self::Domain) {
+ // The initial state maps all tracked places of argument projections to ⊤ and the rest to ⊥.
+ assert_matches!(state, State::Unreachable);
+ *state = State::new_reachable();
+ for arg in body.args_iter() {
+ state.flood(PlaceRef { local: arg, projection: &[] }, &self.map);
+ }
+ }
+
+ fn apply_statement_effect(
+ &mut self,
+ state: &mut Self::Domain,
+ statement: &Statement<'tcx>,
+ _location: Location,
+ ) {
+ if state.is_reachable() {
+ self.handle_statement(statement, state);
+ }
+ }
+
+ fn apply_terminator_effect<'mir>(
+ &mut self,
+ state: &mut Self::Domain,
+ terminator: &'mir Terminator<'tcx>,
+ _location: Location,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ if state.is_reachable() {
+ self.handle_terminator(terminator, state)
+ } else {
+ TerminatorEdges::None
+ }
+ }
+
+ fn apply_call_return_effect(
+ &mut self,
+ state: &mut Self::Domain,
+ _block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ if state.is_reachable() {
+ self.handle_call_return(return_places, state)
+ }
+ }
+}
+
+impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, map: Map<'tcx>) -> Self {
+ let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+ Self {
+ map,
+ tcx,
+ local_decls: &body.local_decls,
+ ecx: InterpCx::new(tcx, DUMMY_SP, param_env, DummyMachine),
+ param_env,
+ }
+ }
+
+ fn handle_statement(&self, statement: &Statement<'tcx>, state: &mut State<FlatSet<Scalar>>) {
+ match &statement.kind {
+ StatementKind::Assign(box (place, rvalue)) => {
+ self.handle_assign(*place, rvalue, state);
+ }
+ StatementKind::SetDiscriminant { box place, variant_index } => {
+ self.handle_set_discriminant(*place, *variant_index, state);
+ }
+ StatementKind::Intrinsic(box intrinsic) => {
+ self.handle_intrinsic(intrinsic);
+ }
+ StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+ // StorageLive leaves the local in an uninitialized state.
+ // StorageDead makes it UB to access the local afterwards.
+ state.flood_with(
+ Place::from(*local).as_ref(),
+ &self.map,
+ FlatSet::<Scalar>::BOTTOM,
+ );
+ }
+ StatementKind::Deinit(box place) => {
+ // Deinit makes the place uninitialized.
+ state.flood_with(place.as_ref(), &self.map, FlatSet::<Scalar>::BOTTOM);
+ }
+ StatementKind::Retag(..) => {
+ // We don't track references.
+ }
+ StatementKind::ConstEvalCounter
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::PlaceMention(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::AscribeUserType(..) => (),
+ }
+ }
+
+ fn handle_intrinsic(&self, intrinsic: &NonDivergingIntrinsic<'tcx>) {
+ match intrinsic {
+ NonDivergingIntrinsic::Assume(..) => {
+ // Could use this, but ignoring it is sound.
+ }
+ NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping {
+ dst: _,
+ src: _,
+ count: _,
+ }) => {
+ // This statement represents `*dst = *src`, `count` times.
+ }
+ }
+ }
+
+ fn handle_operand(
+ &self,
+ operand: &Operand<'tcx>,
+ state: &mut State<FlatSet<Scalar>>,
+ ) -> ValueOrPlace<FlatSet<Scalar>> {
+ match operand {
+ Operand::Constant(box constant) => {
+ ValueOrPlace::Value(self.handle_constant(constant, state))
+ }
+ Operand::Copy(place) | Operand::Move(place) => {
+ // On move, we would ideally flood the place with bottom. But with the current
+ // framework this is not possible (similar to `InterpCx::eval_operand`).
+ self.map.find(place.as_ref()).map(ValueOrPlace::Place).unwrap_or(ValueOrPlace::TOP)
+ }
+ }
+ }
+
+ /// The effect of a successful function call return should not be
+ /// applied here, see [`Analysis::apply_terminator_effect`].
+ fn handle_terminator<'mir>(
+ &self,
+ terminator: &'mir Terminator<'tcx>,
+ state: &mut State<FlatSet<Scalar>>,
+ ) -> TerminatorEdges<'mir, 'tcx> {
+ match &terminator.kind {
+ TerminatorKind::Call { .. } | TerminatorKind::InlineAsm { .. } => {
+ // Effect is applied by `handle_call_return`.
+ }
+ TerminatorKind::Drop { place, .. } => {
+ state.flood_with(place.as_ref(), &self.map, FlatSet::<Scalar>::BOTTOM);
+ }
+ TerminatorKind::Yield { .. } => {
+ // They would have an effect, but are not allowed in this phase.
+ bug!("encountered disallowed terminator");
+ }
+ TerminatorKind::SwitchInt { discr, targets } => {
+ return self.handle_switch_int(discr, targets, state);
+ }
+ TerminatorKind::TailCall { .. } => {
+ // FIXME(explicit_tail_calls): determine if we need to do something here (probably
+ // not)
+ }
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::CoroutineDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => {
+ // These terminators have no effect on the analysis.
+ }
+ }
+ terminator.edges()
+ }
+
+ fn handle_call_return(
+ &self,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ state: &mut State<FlatSet<Scalar>>,
+ ) {
+ return_places.for_each(|place| {
+ state.flood(place.as_ref(), &self.map);
+ })
}
fn handle_set_discriminant(
&self,
place: Place<'tcx>,
variant_index: VariantIdx,
- state: &mut State<Self::Value>,
+ state: &mut State<FlatSet<Scalar>>,
) {
state.flood_discr(place.as_ref(), &self.map);
if self.map.find_discr(place.as_ref()).is_some() {
@@ -109,17 +295,17 @@ fn handle_assign(
&self,
target: Place<'tcx>,
rvalue: &Rvalue<'tcx>,
- state: &mut State<Self::Value>,
+ state: &mut State<FlatSet<Scalar>>,
) {
match rvalue {
Rvalue::Use(operand) => {
- state.flood(target.as_ref(), self.map());
+ state.flood(target.as_ref(), &self.map);
if let Some(target) = self.map.find(target.as_ref()) {
self.assign_operand(state, target, operand);
}
}
Rvalue::CopyForDeref(rhs) => {
- state.flood(target.as_ref(), self.map());
+ state.flood(target.as_ref(), &self.map);
if let Some(target) = self.map.find(target.as_ref()) {
self.assign_operand(state, target, &Operand::Copy(*rhs));
}
@@ -127,9 +313,9 @@ fn handle_assign(
Rvalue::Aggregate(kind, operands) => {
// If we assign `target = Enum::Variant#0(operand)`,
// we must make sure that all `target as Variant#i` are `Top`.
- state.flood(target.as_ref(), self.map());
+ state.flood(target.as_ref(), &self.map);
- let Some(target_idx) = self.map().find(target.as_ref()) else { return };
+ let Some(target_idx) = self.map.find(target.as_ref()) else { return };
let (variant_target, variant_index) = match **kind {
AggregateKind::Tuple | AggregateKind::Closure(..) => (Some(target_idx), None),
@@ -148,14 +334,14 @@ fn handle_assign(
if let Some(variant_target_idx) = variant_target {
for (field_index, operand) in operands.iter_enumerated() {
if let Some(field) =
- self.map().apply(variant_target_idx, TrackElem::Field(field_index))
+ self.map.apply(variant_target_idx, TrackElem::Field(field_index))
{
self.assign_operand(state, field, operand);
}
}
}
if let Some(variant_index) = variant_index
- && let Some(discr_idx) = self.map().apply(target_idx, TrackElem::Discriminant)
+ && let Some(discr_idx) = self.map.apply(target_idx, TrackElem::Discriminant)
{
// We are assigning the discriminant as part of an aggregate.
// This discriminant can only alias a variant field's value if the operand
@@ -170,23 +356,23 @@ fn handle_assign(
}
Rvalue::BinaryOp(op, box (left, right)) if op.is_overflowing() => {
// Flood everything now, so we can use `insert_value_idx` directly later.
- state.flood(target.as_ref(), self.map());
+ state.flood(target.as_ref(), &self.map);
- let Some(target) = self.map().find(target.as_ref()) else { return };
+ let Some(target) = self.map.find(target.as_ref()) else { return };
- let value_target = self.map().apply(target, TrackElem::Field(0_u32.into()));
- let overflow_target = self.map().apply(target, TrackElem::Field(1_u32.into()));
+ let value_target = self.map.apply(target, TrackElem::Field(0_u32.into()));
+ let overflow_target = self.map.apply(target, TrackElem::Field(1_u32.into()));
if value_target.is_some() || overflow_target.is_some() {
let (val, overflow) = self.binary_op(state, *op, left, right);
if let Some(value_target) = value_target {
// We have flooded `target` earlier.
- state.insert_value_idx(value_target, val, self.map());
+ state.insert_value_idx(value_target, val, &self.map);
}
if let Some(overflow_target) = overflow_target {
// We have flooded `target` earlier.
- state.insert_value_idx(overflow_target, overflow, self.map());
+ state.insert_value_idx(overflow_target, overflow, &self.map);
}
}
}
@@ -196,27 +382,30 @@ fn handle_assign(
_,
) => {
let pointer = self.handle_operand(operand, state);
- state.assign(target.as_ref(), pointer, self.map());
+ state.assign(target.as_ref(), pointer, &self.map);
- if let Some(target_len) = self.map().find_len(target.as_ref())
+ if let Some(target_len) = self.map.find_len(target.as_ref())
&& let operand_ty = operand.ty(self.local_decls, self.tcx)
&& let Some(operand_ty) = operand_ty.builtin_deref(true)
&& let ty::Array(_, len) = operand_ty.kind()
&& let Some(len) = Const::Ty(self.tcx.types.usize, *len)
.try_eval_scalar_int(self.tcx, self.param_env)
{
- state.insert_value_idx(target_len, FlatSet::Elem(len.into()), self.map());
+ state.insert_value_idx(target_len, FlatSet::Elem(len.into()), &self.map);
}
}
- _ => self.super_assign(target, rvalue, state),
+ _ => {
+ let result = self.handle_rvalue(rvalue, state);
+ state.assign(target.as_ref(), result, &self.map);
+ }
}
}
fn handle_rvalue(
&self,
rvalue: &Rvalue<'tcx>,
- state: &mut State<Self::Value>,
- ) -> ValueOrPlace<Self::Value> {
+ state: &mut State<FlatSet<Scalar>>,
+ ) -> ValueOrPlace<FlatSet<Scalar>> {
let val = match rvalue {
Rvalue::Len(place) => {
let place_ty = place.ty(self.local_decls, self.tcx);
@@ -225,7 +414,7 @@ fn handle_rvalue(
.try_eval_scalar(self.tcx, self.param_env)
.map_or(FlatSet::Top, FlatSet::Elem)
} else if let [ProjectionElem::Deref] = place.projection[..] {
- state.get_len(place.local.into(), self.map())
+ state.get_len(place.local.into(), &self.map)
} else {
FlatSet::Top
}
@@ -296,8 +485,24 @@ fn handle_rvalue(
};
FlatSet::Elem(Scalar::from_target_usize(val, &self.tcx))
}
- Rvalue::Discriminant(place) => state.get_discr(place.as_ref(), self.map()),
- _ => return self.super_rvalue(rvalue, state),
+ Rvalue::Discriminant(place) => state.get_discr(place.as_ref(), &self.map),
+ Rvalue::Use(operand) => return self.handle_operand(operand, state),
+ Rvalue::CopyForDeref(place) => {
+ return self.handle_operand(&Operand::Copy(*place), state);
+ }
+ Rvalue::Ref(..) | Rvalue::RawPtr(..) => {
+ // We don't track such places.
+ return ValueOrPlace::TOP;
+ }
+ Rvalue::Repeat(..)
+ | Rvalue::ThreadLocalRef(..)
+ | Rvalue::Cast(..)
+ | Rvalue::BinaryOp(..)
+ | Rvalue::Aggregate(..)
+ | Rvalue::ShallowInitBox(..) => {
+ // No modification is possible through these r-values.
+ return ValueOrPlace::TOP;
+ }
};
ValueOrPlace::Value(val)
}
@@ -305,8 +510,8 @@ fn handle_rvalue(
fn handle_constant(
&self,
constant: &ConstOperand<'tcx>,
- _state: &mut State<Self::Value>,
- ) -> Self::Value {
+ _state: &mut State<FlatSet<Scalar>>,
+ ) -> FlatSet<Scalar> {
constant
.const_
.try_eval_scalar(self.tcx, self.param_env)
@@ -317,11 +522,11 @@ fn handle_switch_int<'mir>(
&self,
discr: &'mir Operand<'tcx>,
targets: &'mir SwitchTargets,
- state: &mut State<Self::Value>,
+ state: &mut State<FlatSet<Scalar>>,
) -> TerminatorEdges<'mir, 'tcx> {
let value = match self.handle_operand(discr, state) {
ValueOrPlace::Value(value) => value,
- ValueOrPlace::Place(place) => state.get_idx(place, self.map()),
+ ValueOrPlace::Place(place) => state.get_idx(place, &self.map),
};
match value {
// We are branching on uninitialized data, this is UB, treat it as unreachable.
@@ -334,19 +539,6 @@ fn handle_switch_int<'mir>(
FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets },
}
}
-}
-
-impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
- fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, map: Map<'tcx>) -> Self {
- let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
- Self {
- map,
- tcx,
- local_decls: &body.local_decls,
- ecx: InterpCx::new(tcx, DUMMY_SP, param_env, DummyMachine),
- param_env,
- }
- }
/// The caller must have flooded `place`.
fn assign_operand(
@@ -457,7 +649,7 @@ fn binary_op(
// a pair and sometimes not. But as a hack we always return a pair
// and just make the 2nd component `Bottom` when it does not exist.
Some(val) => {
- if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+ if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
let (val, overflow) = val.to_scalar_pair();
(FlatSet::Elem(val), FlatSet::Elem(overflow))
} else {
@@ -470,7 +662,7 @@ fn binary_op(
// Exactly one side is known, attempt some algebraic simplifications.
(FlatSet::Elem(const_arg), _) | (_, FlatSet::Elem(const_arg)) => {
let layout = const_arg.layout;
- if !matches!(layout.abi, rustc_target::abi::Abi::Scalar(..)) {
+ if !matches!(layout.backend_repr, rustc_target::abi::BackendRepr::Scalar(..)) {
return (FlatSet::Top, FlatSet::Top);
}
@@ -537,16 +729,40 @@ fn wrap_immediate(&self, imm: Immediate) -> FlatSet<Scalar> {
}
}
-pub(crate) struct Patch<'tcx> {
+/// This is used to visualize the dataflow analysis.
+impl<'tcx> DebugWithContext<ConstAnalysis<'_, 'tcx>> for State<FlatSet<Scalar>> {
+ fn fmt_with(&self, ctxt: &ConstAnalysis<'_, 'tcx>, f: &mut Formatter<'_>) -> std::fmt::Result {
+ match self {
+ State::Reachable(values) => debug_with_context(values, None, &ctxt.map, f),
+ State::Unreachable => write!(f, "unreachable"),
+ }
+ }
+
+ fn fmt_diff_with(
+ &self,
+ old: &Self,
+ ctxt: &ConstAnalysis<'_, 'tcx>,
+ f: &mut Formatter<'_>,
+ ) -> std::fmt::Result {
+ match (self, old) {
+ (State::Reachable(this), State::Reachable(old)) => {
+ debug_with_context(this, Some(old), &ctxt.map, f)
+ }
+ _ => Ok(()), // Consider printing something here.
+ }
+ }
+}
+
+struct Patch<'tcx> {
tcx: TyCtxt<'tcx>,
/// For a given MIR location, this stores the values of the operands used by that location. In
/// particular, this is before the effect, such that the operands of `_1 = _1 + _2` are
/// properly captured. (This may become UB soon, but it is currently emitted even by safe code.)
- pub(crate) before_effect: FxHashMap<(Location, Place<'tcx>), Const<'tcx>>,
+ before_effect: FxHashMap<(Location, Place<'tcx>), Const<'tcx>>,
/// Stores the assigned values for assignments where the Rvalue is constant.
- pub(crate) assignments: FxHashMap<Location, Const<'tcx>>,
+ assignments: FxHashMap<Location, Const<'tcx>>,
}
impl<'tcx> Patch<'tcx> {
@@ -589,13 +805,13 @@ fn try_make_constant(
}
let place = map.find(place.as_ref())?;
- if layout.abi.is_scalar()
+ if layout.backend_repr.is_scalar()
&& let Some(value) = propagatable_scalar(place, state, map)
{
return Some(Const::Val(ConstValue::Scalar(value), ty));
}
- if matches!(layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+ if matches!(layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
let alloc_id = ecx
.intern_with_temp_alloc(layout, |ecx, dest| {
try_write_constant(ecx, dest, place, ty, state, map)
@@ -641,7 +857,7 @@ fn try_write_constant<'tcx>(
}
// Fast path for scalars.
- if layout.abi.is_scalar()
+ if layout.backend_repr.is_scalar()
&& let Some(value) = propagatable_scalar(place, state, map)
{
return ecx.write_immediate(Immediate::Scalar(value), dest);
@@ -725,8 +941,7 @@ fn try_write_constant<'tcx>(
interp_ok(())
}
-impl<'mir, 'tcx>
- ResultsVisitor<'mir, 'tcx, Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>>
+impl<'mir, 'tcx> ResultsVisitor<'mir, 'tcx, Results<'tcx, ConstAnalysis<'_, 'tcx>>>
for Collector<'_, 'tcx>
{
type Domain = State<FlatSet<Scalar>>;
@@ -734,7 +949,7 @@ impl<'mir, 'tcx>
#[instrument(level = "trace", skip(self, results, statement))]
fn visit_statement_before_primary_effect(
&mut self,
- results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
+ results: &mut Results<'tcx, ConstAnalysis<'_, 'tcx>>,
state: &Self::Domain,
statement: &'mir Statement<'tcx>,
location: Location,
@@ -744,8 +959,8 @@ fn visit_statement_before_primary_effect(
OperandCollector {
state,
visitor: self,
- ecx: &mut results.analysis.0.ecx,
- map: &results.analysis.0.map,
+ ecx: &mut results.analysis.ecx,
+ map: &results.analysis.map,
}
.visit_rvalue(rvalue, location);
}
@@ -756,7 +971,7 @@ fn visit_statement_before_primary_effect(
#[instrument(level = "trace", skip(self, results, statement))]
fn visit_statement_after_primary_effect(
&mut self,
- results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
+ results: &mut Results<'tcx, ConstAnalysis<'_, 'tcx>>,
state: &Self::Domain,
statement: &'mir Statement<'tcx>,
location: Location,
@@ -767,10 +982,10 @@ fn visit_statement_after_primary_effect(
}
StatementKind::Assign(box (place, _)) => {
if let Some(value) = self.try_make_constant(
- &mut results.analysis.0.ecx,
+ &mut results.analysis.ecx,
place,
state,
- &results.analysis.0.map,
+ &results.analysis.map,
) {
self.patch.assignments.insert(location, value);
}
@@ -781,7 +996,7 @@ fn visit_statement_after_primary_effect(
fn visit_terminator_before_primary_effect(
&mut self,
- results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
+ results: &mut Results<'tcx, ConstAnalysis<'_, 'tcx>>,
state: &Self::Domain,
terminator: &'mir Terminator<'tcx>,
location: Location,
@@ -789,8 +1004,8 @@ fn visit_terminator_before_primary_effect(
OperandCollector {
state,
visitor: self,
- ecx: &mut results.analysis.0.ecx,
- map: &results.analysis.0.map,
+ ecx: &mut results.analysis.ecx,
+ map: &results.analysis.map,
}
.visit_terminator(terminator, location);
}
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 79c6237..274eea9 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -85,6 +85,7 @@
use std::borrow::Cow;
use either::Either;
+use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
use rustc_const_eval::const_eval::DummyMachine;
use rustc_const_eval::interpret::{
ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable, Scalar,
@@ -103,7 +104,6 @@
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::DUMMY_SP;
use rustc_span::def_id::DefId;
-use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
use smallvec::SmallVec;
use tracing::{debug, instrument, trace};
@@ -427,7 +427,10 @@ fn eval_to_const(&mut self, value: VnIndex) -> Option<OpTy<'tcx>> {
};
let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
ImmTy::from_immediate(ptr_imm, ty).into()
- } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+ } else if matches!(
+ ty.backend_repr,
+ BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
+ ) {
let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
let variant_dest = if let Some(variant) = variant {
self.ecx.project_downcast(&dest, variant).discard_err()?
@@ -573,12 +576,12 @@ fn eval_to_const(&mut self, value: VnIndex) -> Option<OpTy<'tcx>> {
// limited transmutes: it only works between types with the same layout, and
// cannot transmute pointers to integers.
if value.as_mplace_or_imm().is_right() {
- let can_transmute = match (value.layout.abi, to.abi) {
- (Abi::Scalar(s1), Abi::Scalar(s2)) => {
+ let can_transmute = match (value.layout.backend_repr, to.backend_repr) {
+ (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => {
s1.size(&self.ecx) == s2.size(&self.ecx)
&& !matches!(s1.primitive(), Primitive::Pointer(..))
}
- (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+ (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
a1.size(&self.ecx) == a2.size(&self.ecx) &&
b1.size(&self.ecx) == b2.size(&self.ecx) &&
// The alignment of the second component determines its offset, so that also needs to match.
@@ -1079,7 +1082,9 @@ fn simplify_aggregate(
}
}
- if let AggregateTy::Def(_, _) = ty
+ // unsound: https://github.com/rust-lang/rust/issues/132353
+ if tcx.sess.opts.unstable_opts.unsound_mir_opts
+ && let AggregateTy::Def(_, _) = ty
&& let Some(value) =
self.simplify_aggregate_to_copy(rvalue, location, &fields, variant_index)
{
@@ -1241,7 +1246,7 @@ fn simplify_binary_inner(
let as_bits = |value| {
let constant = self.evaluated[value].as_ref()?;
- if layout.abi.is_scalar() {
+ if layout.backend_repr.is_scalar() {
let scalar = self.ecx.read_scalar(constant).discard_err()?;
scalar.to_bits(constant.layout.size).discard_err()
} else {
@@ -1497,12 +1502,12 @@ fn op_to_prop_const<'tcx>(
// Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to
// avoid.
- if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+ if !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
return None;
}
// If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
- if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
+ if let BackendRepr::Scalar(abi::Scalar::Initialized { .. }) = op.layout.backend_repr
&& let Some(scalar) = ecx.read_scalar(op).discard_err()
{
if !scalar.try_to_scalar_int().is_ok() {
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 42d6bdf..404470d 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -244,8 +244,13 @@ fn try_inlining(
// Normally, this shouldn't be required, but trait normalization failure can create a
// validation ICE.
let output_type = callee_body.return_ty();
- if !util::relate_types(self.tcx, self.param_env, ty::Covariant, output_type, destination_ty)
- {
+ if !util::sub_types(
+ self.tcx,
+ caller_body.typing_mode(self.tcx),
+ self.param_env,
+ output_type,
+ destination_ty,
+ ) {
trace!(?output_type, ?destination_ty);
return Err("failed to normalize return type");
}
@@ -275,8 +280,13 @@ fn try_inlining(
self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter())
{
let input_type = callee_body.local_decls[input].ty;
- if !util::relate_types(self.tcx, self.param_env, ty::Covariant, input_type, arg_ty)
- {
+ if !util::sub_types(
+ self.tcx,
+ caller_body.typing_mode(self.tcx),
+ self.param_env,
+ input_type,
+ arg_ty,
+ ) {
trace!(?arg_ty, ?input_type);
return Err("failed to normalize tuple argument type");
}
@@ -285,8 +295,13 @@ fn try_inlining(
for (arg, input) in args.iter().zip(callee_body.args_iter()) {
let input_type = callee_body.local_decls[input].ty;
let arg_ty = arg.node.ty(&caller_body.local_decls, self.tcx);
- if !util::relate_types(self.tcx, self.param_env, ty::Covariant, input_type, arg_ty)
- {
+ if !util::sub_types(
+ self.tcx,
+ caller_body.typing_mode(self.tcx),
+ self.param_env,
+ input_type,
+ arg_ty,
+ ) {
trace!(?arg_ty, ?input_type);
return Err("failed to normalize argument type");
}
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 0892374..0604665 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -4,6 +4,7 @@
use std::fmt::Debug;
+use rustc_abi::{BackendRepr, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
use rustc_const_eval::const_eval::DummyMachine;
use rustc_const_eval::interpret::{
ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
@@ -19,7 +20,6 @@
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::Span;
-use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
use tracing::{debug, instrument, trace};
use crate::errors::{AssertLint, AssertLintKind};
@@ -557,7 +557,7 @@ fn eval_rvalue(&mut self, rvalue: &Rvalue<'tcx>, dest: &Place<'tcx>) -> Option<(
let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;
let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
- if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+ if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
// FIXME `Value` should properly support pairs in `Immediate`... but currently
// it does not.
let (val, overflow) = val.to_pair(&self.ecx);
@@ -651,9 +651,9 @@ fn eval_rvalue(&mut self, rvalue: &Rvalue<'tcx>, dest: &Place<'tcx>) -> Option<(
let to = self.ecx.layout_of(to).ok()?;
// `offset` for immediates only supports scalar/scalar-pair ABIs,
// so bail out if the target is not one.
- match (value.layout.abi, to.abi) {
- (Abi::Scalar(..), Abi::Scalar(..)) => {}
- (Abi::ScalarPair(..), Abi::ScalarPair(..)) => {}
+ match (value.layout.backend_repr, to.backend_repr) {
+ (BackendRepr::Scalar(..), BackendRepr::Scalar(..)) => {}
+ (BackendRepr::ScalarPair(..), BackendRepr::ScalarPair(..)) => {}
_ => return None,
}
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index 7735672..8109a9b 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -5,14 +5,14 @@
use rustc_index::IndexVec;
use rustc_index::bit_set::BitSet;
use rustc_infer::infer::TyCtxtInferExt;
-use rustc_infer::traits::{Obligation, ObligationCause, Reveal};
+use rustc_infer::traits::{Obligation, ObligationCause};
use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::{
self, CoroutineArgsExt, InstanceKind, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
- TypingMode, Variance,
+ Variance,
};
use rustc_middle::{bug, span_bug};
use rustc_target::abi::{FIRST_VARIANT, Size};
@@ -20,7 +20,7 @@
use rustc_trait_selection::traits::ObligationCtxt;
use rustc_type_ir::Upcast;
-use crate::util::{is_within_packed, relate_types};
+use crate::util::{self, is_within_packed};
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum EdgeKind {
@@ -50,11 +50,7 @@ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
}
let def_id = body.source.def_id();
let mir_phase = self.mir_phase;
- let param_env = match mir_phase.reveal() {
- Reveal::UserFacing => tcx.param_env(def_id),
- Reveal::All => tcx.param_env_reveal_all_normalized(def_id),
- };
-
+ let param_env = mir_phase.param_env(tcx, def_id);
let can_unwind = if mir_phase <= MirPhase::Runtime(RuntimePhase::Initial) {
// In this case `AbortUnwindingCalls` haven't yet been executed.
true
@@ -587,7 +583,14 @@ fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
Variance::Covariant
};
- crate::util::relate_types(self.tcx, self.param_env, variance, src, dest)
+ crate::util::relate_types(
+ self.tcx,
+ self.body.typing_mode(self.tcx),
+ self.param_env,
+ variance,
+ src,
+ dest,
+ )
}
/// Check that the given predicate definitely holds in the param-env of this MIR body.
@@ -606,7 +609,7 @@ fn predicate_must_hold_modulo_regions(
return true;
}
- let infcx = self.tcx.infer_ctxt().build(TypingMode::from_param_env(self.param_env));
+ let infcx = self.tcx.infer_ctxt().build(self.body.typing_mode(self.tcx));
let ocx = ObligationCtxt::new(&infcx);
ocx.register_obligation(Obligation::new(
self.tcx,
@@ -798,10 +801,10 @@ fn visit_projection_elem(
}
}
ProjectionElem::Subtype(ty) => {
- if !relate_types(
+ if !util::sub_types(
self.tcx,
+ self.body.typing_mode(self.tcx),
self.param_env,
- Variance::Covariant,
ty,
place_ref.ty(&self.body.local_decls, self.tcx).ty,
) {
diff --git a/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs b/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs
index 8d57ad8..5bfc156 100644
--- a/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs
@@ -44,7 +44,7 @@ fn probe_and_match_goal_against_assumption(
) -> Result<Candidate<I>, NoSolution> {
if let Some(host_clause) = assumption.as_host_effect_clause() {
if host_clause.def_id() == goal.predicate.def_id()
- && host_clause.host().satisfies(goal.predicate.host)
+ && host_clause.constness().satisfies(goal.predicate.constness)
{
if !DeepRejectCtxt::relate_rigid_rigid(ecx.cx()).args_may_unify(
goal.predicate.trait_ref.args,
@@ -91,7 +91,7 @@ fn consider_additional_alias_assumptions(
cx,
cx.implied_const_bounds(alias_ty.def_id)
.iter_instantiated(cx, alias_ty.args)
- .map(|trait_ref| trait_ref.to_host_effect_clause(cx, goal.predicate.host)),
+ .map(|trait_ref| trait_ref.to_host_effect_clause(cx, goal.predicate.constness)),
) {
candidates.extend(Self::probe_and_match_goal_against_assumption(
ecx,
@@ -107,7 +107,7 @@ fn consider_additional_alias_assumptions(
.map(|trait_ref| {
goal.with(
cx,
- trait_ref.to_host_effect_clause(cx, goal.predicate.host),
+ trait_ref.to_host_effect_clause(cx, goal.predicate.constness),
)
}),
);
@@ -163,7 +163,10 @@ fn consider_impl_candidate(
.const_conditions(impl_def_id)
.iter_instantiated(cx, impl_args)
.map(|bound_trait_ref| {
- goal.with(cx, bound_trait_ref.to_host_effect_clause(cx, goal.predicate.host))
+ goal.with(
+ cx,
+ bound_trait_ref.to_host_effect_clause(cx, goal.predicate.constness),
+ )
});
ecx.add_goals(GoalSource::ImplWhereBound, const_conditions);
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index 3673372..6b4e2d0 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -1194,6 +1194,7 @@ fn parse_item_foreign_mod(
attrs: &mut AttrVec,
mut safety: Safety,
) -> PResult<'a, ItemInfo> {
+ let extern_span = self.prev_token.uninterpolated_span();
let abi = self.parse_abi(); // ABI?
// FIXME: This recovery should be tested better.
if safety == Safety::Default
@@ -1205,6 +1206,7 @@ fn parse_item_foreign_mod(
let _ = self.eat_keyword(kw::Unsafe);
}
let module = ast::ForeignMod {
+ extern_span,
safety,
abi,
items: self.parse_item_list(attrs, |p| p.parse_foreign_item(ForceCollect::No))?,
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index 921a915..986dce5 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -82,8 +82,12 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
let meta_items = attr.meta_item_list().unwrap_or_default();
for meta_item in meta_items {
match meta_item.name_or_empty() {
+ // FIXME: this never was about ABI and now this dump arg is confusing
sym::abi => {
- tcx.dcx().emit_err(LayoutAbi { span, abi: format!("{:?}", ty_layout.abi) });
+ tcx.dcx().emit_err(LayoutAbi {
+ span,
+ abi: format!("{:?}", ty_layout.backend_repr),
+ });
}
sym::align => {
diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs
index 0595414..1a5c29a 100644
--- a/compiler/rustc_privacy/src/lib.rs
+++ b/compiler/rustc_privacy/src/lib.rs
@@ -139,7 +139,7 @@ fn visit_clause(&mut self, clause: ty::Clause<'tcx>) -> V::Result {
}
ty::ClauseKind::HostEffect(pred) => {
try_visit!(self.visit_trait(pred.trait_ref));
- pred.host.visit_with(self)
+ pred.constness.visit_with(self)
}
ty::ClauseKind::Projection(ty::ProjectionPredicate {
projection_term: projection_ty,
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index adb0ba7..f4a85c3 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -841,10 +841,9 @@ fn visit_ty(&mut self, ty: &'ast Ty) {
self.r.record_partial_res(ty.id, PartialRes::new(res));
visit::walk_ty(self, ty)
}
- TyKind::ImplTrait(node_id, _) => {
+ TyKind::ImplTrait(..) => {
let candidates = self.lifetime_elision_candidates.take();
visit::walk_ty(self, ty);
- self.record_lifetime_params_for_impl_trait(*node_id);
self.lifetime_elision_candidates = candidates;
}
TyKind::TraitObject(bounds, ..) => {
@@ -977,14 +976,6 @@ fn visit_fn(&mut self, fn_kind: FnKind<'ast>, sp: Span, fn_id: NodeId) {
sig.decl.inputs.iter().map(|Param { ty, .. }| (None, &**ty)),
&sig.decl.output,
);
-
- if let Some((coro_node_id, _)) = sig
- .header
- .coroutine_kind
- .map(|coroutine_kind| coroutine_kind.return_id())
- {
- this.record_lifetime_params_for_impl_trait(coro_node_id);
- }
},
);
return;
@@ -1026,10 +1017,6 @@ fn visit_fn(&mut self, fn_kind: FnKind<'ast>, sp: Span, fn_id: NodeId) {
.map(|Param { pat, ty, .. }| (Some(&**pat), &**ty)),
&declaration.output,
);
-
- if let Some((async_node_id, _)) = coro_node_id {
- this.record_lifetime_params_for_impl_trait(async_node_id);
- }
},
);
@@ -1220,7 +1207,6 @@ fn visit_assoc_item_constraint(&mut self, constraint: &'ast AssocItemConstraint)
}
},
AssocItemConstraintKind::Bound { ref bounds } => {
- self.record_lifetime_params_for_impl_trait(constraint.id);
walk_list!(self, visit_param_bound, bounds, BoundKind::Bound);
}
}
@@ -4795,30 +4781,6 @@ fn traits_in_scope(&mut self, ident: Ident, ns: Namespace) -> Vec<TraitCandidate
)
}
- /// Construct the list of in-scope lifetime parameters for impl trait lowering.
- /// We include all lifetime parameters, either named or "Fresh".
- /// The order of those parameters does not matter, as long as it is
- /// deterministic.
- fn record_lifetime_params_for_impl_trait(&mut self, impl_trait_node_id: NodeId) {
- let mut extra_lifetime_params = vec![];
-
- for rib in self.lifetime_ribs.iter().rev() {
- extra_lifetime_params
- .extend(rib.bindings.iter().map(|(&ident, &(node_id, res))| (ident, node_id, res)));
- match rib.kind {
- LifetimeRibKind::Item => break,
- LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
- if let Some(earlier_fresh) = self.r.extra_lifetime_params_map.get(&binder) {
- extra_lifetime_params.extend(earlier_fresh);
- }
- }
- _ => {}
- }
- }
-
- self.r.extra_lifetime_params_map.insert(impl_trait_node_id, extra_lifetime_params);
- }
-
fn resolve_and_cache_rustdoc_path(&mut self, path_str: &str, ns: Namespace) -> Option<Res> {
// FIXME: This caching may be incorrect in case of multiple `macro_rules`
// items with the same name in the same module.
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index d733e32..5ee3b40 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -2453,7 +2453,7 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
let output_types = parse_output_types(early_dcx, &unstable_opts, matches);
let mut cg = CodegenOptions::build(early_dcx, matches);
- let (disable_local_thinlto, mut codegen_units) = should_override_cgus_and_disable_thinlto(
+ let (disable_local_thinlto, codegen_units) = should_override_cgus_and_disable_thinlto(
early_dcx,
&output_types,
matches,
@@ -2476,18 +2476,6 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
let assert_incr_state = parse_assert_incr_state(early_dcx, &unstable_opts.assert_incr_state);
- if unstable_opts.profile && incremental.is_some() {
- early_dcx.early_fatal("can't instrument with gcov profiling when compiling incrementally");
- }
- if unstable_opts.profile {
- match codegen_units {
- Some(1) => {}
- None => codegen_units = Some(1),
- Some(_) => early_dcx
- .early_fatal("can't instrument with gcov profiling with multiple codegen units"),
- }
- }
-
if cg.profile_generate.enabled() && cg.profile_use.is_some() {
early_dcx.early_fatal("options `-C profile-generate` and `-C profile-use` are exclusive");
}
diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs
index 4aae264..b3e3381 100644
--- a/compiler/rustc_session/src/filesearch.rs
+++ b/compiler/rustc_session/src/filesearch.rs
@@ -84,7 +84,7 @@ fn current_dll_path() -> Result<PathBuf, String> {
loop {
if libc::loadquery(
libc::L_GETINFO,
- buffer.as_mut_ptr() as *mut i8,
+ buffer.as_mut_ptr() as *mut u8,
(std::mem::size_of::<libc::ld_info>() * buffer.len()) as u32,
) >= 0
{
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index 54a4621..2b15862 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -1985,13 +1985,8 @@ pub(crate) fn parse_mir_include_spans(slot: &mut MirIncludeSpans, v: Option<&str
proc_macro_execution_strategy: ProcMacroExecutionStrategy = (ProcMacroExecutionStrategy::SameThread,
parse_proc_macro_execution_strategy, [UNTRACKED],
"how to run proc-macro code (default: same-thread)"),
- profile: bool = (false, parse_bool, [TRACKED],
- "insert profiling code (default: no)"),
profile_closures: bool = (false, parse_no_flag, [UNTRACKED],
"profile size of closures"),
- profile_emit: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
- "file path to emit profiling data at runtime when using 'profile' \
- (default based on relative source path)"),
profile_sample_use: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
"use the given `.prof` file for sampled profile-guided optimization (also known as AutoFDO)"),
profiler_runtime: String = (String::from("profiler_builtins"), parse_string, [TRACKED],
diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
index 410bf0f..af24fd2 100644
--- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
+++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
@@ -56,7 +56,7 @@ fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
LayoutShape {
fields: self.fields.stable(tables),
variants: self.variants.stable(tables),
- abi: self.abi.stable(tables),
+ abi: self.backend_repr.stable(tables),
abi_align: self.align.abi.stable(tables),
size: self.size.stable(tables),
}
@@ -196,20 +196,20 @@ fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
}
}
-impl<'tcx> Stable<'tcx> for rustc_abi::Abi {
+impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr {
type T = ValueAbi;
fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
match *self {
- rustc_abi::Abi::Uninhabited => ValueAbi::Uninhabited,
- rustc_abi::Abi::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
- rustc_abi::Abi::ScalarPair(first, second) => {
+ rustc_abi::BackendRepr::Uninhabited => ValueAbi::Uninhabited,
+ rustc_abi::BackendRepr::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
+ rustc_abi::BackendRepr::ScalarPair(first, second) => {
ValueAbi::ScalarPair(first.stable(tables), second.stable(tables))
}
- rustc_abi::Abi::Vector { element, count } => {
+ rustc_abi::BackendRepr::Vector { element, count } => {
ValueAbi::Vector { element: element.stable(tables), count }
}
- rustc_abi::Abi::Aggregate { sized } => ValueAbi::Aggregate { sized },
+ rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
}
}
}
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index ffec763..d1234c3 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -1,5 +1,7 @@
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+ self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
+};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;
@@ -21,8 +23,8 @@ enum FloatConv {
struct CannotUseFpConv;
fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
- match arg.layout.abi {
- Abi::Vector { .. } => true,
+ match arg.layout.backend_repr {
+ BackendRepr::Vector { .. } => true,
_ => arg.layout.is_aggregate(),
}
}
@@ -38,8 +40,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
where
Ty: TyAbiInterface<'a, C> + Copy,
{
- match arg_layout.abi {
- Abi::Scalar(scalar) => match scalar.primitive() {
+ match arg_layout.backend_repr {
+ BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => {
if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv);
@@ -77,8 +79,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
}
}
},
- Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
- Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+ BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
FieldsShape::Primitive => {
unreachable!("aggregates can't have `FieldsShape::Primitive`")
}
@@ -311,7 +313,7 @@ fn classify_arg<'a, Ty, C>(
}
fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
- if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if let abi::Int(i, _) = scalar.primitive() {
// 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 {
diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 2c3258c..5bdf4c2 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -5,7 +5,7 @@
fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
- if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if let abi::Int(i, signed) = scalar.primitive() {
if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -24,8 +24,8 @@ fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
- match ret.layout.field(cx, i).abi {
- abi::Abi::Scalar(scalar) => match scalar.primitive() {
+ match ret.layout.field(cx, i).backend_repr {
+ abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Float(abi::F32) => Some(Reg::f32()),
abi::Float(abi::F64) => Some(Reg::f64()),
_ => None,
@@ -109,7 +109,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
let offset = arg.layout.fields.offset(i);
// We only care about aligned doubles
- if let abi::Abi::Scalar(scalar) = field.abi {
+ if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
if scalar.primitive() == abi::Float(abi::F64) {
if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset)
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 25b001b..8c3df9c 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -6,7 +6,8 @@
use rustc_span::Symbol;
use crate::abi::{
- self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
+ self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
+ TyAndLayout,
};
use crate::spec::abi::Abi as SpecAbi;
use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
@@ -350,15 +351,17 @@ pub fn new(
layout: TyAndLayout<'a, Ty>,
scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
) -> Self {
- let mode = match layout.abi {
- Abi::Uninhabited => PassMode::Ignore,
- Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
- Abi::ScalarPair(a, b) => PassMode::Pair(
+ let mode = match layout.backend_repr {
+ BackendRepr::Uninhabited => PassMode::Ignore,
+ BackendRepr::Scalar(scalar) => {
+ PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
+ }
+ BackendRepr::ScalarPair(a, b) => PassMode::Pair(
scalar_attrs(&layout, a, Size::ZERO),
scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
),
- Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
- Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
+ BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+ BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
};
ArgAbi { layout, mode }
}
@@ -460,7 +463,7 @@ pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
pub fn extend_integer_width_to(&mut self, bits: u64) {
// Only integers have signedness
- if let Abi::Scalar(scalar) = self.layout.abi {
+ if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
if let abi::Int(i, signed) = scalar.primitive() {
if i.size().bits() < bits {
if let PassMode::Direct(ref mut attrs) = self.mode {
@@ -512,7 +515,7 @@ pub fn eq_abi(&self, other: &Self) -> bool
// That elevates any type difference to an ABI difference since we just use the
// full Rust type as the LLVM argument/return type.
if matches!(self.mode, PassMode::Direct(..))
- && matches!(self.layout.abi, Abi::Aggregate { .. })
+ && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
{
// For aggregates in `Direct` mode to be compatible, the types need to be equal.
self.layout.ty == other.layout.ty
@@ -791,8 +794,8 @@ pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: SpecAbi)
continue;
}
- match arg.layout.abi {
- Abi::Aggregate { .. } => {}
+ match arg.layout.backend_repr {
+ BackendRepr::Memory { .. } => {}
// This is a fun case! The gist of what this is doing is
// that we want callers and callees to always agree on the
@@ -813,7 +816,9 @@ pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: SpecAbi)
// Note that the intrinsic ABI is exempt here as
// that's how we connect up to LLVM and it's unstable
// anyway, we control all calls to it in libstd.
- Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
+ BackendRepr::Vector { .. }
+ if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect =>
+ {
arg.make_indirect();
continue;
}
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index f96169e..c0298ed 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -4,8 +4,10 @@
// Reference: Clang RISC-V ELF psABI lowering code
// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
+use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;
@@ -27,8 +29,8 @@ enum FloatConv {
struct CannotUseFpConv;
fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
- match arg.layout.abi {
- Abi::Vector { .. } => true,
+ match arg.layout.backend_repr {
+ BackendRepr::Vector { .. } => true,
_ => arg.layout.is_aggregate(),
}
}
@@ -44,8 +46,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
where
Ty: TyAbiInterface<'a, C> + Copy,
{
- match arg_layout.abi {
- Abi::Scalar(scalar) => match scalar.primitive() {
+ match arg_layout.backend_repr {
+ BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => {
if arg_layout.size.bits() > xlen {
return Err(CannotUseFpConv);
@@ -83,8 +85,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
}
}
},
- Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
- Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+ BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
FieldsShape::Primitive => {
unreachable!("aggregates can't have `FieldsShape::Primitive`")
}
@@ -317,7 +319,7 @@ fn classify_arg<'a, Ty, C>(
}
fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
- if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if let abi::Int(i, _) = scalar.primitive() {
// 32-bit integers are always sign-extended
if i.size().bits() == 32 && xlen > 32 {
diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs
index 835353f..313d873 100644
--- a/compiler/rustc_target/src/callconv/sparc64.rs
+++ b/compiler/rustc_target/src/callconv/sparc64.rs
@@ -109,11 +109,11 @@ fn parse_structure<'a, Ty, C>(
return data;
}
- match layout.abi {
- abi::Abi::Scalar(scalar) => {
+ match layout.backend_repr {
+ abi::BackendRepr::Scalar(scalar) => {
data = arg_scalar(cx, &scalar, offset, data);
}
- abi::Abi::Aggregate { .. } => {
+ abi::BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
if offset < layout.fields.offset(i) {
offset = layout.fields.offset(i);
@@ -122,7 +122,7 @@ fn parse_structure<'a, Ty, C>(
}
}
_ => {
- if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+ if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
}
}
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index e907bee..a5af975 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -1,6 +1,6 @@
use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
use crate::abi::{
- Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+ AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;
@@ -105,10 +105,12 @@ fn contains_vector<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
where
Ty: TyAbiInterface<'a, C> + Copy,
{
- match layout.abi {
- Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false,
- Abi::Vector { .. } => true,
- Abi::Aggregate { .. } => {
+ match layout.backend_repr {
+ BackendRepr::Uninhabited
+ | BackendRepr::Scalar(_)
+ | BackendRepr::ScalarPair(..) => false,
+ BackendRepr::Vector { .. } => true,
+ BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
if contains_vector(cx, layout.field(cx, i)) {
return true;
@@ -223,9 +225,9 @@ pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty
// Intrinsics themselves are not actual "real" functions, so theres no need to change their ABIs.
&& abi != SpecAbi::RustIntrinsic
{
- let has_float = match fn_abi.ret.layout.abi {
- Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
- Abi::ScalarPair(s1, s2) => {
+ let has_float = match fn_abi.ret.layout.backend_repr {
+ BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
+ BackendRepr::ScalarPair(s1, s2) => {
matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
}
_ => false, // anyway not passed via registers on x86
diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs
index 9910e62..bd101b2 100644
--- a/compiler/rustc_target/src/callconv/x86_64.rs
+++ b/compiler/rustc_target/src/callconv/x86_64.rs
@@ -1,8 +1,10 @@
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp
+use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
-use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
/// Classification of "eightbyte" components.
// N.B., the order of the variants is from general to specific,
@@ -46,17 +48,17 @@ fn classify<'a, Ty, C>(
return Ok(());
}
- let mut c = match layout.abi {
- Abi::Uninhabited => return Ok(()),
+ let mut c = match layout.backend_repr {
+ BackendRepr::Uninhabited => return Ok(()),
- Abi::Scalar(scalar) => match scalar.primitive() {
+ BackendRepr::Scalar(scalar) => match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => Class::Int,
abi::Float(_) => Class::Sse,
},
- Abi::Vector { .. } => Class::Sse,
+ BackendRepr::Vector { .. } => Class::Sse,
- Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+ BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
let field_off = off + layout.fields.offset(i);
classify(cx, layout.field(cx, i), cls, field_off)?;
diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs
index e5a20b2..83d94cb 100644
--- a/compiler/rustc_target/src/callconv/x86_win64.rs
+++ b/compiler/rustc_target/src/callconv/x86_win64.rs
@@ -1,25 +1,28 @@
+use rustc_abi::{BackendRepr, Float, Primitive};
+
use crate::abi::call::{ArgAbi, FnAbi, Reg};
-use crate::abi::{Abi, Float, Primitive};
use crate::spec::HasTargetSpec;
// Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing
pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) {
let fixup = |a: &mut ArgAbi<'_, Ty>| {
- match a.layout.abi {
- Abi::Uninhabited | Abi::Aggregate { sized: false } => {}
- Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() {
- 8 => a.cast_to(Reg::i8()),
- 16 => a.cast_to(Reg::i16()),
- 32 => a.cast_to(Reg::i32()),
- 64 => a.cast_to(Reg::i64()),
- _ => a.make_indirect(),
- },
- Abi::Vector { .. } => {
+ match a.layout.backend_repr {
+ BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {}
+ BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
+ match a.layout.size.bits() {
+ 8 => a.cast_to(Reg::i8()),
+ 16 => a.cast_to(Reg::i16()),
+ 32 => a.cast_to(Reg::i32()),
+ 64 => a.cast_to(Reg::i64()),
+ _ => a.make_indirect(),
+ }
+ }
+ BackendRepr::Vector { .. } => {
// FIXME(eddyb) there should be a size cap here
// (probably what clang calls "illegal vectors").
}
- Abi::Scalar(scalar) => {
+ BackendRepr::Scalar(scalar) => {
// Match what LLVM does for `f128` so that `compiler-builtins` builtins match up
// with what LLVM expects.
if a.layout.size.bytes() > 8
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index e1728b0..9d313d1 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -6,7 +6,7 @@
//! Section 2.3 from the Xtensa programmers guide.
use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface};
+use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
use crate::spec::HasTargetSpec;
const NUM_ARG_GPRS: u64 = 6;
@@ -114,8 +114,8 @@ pub(crate) fn compute_abi_info<'a, Ty, C>(_cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
}
fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
- match arg.layout.abi {
- Abi::Vector { .. } => true,
+ match arg.layout.backend_repr {
+ BackendRepr::Vector { .. } => true,
_ => arg.layout.is_aggregate(),
}
}
diff --git a/compiler/rustc_target/src/spec/base/apple/mod.rs b/compiler/rustc_target/src/spec/base/apple/mod.rs
index 73763cf..f45c866 100644
--- a/compiler/rustc_target/src/spec/base/apple/mod.rs
+++ b/compiler/rustc_target/src/spec/base/apple/mod.rs
@@ -1,10 +1,9 @@
use std::borrow::Cow;
use std::env;
-use std::num::ParseIntError;
use crate::spec::{
Cc, DebuginfoKind, FramePointer, LinkerFlavor, Lld, SplitDebuginfo, StackProbeType, StaticCow,
- Target, TargetOptions, cvs,
+ TargetOptions, cvs,
};
#[cfg(test)]
@@ -97,9 +96,8 @@ fn target_abi(self) -> &'static str {
}
}
-/// Get the base target options, LLVM target and `target_arch` from the three
-/// things that uniquely identify Rust's Apple targets: The OS, the
-/// architecture, and the ABI.
+/// Get the base target options, unversioned LLVM target and `target_arch` from the three
+/// things that uniquely identify Rust's Apple targets: The OS, the architecture, and the ABI.
pub(crate) fn base(
os: &'static str,
arch: Arch,
@@ -155,117 +153,14 @@ pub(crate) fn base(
..Default::default()
};
- (opts, llvm_target(os, arch, abi), arch.target_arch())
+ (opts, unversioned_llvm_target(os, arch, abi), arch.target_arch())
}
-pub fn platform(target: &Target) -> Option<u32> {
- Some(match (&*target.os, &*target.abi) {
- ("macos", _) => object::macho::PLATFORM_MACOS,
- ("ios", "macabi") => object::macho::PLATFORM_MACCATALYST,
- ("ios", "sim") => object::macho::PLATFORM_IOSSIMULATOR,
- ("ios", _) => object::macho::PLATFORM_IOS,
- ("watchos", "sim") => object::macho::PLATFORM_WATCHOSSIMULATOR,
- ("watchos", _) => object::macho::PLATFORM_WATCHOS,
- ("tvos", "sim") => object::macho::PLATFORM_TVOSSIMULATOR,
- ("tvos", _) => object::macho::PLATFORM_TVOS,
- // FIXME: Upgrade to `object-rs` 0.33+ implementation with visionOS platform definition
- ("visionos", "sim") => 12,
- ("visionos", _) => 11,
- _ => return None,
- })
-}
-
-/// Hack for calling `deployment_target` outside of this module.
-pub fn deployment_target_for_target(target: &Target) -> (u16, u8, u8) {
- let arch = if target.llvm_target.starts_with("arm64e") {
- Arch::Arm64e
- } else if target.arch == "aarch64" {
- Arch::Arm64
- } else {
- // Dummy architecture, only used by `deployment_target` anyhow
- Arch::X86_64
- };
- let abi = match &*target.abi {
- "macabi" => TargetAbi::MacCatalyst,
- "sim" => TargetAbi::Simulator,
- "" => TargetAbi::Normal,
- abi => unreachable!("invalid abi '{abi}' for Apple target"),
- };
- deployment_target(&target.os, arch, abi)
-}
-
-/// Get the deployment target based on the standard environment variables, or
-/// fall back to a sane default.
-fn deployment_target(os: &str, arch: Arch, abi: TargetAbi) -> (u16, u8, u8) {
- // When bumping a version in here, remember to update the platform-support
- // docs too.
- //
- // NOTE: If you are looking for the default deployment target, prefer
- // `rustc --print deployment-target`, as the default here may change in
- // future `rustc` versions.
-
- // Minimum operating system versions currently supported by `rustc`.
- let os_min = match os {
- "macos" => (10, 12, 0),
- "ios" => (10, 0, 0),
- "tvos" => (10, 0, 0),
- "watchos" => (5, 0, 0),
- "visionos" => (1, 0, 0),
- _ => unreachable!("tried to get deployment target for non-Apple platform"),
- };
-
- // On certain targets it makes sense to raise the minimum OS version.
- //
- // This matches what LLVM does, see:
- // <https://github.com/llvm/llvm-project/blob/llvmorg-18.1.8/llvm/lib/TargetParser/Triple.cpp#L1900-L1932>
- let min = match (os, arch, abi) {
- ("macos", Arch::Arm64 | Arch::Arm64e, _) => (11, 0, 0),
- ("ios", Arch::Arm64 | Arch::Arm64e, TargetAbi::MacCatalyst) => (14, 0, 0),
- ("ios", Arch::Arm64 | Arch::Arm64e, TargetAbi::Simulator) => (14, 0, 0),
- ("ios", Arch::Arm64e, TargetAbi::Normal) => (14, 0, 0),
- // Mac Catalyst defaults to 13.1 in Clang.
- ("ios", _, TargetAbi::MacCatalyst) => (13, 1, 0),
- ("tvos", Arch::Arm64 | Arch::Arm64e, TargetAbi::Simulator) => (14, 0, 0),
- ("watchos", Arch::Arm64 | Arch::Arm64e, TargetAbi::Simulator) => (7, 0, 0),
- _ => os_min,
- };
-
- // The environment variable used to fetch the deployment target.
- let env_var = match os {
- "macos" => "MACOSX_DEPLOYMENT_TARGET",
- "ios" => "IPHONEOS_DEPLOYMENT_TARGET",
- "watchos" => "WATCHOS_DEPLOYMENT_TARGET",
- "tvos" => "TVOS_DEPLOYMENT_TARGET",
- "visionos" => "XROS_DEPLOYMENT_TARGET",
- _ => unreachable!("tried to get deployment target env var for non-Apple platform"),
- };
-
- if let Ok(deployment_target) = env::var(env_var) {
- match parse_version(&deployment_target) {
- // It is common that the deployment target is set too low, e.g. on
- // macOS Aarch64 to also target older x86_64, the user may set a
- // lower deployment target than supported.
- //
- // To avoid such issues, we silently raise the deployment target
- // here.
- // FIXME: We want to show a warning when `version < os_min`.
- Ok(version) => version.max(min),
- // FIXME: Report erroneous environment variable to user.
- Err(_) => min,
- }
- } else {
- min
- }
-}
-
-/// Generate the target triple that we need to pass to LLVM and/or Clang.
-fn llvm_target(os: &str, arch: Arch, abi: TargetAbi) -> StaticCow<str> {
- // The target triple depends on the deployment target, and is required to
- // enable features such as cross-language LTO, and for picking the right
- // Mach-O commands.
- //
- // Certain optimizations also depend on the deployment target.
- let (major, minor, patch) = deployment_target(os, arch, abi);
+/// Generate part of the LLVM target triple.
+///
+/// See `rustc_codegen_ssa::back::versioned_llvm_target` for the full triple passed to LLVM and
+/// Clang.
+fn unversioned_llvm_target(os: &str, arch: Arch, abi: TargetAbi) -> StaticCow<str> {
let arch = arch.target_name();
// Convert to the "canonical" OS name used by LLVM:
// https://github.com/llvm/llvm-project/blob/llvmorg-18.1.8/llvm/lib/TargetParser/Triple.cpp#L236-L282
@@ -282,7 +177,7 @@ fn llvm_target(os: &str, arch: Arch, abi: TargetAbi) -> StaticCow<str> {
TargetAbi::MacCatalyst => "-macabi",
TargetAbi::Simulator => "-simulator",
};
- format!("{arch}-apple-{os}{major}.{minor}.{patch}{environment}").into()
+ format!("{arch}-apple-{os}{environment}").into()
}
fn link_env_remove(os: &'static str) -> StaticCow<[StaticCow<str>]> {
@@ -321,20 +216,3 @@ fn link_env_remove(os: &'static str) -> StaticCow<[StaticCow<str>]> {
cvs!["MACOSX_DEPLOYMENT_TARGET"]
}
}
-
-/// Parse an OS version triple (SDK version or deployment target).
-///
-/// The size of the returned numbers here are limited by Mach-O's
-/// `LC_BUILD_VERSION`.
-fn parse_version(version: &str) -> Result<(u16, u8, u8), ParseIntError> {
- if let Some((major, minor)) = version.split_once('.') {
- let major = major.parse()?;
- if let Some((minor, patch)) = minor.split_once('.') {
- Ok((major, minor.parse()?, patch.parse()?))
- } else {
- Ok((major, minor.parse()?, 0))
- }
- } else {
- Ok((version.parse()?, 0, 0))
- }
-}
diff --git a/compiler/rustc_target/src/spec/base/apple/tests.rs b/compiler/rustc_target/src/spec/base/apple/tests.rs
index 9435b9a..a7335c9 100644
--- a/compiler/rustc_target/src/spec/base/apple/tests.rs
+++ b/compiler/rustc_target/src/spec/base/apple/tests.rs
@@ -1,4 +1,3 @@
-use super::parse_version;
use crate::spec::targets::{
aarch64_apple_darwin, aarch64_apple_ios_sim, aarch64_apple_visionos_sim,
aarch64_apple_watchos_sim, i686_apple_darwin, x86_64_apple_darwin, x86_64_apple_ios,
@@ -40,11 +39,3 @@ fn macos_link_environment_unmodified() {
],);
}
}
-
-#[test]
-fn test_parse_version() {
- assert_eq!(parse_version("10"), Ok((10, 0, 0)));
- assert_eq!(parse_version("10.12"), Ok((10, 12, 0)));
- assert_eq!(parse_version("10.12.6"), Ok((10, 12, 6)));
- assert_eq!(parse_version("9999.99.99"), Ok((9999, 99, 99)));
-}
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index f4cbe47..d518ed6 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -55,14 +55,16 @@
use crate::spec::abi::Abi;
use crate::spec::crt_objects::CrtObjects;
-pub mod abi;
pub mod crt_objects;
+pub mod abi {
+ pub use rustc_abi::{
+ AbiDisabled, AbiUnsupported, ExternAbi as Abi, all_names, enabled_names, is_enabled,
+ is_stable, lookup,
+ };
+}
+
mod base;
-pub use base::apple::{
- deployment_target_for_target as current_apple_deployment_target,
- platform as current_apple_platform,
-};
pub use base::avr_gnu::ef_avr_arch;
/// Linker is called through a C/C++ compiler.
@@ -2003,7 +2005,12 @@ pub fn warning_messages(&self) -> Vec<String> {
/// Every field here must be specified, and has no default value.
#[derive(PartialEq, Clone, Debug)]
pub struct Target {
- /// Target triple to pass to LLVM.
+ /// Unversioned target triple to pass to LLVM.
+ ///
+ /// Target triples can optionally contain an OS version (notably Apple targets), which rustc
+ /// cannot know without querying the environment.
+ ///
+ /// Use `rustc_codegen_ssa::back::versioned_llvm_target` if you need the full LLVM target.
pub llvm_target: StaticCow<str>,
/// Metadata about a target, for example the description or tier.
/// Used for generating target documentation.
diff --git a/compiler/rustc_target/src/spec/targets/mipsel_sony_psx.rs b/compiler/rustc_target/src/spec/targets/mipsel_sony_psx.rs
index c60bf65..1b8f9b7 100644
--- a/compiler/rustc_target/src/spec/targets/mipsel_sony_psx.rs
+++ b/compiler/rustc_target/src/spec/targets/mipsel_sony_psx.rs
@@ -14,8 +14,11 @@ pub(crate) fn target() -> Target {
arch: "mips".into(),
options: TargetOptions {
- os: "none".into(),
- env: "psx".into(),
+ // The Playstation 1 is mostly bare-metal, but the BIOS does provide a slight bit
+ // of functionality post load, so we still declare it as `cfg!(target_os = "psx")`.
+ //
+ // See <https://github.com/rust-lang/rust/pull/131168> for details.
+ os: "psx".into(),
vendor: "sony".into(),
linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
cpu: "mips1".into(),
diff --git a/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs
index 7d089ae..ac2141f 100644
--- a/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs
@@ -16,7 +16,7 @@ pub(crate) fn target() -> Target {
std: Some(true),
},
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
arch: "sparc64".into(),
options: base,
}
diff --git a/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs
index 21f09d6..d16b377 100644
--- a/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs
@@ -16,7 +16,7 @@ pub(crate) fn target() -> Target {
std: Some(true),
},
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
arch: "sparc64".into(),
options: TargetOptions { endian: Endian::Big, mcount: "__mcount".into(), ..base },
}
diff --git a/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs
index 12626dc..91e6406 100644
--- a/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs
@@ -17,7 +17,7 @@ pub(crate) fn target() -> Target {
std: Some(true),
},
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
arch: "sparc64".into(),
options: base,
}
diff --git a/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs
index 08f0bb3..2777395 100644
--- a/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs
@@ -11,7 +11,7 @@ pub(crate) fn target() -> Target {
std: Some(true),
},
pointer_width: 32,
- data_layout: "E-m:e-p:32:32-i64:64-f128:64-n32-S64".into(),
+ data_layout: "E-m:e-p:32:32-i64:64-i128:128-f128:64-n32-S64".into(),
arch: "sparc".into(),
options: TargetOptions {
cpu: "v9".into(),
diff --git a/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs
index 0157d03..987f694 100644
--- a/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs
@@ -17,7 +17,7 @@ pub(crate) fn target() -> Target {
..Default::default()
};
Target {
- data_layout: "E-m:e-p:32:32-i64:64-f128:64-n32-S64".into(),
+ data_layout: "E-m:e-p:32:32-i64:64-i128:128-f128:64-n32-S64".into(),
llvm_target: "sparc-unknown-none-elf".into(),
metadata: crate::spec::TargetMetadata {
description: Some("Bare 32-bit SPARC V7+".into()),
diff --git a/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs b/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs
index 138ce90..fdc9628 100644
--- a/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs
@@ -19,7 +19,7 @@ pub(crate) fn target() -> Target {
std: Some(true),
},
pointer_width: 64,
- data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
// Use "sparc64" instead of "sparcv9" here, since the former is already
// used widely in the source base. If we ever needed ABI
// differentiation from the sparc64, we could, but that would probably
diff --git a/compiler/rustc_target/src/spec/tests/tests_impl.rs b/compiler/rustc_target/src/spec/tests/tests_impl.rs
index cc5931b..bd47d12 100644
--- a/compiler/rustc_target/src/spec/tests/tests_impl.rs
+++ b/compiler/rustc_target/src/spec/tests/tests_impl.rs
@@ -165,7 +165,8 @@ fn check_consistency(&self) {
assert_matches!(&*self.llvm_abiname, "ilp32" | "ilp32f" | "ilp32d" | "ilp32e")
}
"riscv64" => {
- assert_matches!(&*self.llvm_abiname, "lp64" | "lp64f" | "lp64d" | "lp64q")
+ // Note that `lp64e` is still unstable as it's not (yet) part of the ELF psABI.
+ assert_matches!(&*self.llvm_abiname, "lp64" | "lp64f" | "lp64d" | "lp64q" | "lp64e")
}
_ => {}
}
diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs
index a6474f1..af3b5e0 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs
@@ -551,10 +551,7 @@ pub fn emit_inference_failure_err(
] => "",
[
..,
- Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(_, mut_)),
- target: _,
- },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(mut_)), target: _ },
] => hir::Mutability::from(*mut_).ref_prefix_str(),
_ => "",
};
diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/static_impl_trait.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/static_impl_trait.rs
index 8541621..2b19db2 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/static_impl_trait.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/static_impl_trait.rs
@@ -284,7 +284,7 @@ pub fn suggest_new_region_bound(
}
match fn_return.kind {
// FIXME(precise_captures): Suggest adding to `use<...>` list instead.
- TyKind::OpaqueDef(opaque, _) => {
+ TyKind::OpaqueDef(opaque) => {
// Get the identity type for this RPIT
let did = opaque.def_id.to_def_id();
let ty = Ty::new_opaque(tcx, did, ty::GenericArgs::identity_for_item(tcx, did));
diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs
index 0cf7c43..b97f3dc 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs
@@ -384,7 +384,10 @@ fn foo(&self, x: T) -> T { x }
| DefKind::AssocFn
| DefKind::AssocConst
)
- && tcx.is_type_alias_impl_trait(opaque_ty.def_id)
+ && matches!(
+ tcx.opaque_ty_origin(opaque_ty.def_id),
+ hir::OpaqueTyOrigin::TyAlias { .. }
+ )
&& !tcx
.opaque_types_defined_by(body_owner_def_id.expect_local())
.contains(&opaque_ty.def_id.expect_local())
diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
index 833358b2..438639e 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
@@ -862,22 +862,6 @@ fn visit_lifetime(&mut self, lt: &'hir hir::Lifetime) {
self.add_lt_suggs.push(lt.suggestion(self.new_lt));
}
}
-
- fn visit_ty(&mut self, ty: &'hir hir::Ty<'hir>) {
- let hir::TyKind::OpaqueDef(opaque_ty, _) = ty.kind else {
- return hir::intravisit::walk_ty(self, ty);
- };
- if let Some(&(_, b)) =
- opaque_ty.lifetime_mapping.iter().find(|&(a, _)| a.res == self.needle)
- {
- let prev_needle =
- std::mem::replace(&mut self.needle, hir::LifetimeName::Param(b));
- for bound in opaque_ty.bounds {
- self.visit_param_bound(bound);
- }
- self.needle = prev_needle;
- }
- }
}
let (lifetime_def_id, lifetime_scope) =
diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs
index 6014ed5..7aa558cf 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs
@@ -545,10 +545,7 @@ pub fn report_selection_error(
polarity: ty::PredicatePolarity::Positive,
}),
None,
- Some(match predicate.host {
- ty::HostPolarity::Maybe => ty::BoundConstness::ConstIfConst,
- ty::HostPolarity::Const => ty::BoundConstness::Const,
- }),
+ Some(predicate.constness),
None,
String::new(),
);
@@ -2238,18 +2235,16 @@ fn get_standard_error_message(
(None, _) => Some(cannot_do_this),
// suggested using default post message
(
- Some(ty::BoundConstness::Const | ty::BoundConstness::ConstIfConst),
+ Some(ty::BoundConstness::Const | ty::BoundConstness::Maybe),
Some(AppendConstMessage::Default),
) => Some(format!("{cannot_do_this} in const contexts")),
// overridden post message
(
- Some(ty::BoundConstness::Const | ty::BoundConstness::ConstIfConst),
+ Some(ty::BoundConstness::Const | ty::BoundConstness::Maybe),
Some(AppendConstMessage::Custom(custom_msg, _)),
) => Some(format!("{cannot_do_this}{custom_msg}")),
// fallback to generic message
- (Some(ty::BoundConstness::Const | ty::BoundConstness::ConstIfConst), None) => {
- None
- }
+ (Some(ty::BoundConstness::Const | ty::BoundConstness::Maybe), None) => None,
}
})
.unwrap_or_else(|| {
@@ -2653,7 +2648,7 @@ fn report_opaque_type_auto_trait_leakage(
obligation: &PredicateObligation<'tcx>,
def_id: DefId,
) -> ErrorGuaranteed {
- let name = match self.tcx.opaque_type_origin(def_id.expect_local()) {
+ let name = match self.tcx.local_opaque_ty_origin(def_id.expect_local()) {
hir::OpaqueTyOrigin::FnReturn { .. } | hir::OpaqueTyOrigin::AsyncFn { .. } => {
"opaque type".to_string()
}
diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs
index 8e0bdce..553bb61 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs
@@ -361,7 +361,6 @@ pub fn suggest_restricting_param_bound(
})
| hir::Node::TraitItem(hir::TraitItem { generics, .. })
| hir::Node::ImplItem(hir::ImplItem { generics, .. })
- | hir::Node::OpaqueTy(hir::OpaqueTy { generics, .. })
if param_ty =>
{
// We skip the 0'th arg (self) because we do not want
@@ -424,10 +423,7 @@ pub fn suggest_restricting_param_bound(
| hir::ItemKind::Const(_, generics, _)
| hir::ItemKind::TraitAlias(generics, _),
..
- })
- | hir::Node::OpaqueTy(hir::OpaqueTy { generics, .. })
- if !param_ty =>
- {
+ }) if !param_ty => {
// Missing generic type parameter bound.
if suggest_arbitrary_trait_bound(
self.tcx,
@@ -5226,12 +5222,6 @@ fn point_at_assoc_type_restriction<G: EmissionGuarantee>(
let ty::ClauseKind::Projection(proj) = clause else {
return;
};
- // avoid ICEing since effects desugared associated types don't have names.
- // this path should only be hit for `~const` on invalid places, so they
- // will have an informative error already.
- if tcx.is_effects_desugared_assoc_ty(proj.projection_term.def_id) {
- return;
- }
let name = tcx.item_name(proj.projection_term.def_id);
let mut predicates = generics.predicates.iter().peekable();
let mut prev: Option<&hir::WhereBoundPredicate<'_>> = None;
diff --git a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
index bd4e3dd..0eaacbc 100644
--- a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
+++ b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
@@ -18,7 +18,7 @@
};
use rustc_span::Span;
use rustc_span::symbol::Symbol;
-use rustc_target::abi::Abi;
+use rustc_target::abi::BackendRepr;
use smallvec::SmallVec;
use tracing::{debug, instrument};
@@ -523,8 +523,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
// e.g., `Rc<()>`
let unit_receiver_ty = receiver_for_self_ty(tcx, receiver_ty, tcx.types.unit, method_def_id);
- match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.abi) {
- Ok(Abi::Scalar(..)) => (),
+ match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.backend_repr) {
+ Ok(BackendRepr::Scalar(..)) => (),
abi => {
tcx.dcx().span_delayed_bug(
tcx.def_span(method_def_id),
@@ -538,8 +538,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
// e.g., `Rc<dyn Trait>`
let trait_object_receiver =
receiver_for_self_ty(tcx, receiver_ty, trait_object_ty, method_def_id);
- match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.abi) {
- Ok(Abi::ScalarPair(..)) => (),
+ match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.backend_repr) {
+ Ok(BackendRepr::ScalarPair(..)) => (),
abi => {
tcx.dcx().span_delayed_bug(
tcx.def_span(method_def_id),
diff --git a/compiler/rustc_trait_selection/src/traits/effects.rs b/compiler/rustc_trait_selection/src/traits/effects.rs
index 60b3357..cb36f1a 100644
--- a/compiler/rustc_trait_selection/src/traits/effects.rs
+++ b/compiler/rustc_trait_selection/src/traits/effects.rs
@@ -47,7 +47,7 @@ fn match_candidate<'tcx>(
obligation: &HostEffectObligation<'tcx>,
candidate: ty::Binder<'tcx, ty::HostEffectPredicate<'tcx>>,
) -> Result<ThinVec<PredicateObligation<'tcx>>, NoSolution> {
- if !candidate.skip_binder().host.satisfies(obligation.predicate.host) {
+ if !candidate.skip_binder().constness.satisfies(obligation.predicate.constness) {
return Err(NoSolution);
}
@@ -135,7 +135,8 @@ fn evaluate_host_effect_from_selection_candiate<'tcx>(
.map(|(trait_ref, _)| {
obligation.with(
tcx,
- trait_ref.to_host_effect_clause(tcx, obligation.predicate.host),
+ trait_ref
+ .to_host_effect_clause(tcx, obligation.predicate.constness),
)
}),
);
diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs
index 1c84f21..436c0fa 100644
--- a/compiler/rustc_trait_selection/src/traits/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/mod.rs
@@ -411,7 +411,7 @@ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
debug!("normalize_param_env_or_error: elaborated-predicates={:?}", predicates);
let elaborated_env = ty::ParamEnv::new(tcx.mk_clauses(&predicates), unnormalized_env.reveal());
- if !normalize::needs_normalization(&elaborated_env, unnormalized_env.reveal()) {
+ if !elaborated_env.has_aliases() {
return elaborated_env;
}
diff --git a/compiler/rustc_trait_selection/src/traits/normalize.rs b/compiler/rustc_trait_selection/src/traits/normalize.rs
index 12e00ec..954dfe9 100644
--- a/compiler/rustc_trait_selection/src/traits/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/normalize.rs
@@ -1,15 +1,16 @@
//! Deeply normalize types using the old trait solver.
use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_infer::infer::InferOk;
use rustc_infer::infer::at::At;
+use rustc_infer::infer::{InferCtxt, InferOk};
use rustc_infer::traits::{
FromSolverError, Normalized, Obligation, PredicateObligations, TraitEngine,
};
use rustc_macros::extension;
-use rustc_middle::traits::{ObligationCause, ObligationCauseCode, Reveal};
+use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
use rustc_middle::ty::{
self, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitable, TypeVisitableExt,
+ TypingMode,
};
use tracing::{debug, instrument};
@@ -109,16 +110,19 @@ pub(crate) fn normalize_with_depth_to<'a, 'b, 'tcx, T>(
}
pub(super) fn needs_normalization<'tcx, T: TypeVisitable<TyCtxt<'tcx>>>(
+ infcx: &InferCtxt<'tcx>,
+ param_env_for_debug_assertion: ty::ParamEnv<'tcx>,
value: &T,
- reveal: Reveal,
) -> bool {
let mut flags = ty::TypeFlags::HAS_ALIAS;
// Opaques are treated as rigid with `Reveal::UserFacing`,
// so we can ignore those.
- match reveal {
- Reveal::UserFacing => flags.remove(ty::TypeFlags::HAS_TY_OPAQUE),
- Reveal::All => {}
+ match infcx.typing_mode(param_env_for_debug_assertion) {
+ TypingMode::Coherence | TypingMode::Analysis { defining_opaque_types: _ } => {
+ flags.remove(ty::TypeFlags::HAS_TY_OPAQUE)
+ }
+ TypingMode::PostAnalysis => {}
}
value.has_type_flags(flags)
@@ -154,7 +158,7 @@ fn fold<T: TypeFoldable<TyCtxt<'tcx>>>(&mut self, value: T) -> T {
"Normalizing {value:?} without wrapping in a `Binder`"
);
- if !needs_normalization(&value, self.param_env.reveal()) {
+ if !needs_normalization(self.selcx.infcx, self.param_env, &value) {
value
} else {
value.fold_with(self)
@@ -178,7 +182,7 @@ fn fold_binder<T: TypeFoldable<TyCtxt<'tcx>>>(
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
- if !needs_normalization(&ty, self.param_env.reveal()) {
+ if !needs_normalization(self.selcx.infcx, self.param_env, &ty) {
return ty;
}
@@ -213,10 +217,11 @@ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
match kind {
ty::Opaque => {
// Only normalize `impl Trait` outside of type inference, usually in codegen.
- match self.param_env.reveal() {
- Reveal::UserFacing => ty.super_fold_with(self),
-
- Reveal::All => {
+ match self.selcx.infcx.typing_mode(self.param_env) {
+ TypingMode::Coherence | TypingMode::Analysis { defining_opaque_types: _ } => {
+ ty.super_fold_with(self)
+ }
+ TypingMode::PostAnalysis => {
let recursion_limit = self.cx().recursion_limit();
if !recursion_limit.value_within_limit(self.depth) {
self.selcx.infcx.err_ctxt().report_overflow_error(
@@ -403,7 +408,7 @@ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
fn fold_const(&mut self, constant: ty::Const<'tcx>) -> ty::Const<'tcx> {
let tcx = self.selcx.tcx();
if tcx.features().generic_const_exprs()
- || !needs_normalization(&constant, self.param_env.reveal())
+ || !needs_normalization(self.selcx.infcx, self.param_env, &constant)
{
constant
} else {
@@ -420,7 +425,7 @@ fn fold_const(&mut self, constant: ty::Const<'tcx>) -> ty::Const<'tcx> {
#[inline]
fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
- if p.allow_normalization() && needs_normalization(&p, self.param_env.reveal()) {
+ if p.allow_normalization() && needs_normalization(self.selcx.infcx, self.param_env, &p) {
p.super_fold_with(self)
} else {
p
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
index a75c07c..aab854e 100644
--- a/compiler/rustc_trait_selection/src/traits/project.rs
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -16,7 +16,7 @@
use rustc_middle::ty::fast_reject::DeepRejectCtxt;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::visit::{MaxUniverse, TypeVisitable, TypeVisitableExt};
-use rustc_middle::ty::{self, Term, Ty, TyCtxt, Upcast};
+use rustc_middle::ty::{self, Term, Ty, TyCtxt, TypingMode, Upcast};
use rustc_middle::{bug, span_bug};
use rustc_span::symbol::sym;
use tracing::{debug, instrument};
@@ -975,18 +975,21 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
// and the obligation is monomorphic, otherwise passes such as
// transmute checking and polymorphic MIR optimizations could
// get a result which isn't correct for all monomorphizations.
- if obligation.param_env.reveal() == Reveal::All {
- // NOTE(eddyb) inference variables can resolve to parameters, so
- // assume `poly_trait_ref` isn't monomorphic, if it contains any.
- let poly_trait_ref = selcx.infcx.resolve_vars_if_possible(trait_ref);
- !poly_trait_ref.still_further_specializable()
- } else {
- debug!(
- assoc_ty = ?selcx.tcx().def_path_str(node_item.item.def_id),
- ?obligation.predicate,
- "assemble_candidates_from_impls: not eligible due to default",
- );
- false
+ match selcx.infcx.typing_mode(obligation.param_env) {
+ TypingMode::Coherence | TypingMode::Analysis { .. } => {
+ debug!(
+ assoc_ty = ?selcx.tcx().def_path_str(node_item.item.def_id),
+ ?obligation.predicate,
+ "assemble_candidates_from_impls: not eligible due to default",
+ );
+ false
+ }
+ TypingMode::PostAnalysis => {
+ // NOTE(eddyb) inference variables can resolve to parameters, so
+ // assume `poly_trait_ref` isn't monomorphic, if it contains any.
+ let poly_trait_ref = selcx.infcx.resolve_vars_if_possible(trait_ref);
+ !poly_trait_ref.still_further_specializable()
+ }
}
}
}
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
index 01e6516..a8d701a 100644
--- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -9,7 +9,7 @@
pub use rustc_middle::traits::query::NormalizationResult;
use rustc_middle::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt};
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitor};
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitor, TypingMode};
use rustc_span::DUMMY_SP;
use tracing::{debug, info, instrument};
@@ -21,7 +21,7 @@
use crate::infer::{InferCtxt, InferOk};
use crate::traits::normalize::needs_normalization;
use crate::traits::{
- BoundVarReplacer, Normalized, ObligationCause, PlaceholderReplacer, Reveal, ScrubbedTraitError,
+ BoundVarReplacer, Normalized, ObligationCause, PlaceholderReplacer, ScrubbedTraitError,
};
#[extension(pub trait QueryNormalizeExt<'tcx>)]
@@ -89,7 +89,7 @@ fn query_normalize<T>(self, value: T) -> Result<Normalized<'tcx, T>, NoSolution>
}
}
- if !needs_normalization(&value, self.param_env.reveal()) {
+ if !needs_normalization(self.infcx, self.param_env, &value) {
return Ok(Normalized { value, obligations: PredicateObligations::new() });
}
@@ -191,7 +191,7 @@ fn try_fold_binder<T: TypeFoldable<TyCtxt<'tcx>>>(
#[instrument(level = "debug", skip(self))]
fn try_fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
- if !needs_normalization(&ty, self.param_env.reveal()) {
+ if !needs_normalization(self.infcx, self.param_env, &ty) {
return Ok(ty);
}
@@ -215,10 +215,12 @@ fn try_fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
let res = match kind {
ty::Opaque => {
// Only normalize `impl Trait` outside of type inference, usually in codegen.
- match self.param_env.reveal() {
- Reveal::UserFacing => ty.try_super_fold_with(self)?,
+ match self.infcx.typing_mode(self.param_env) {
+ TypingMode::Coherence | TypingMode::Analysis { defining_opaque_types: _ } => {
+ ty.try_super_fold_with(self)?
+ }
- Reveal::All => {
+ TypingMode::PostAnalysis => {
let args = data.args.try_fold_with(self)?;
let recursion_limit = self.cx().recursion_limit();
@@ -332,7 +334,7 @@ fn try_fold_const(
&mut self,
constant: ty::Const<'tcx>,
) -> Result<ty::Const<'tcx>, Self::Error> {
- if !needs_normalization(&constant, self.param_env.reveal()) {
+ if !needs_normalization(self.infcx, self.param_env, &constant) {
return Ok(constant);
}
@@ -351,7 +353,7 @@ fn try_fold_predicate(
&mut self,
p: ty::Predicate<'tcx>,
) -> Result<ty::Predicate<'tcx>, Self::Error> {
- if p.allow_normalization() && needs_normalization(&p, self.param_env.reveal()) {
+ if p.allow_normalization() && needs_normalization(self.infcx, self.param_env, &p) {
p.try_super_fold_with(self)
} else {
Ok(p)
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 48149a0..722ef5f 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -1,7 +1,7 @@
use std::iter;
use rustc_abi::Primitive::Pointer;
-use rustc_abi::{Abi, PointerKind, Scalar, Size};
+use rustc_abi::{BackendRepr, PointerKind, Scalar, Size};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_middle::bug;
@@ -469,7 +469,7 @@ fn fn_arg_sanity_check<'tcx>(
// careful. Scalar/ScalarPair is fine, since backends will generally use
// `layout.abi` and ignore everything else. We should just reject `Aggregate`
// entirely here, but some targets need to be fixed first.
- if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+ if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
// For an unsized type we'd only pass the sized prefix, so there is no universe
// in which we ever want to allow this.
assert!(
@@ -500,7 +500,7 @@ fn fn_arg_sanity_check<'tcx>(
// Similar to `Direct`, we need to make sure that backends use `layout.abi` and
// ignore the rest of the layout.
assert!(
- matches!(arg.layout.abi, Abi::ScalarPair(..)),
+ matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
"PassMode::Pair for type {}",
arg.layout.ty
);
@@ -658,9 +658,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
// This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended,
// but who knows what breaks if we change this now.
- if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+ if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
assert!(
- arg.layout.abi.is_sized(),
+ arg.layout.backend_repr.is_sized(),
"'unadjusted' ABI does not support unsized arguments"
);
}
@@ -731,8 +731,8 @@ fn make_thin_self_ptr<'tcx>(
// FIXME (mikeyhew) change this to use &own if it is ever added to the language
Ty::new_mut_ptr(tcx, layout.ty)
} else {
- match layout.abi {
- Abi::ScalarPair(..) | Abi::Scalar(..) => (),
+ match layout.backend_repr {
+ BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
_ => bug!("receiver type has unsupported layout: {:?}", layout),
}
diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs
index 16fd282..3655fa5 100644
--- a/compiler/rustc_ty_utils/src/assoc.rs
+++ b/compiler/rustc_ty_utils/src/assoc.rs
@@ -143,7 +143,6 @@ fn associated_item_from_trait_item_ref(trait_item_ref: &hir::TraitItemRef) -> ty
container: ty::TraitContainer,
fn_has_self_parameter: has_self,
opt_rpitit_info: None,
- is_effects_desugaring: false,
}
}
@@ -163,7 +162,6 @@ fn associated_item_from_impl_item_ref(impl_item_ref: &hir::ImplItemRef) -> ty::A
container: ty::ImplContainer,
fn_has_self_parameter: has_self,
opt_rpitit_info: None,
- is_effects_desugaring: false,
}
}
@@ -190,7 +188,7 @@ struct RPITVisitor {
impl<'tcx> Visitor<'tcx> for RPITVisitor {
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
- if let hir::TyKind::OpaqueDef(opaq, _) = ty.kind
+ if let hir::TyKind::OpaqueDef(opaq) = ty.kind
&& self.rpits.insert(opaq.def_id)
{
for bound in opaq.bounds {
@@ -246,7 +244,7 @@ fn associated_type_for_impl_trait_in_trait(
) -> LocalDefId {
let (hir::OpaqueTyOrigin::FnReturn { parent: fn_def_id, .. }
| hir::OpaqueTyOrigin::AsyncFn { parent: fn_def_id, .. }) =
- tcx.opaque_type_origin(opaque_ty_def_id)
+ tcx.local_opaque_ty_origin(opaque_ty_def_id)
else {
bug!("expected opaque for {opaque_ty_def_id:?}");
};
@@ -275,7 +273,6 @@ fn associated_type_for_impl_trait_in_trait(
fn_def_id: fn_def_id.to_def_id(),
opaque_def_id: opaque_ty_def_id.to_def_id(),
}),
- is_effects_desugaring: false,
});
// Copy visility of the containing function.
@@ -284,8 +281,6 @@ fn associated_type_for_impl_trait_in_trait(
// Copy defaultness of the containing function.
trait_assoc_ty.defaultness(tcx.defaultness(fn_def_id));
- trait_assoc_ty.is_type_alias_impl_trait(false);
-
// There are no inferred outlives for the synthesized associated type.
trait_assoc_ty.inferred_outlives_of(&[]);
@@ -327,7 +322,6 @@ fn associated_type_for_impl_trait_in_impl(
container: ty::ImplContainer,
fn_has_self_parameter: false,
opt_rpitit_info: Some(ImplTraitInTraitData::Impl { fn_def_id: impl_fn_def_id.to_def_id() }),
- is_effects_desugaring: false,
});
// Copy visility of the containing function.
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 94b80e2..5ca7afe 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -5,8 +5,9 @@
use rustc_abi::Integer::{I8, I32};
use rustc_abi::Primitive::{self, Float, Int, Pointer};
use rustc_abi::{
- Abi, AbiAndPrefAlign, AddressSpace, Align, FieldsShape, HasDataLayout, LayoutCalculatorError,
- LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange,
+ AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout,
+ LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
+ Variants, WrappingRange,
};
use rustc_index::bit_set::BitSet;
use rustc_index::{IndexSlice, IndexVec};
@@ -173,7 +174,9 @@ fn layout_of_uncached<'tcx>(
let mut layout = LayoutData::clone(&layout.0);
match *pat {
ty::PatternKind::Range { start, end, include_end } => {
- if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &mut layout.abi {
+ if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+ &mut layout.backend_repr
+ {
if let Some(start) = start {
scalar.valid_range_mut().start = start
.try_to_bits(tcx, param_env)
@@ -275,7 +278,7 @@ fn layout_of_uncached<'tcx>(
return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
}
- let Abi::Scalar(metadata) = metadata_layout.abi else {
+ let BackendRepr::Scalar(metadata) = metadata_layout.backend_repr else {
return Err(error(cx, LayoutError::Unknown(pointee)));
};
@@ -330,9 +333,9 @@ fn layout_of_uncached<'tcx>(
.ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
- Abi::Uninhabited
+ BackendRepr::Uninhabited
} else {
- Abi::Aggregate { sized: true }
+ BackendRepr::Memory { sized: true }
};
let largest_niche = if count != 0 { element.largest_niche } else { None };
@@ -340,7 +343,7 @@ fn layout_of_uncached<'tcx>(
tcx.mk_layout(LayoutData {
variants: Variants::Single { index: FIRST_VARIANT },
fields: FieldsShape::Array { stride: element.size, count },
- abi,
+ backend_repr: abi,
largest_niche,
align: element.align,
size,
@@ -353,7 +356,7 @@ fn layout_of_uncached<'tcx>(
tcx.mk_layout(LayoutData {
variants: Variants::Single { index: FIRST_VARIANT },
fields: FieldsShape::Array { stride: element.size, count: 0 },
- abi: Abi::Aggregate { sized: false },
+ backend_repr: BackendRepr::Memory { sized: false },
largest_niche: None,
align: element.align,
size: Size::ZERO,
@@ -364,7 +367,7 @@ fn layout_of_uncached<'tcx>(
ty::Str => tcx.mk_layout(LayoutData {
variants: Variants::Single { index: FIRST_VARIANT },
fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
- abi: Abi::Aggregate { sized: false },
+ backend_repr: BackendRepr::Memory { sized: false },
largest_niche: None,
align: dl.i8_align,
size: Size::ZERO,
@@ -384,8 +387,8 @@ fn layout_of_uncached<'tcx>(
&ReprOptions::default(),
StructKind::AlwaysSized,
)?;
- match unit.abi {
- Abi::Aggregate { ref mut sized } => *sized = false,
+ match unit.backend_repr {
+ BackendRepr::Memory { ref mut sized } => *sized = false,
_ => bug!(),
}
tcx.mk_layout(unit)
@@ -500,7 +503,7 @@ fn layout_of_uncached<'tcx>(
// Compute the ABI of the element type: