Rollup merge of #143681 - RalfJung:bootstrap-miri-rebuilds, r=Kobzol
bootstrap/miri: avoid rebuilds for test builds
When building Miri in its own repo, we always build with `--all-targets`:
https://github.com/rust-lang/rust/blob/a00961269107703772c4e8f071f0accbe0f1a7e5/src/tools/miri/miri-script/src/util.rs#L167-L174
This saves a bunch of time: some of Miri's dependencies get more features enabled through Miri's dev-dependencies, so without `--all-targets` they all get built twice when you do `cargo build && cargo test` (which is typically what you end up doing inside `./miri test` and also inside `./x test miri`).
This PR applies the same approach to bootstrap, drastically shortening the edit-compile cycle for Miri work here. :)
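For illustration, here is a minimal sketch of the pattern, assuming a plain `std::process::Command` invocation; the helper name and error handling are hypothetical, not the actual miri-script or bootstrap code:

```rust
use std::process::Command;

/// Hypothetical helper sketching the pattern from miri-script's util.rs:
/// build with `--all-targets` so dependencies are compiled with the feature
/// set unified across dev-dependencies, letting a later `cargo test` reuse
/// the artifacts instead of rebuilding them.
fn cargo_build_all_targets(manifest_dir: &str) -> std::io::Result<()> {
    let status = Command::new("cargo")
        .arg("build")
        // Without this flag, `cargo test` resolves features including the
        // dev-dependencies and rebuilds the shared dependencies a second time.
        .arg("--all-targets")
        .current_dir(manifest_dir)
        .status()?;
    if status.success() {
        Ok(())
    } else {
        Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            "cargo build --all-targets failed",
        ))
    }
}
```

With the build done this way, a subsequent `cargo test` (or `./x test miri`) finds the test targets' dependencies already compiled and only has to link and run the tests.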
diff --git a/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs b/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs
index 22b7f5a..56fd12e 100644
--- a/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs
+++ b/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs
@@ -489,6 +489,14 @@
fn complexity_exceeded(&self) -> Result<(), Self::Error> {
Err(())
}
+
+ fn report_mixed_deref_pat_ctors(
+ &self,
+ _deref_pat: &DeconstructedPat<'_>,
+ _normal_pat: &DeconstructedPat<'_>,
+ ) {
+ // FIXME(deref_patterns): This could report an error comparable to the one in rustc.
+ }
}
impl fmt::Debug for MatchCheckCtx<'_> {
diff --git a/crates/parser/src/lexed_str.rs b/crates/parser/src/lexed_str.rs
index e6c92de..bff9acd 100644
--- a/crates/parser/src/lexed_str.rs
+++ b/crates/parser/src/lexed_str.rs
@@ -11,8 +11,8 @@
use std::ops;
use rustc_literal_escaper::{
- EscapeError, Mode, unescape_byte, unescape_byte_str, unescape_c_str, unescape_char,
- unescape_str,
+ unescape_byte, unescape_byte_str, unescape_c_str, unescape_char, unescape_str, EscapeError,
+ Mode,
};
use crate::{
@@ -44,7 +44,9 @@
// Re-create the tokenizer from scratch for every token because `GuardedStrPrefix` is one token in the lexer
// but we want to split it into two in edition <2024.
- while let Some(token) = rustc_lexer::tokenize(&text[conv.offset..]).next() {
+ while let Some(token) =
+ rustc_lexer::tokenize(&text[conv.offset..], rustc_lexer::FrontmatterAllowed::No).next()
+ {
let token_text = &text[conv.offset..][..token.len as usize];
conv.extend_token(&token.kind, token_text);
@@ -58,7 +60,7 @@
return None;
}
- let token = rustc_lexer::tokenize(text).next()?;
+ let token = rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next()?;
if token.len as usize != text.len() {
return None;
}
diff --git a/crates/proc-macro-srv/src/server_impl.rs b/crates/proc-macro-srv/src/server_impl.rs
index dd576f2..662f625 100644
--- a/crates/proc-macro-srv/src/server_impl.rs
+++ b/crates/proc-macro-srv/src/server_impl.rs
@@ -121,7 +121,7 @@
use proc_macro::bridge::LitKind;
use rustc_lexer::{LiteralKind, Token, TokenKind};
- let mut tokens = rustc_lexer::tokenize(s);
+ let mut tokens = rustc_lexer::tokenize(s, rustc_lexer::FrontmatterAllowed::No);
let minus_or_lit = tokens.next().unwrap_or(Token { kind: TokenKind::Eof, len: 0 });
let lit = if minus_or_lit.kind == TokenKind::Minus {
diff --git a/crates/rust-analyzer/src/cli/scip.rs b/crates/rust-analyzer/src/cli/scip.rs
index d258c5d..37f83f6 100644
--- a/crates/rust-analyzer/src/cli/scip.rs
+++ b/crates/rust-analyzer/src/cli/scip.rs
@@ -25,7 +25,7 @@
eprintln!("Generating SCIP start...");
let now = Instant::now();
- let no_progress = &|s| (eprintln!("rust-analyzer: Loading {s}"));
+ let no_progress = &|s| eprintln!("rust-analyzer: Loading {s}");
let root =
vfs::AbsPathBuf::assert_utf8(std::env::current_dir()?.join(&self.path)).normalize();
diff --git a/crates/tt/src/lib.rs b/crates/tt/src/lib.rs
index 14574a6..4412338 100644
--- a/crates/tt/src/lib.rs
+++ b/crates/tt/src/lib.rs
@@ -579,7 +579,7 @@
{
use rustc_lexer::LiteralKind;
- let token = rustc_lexer::tokenize(text).next_tuple();
+ let token = rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next_tuple();
let Some((rustc_lexer::Token {
kind: rustc_lexer::TokenKind::Literal { kind, suffix_start },
..