Rollup merge of #57768 - estebank:type-args-sugg, r=zackmdavis
Continue parsing after parent type args and suggest using angle brackets
```
error[E0214]: parenthesized parameters may only be used with a trait
--> $DIR/E0214.rs:2:15
|
LL | let v: Vec(&str) = vec!["foo"];
| ^^^^^^
| |
| only traits may use parentheses
| help: use angle brackets instead: `<&str>`
```
r? @zackmdavis
diff --git a/appveyor.yml b/appveyor.yml
index f043b8b..d70ad54 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -5,12 +5,6 @@
# server goes down presumably. See #43333 for more info
CARGO_HTTP_CHECK_REVOKE: false
- # Recommended by AppVeyor this moves our builds to GCE which incurs a 3-4
- # minute startup overhead, but that's paltry compared to our overall build
- # times so we're will to eat the cost. This is intended to give us better
- # performance I believe!
- appveyor_build_worker_cloud: gce
-
matrix:
# 32/64 bit MSVC tests
- MSYS_BITS: 64
diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py
index f3dbae6..e8c1594 100644
--- a/src/bootstrap/bootstrap.py
+++ b/src/bootstrap/bootstrap.py
@@ -230,6 +230,9 @@
err = "unknown OS type: {}".format(ostype)
sys.exit(err)
+ if cputype == 'powerpc' and ostype == 'unknown-freebsd':
+ cputype = subprocess.check_output(
+ ['uname', '-p']).strip().decode(default_encoding)
cputype_mapper = {
'BePC': 'i686',
'aarch64': 'aarch64',
diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs
index 9c58f5b..31adab6 100644
--- a/src/bootstrap/builder.rs
+++ b/src/bootstrap/builder.rs
@@ -660,6 +660,15 @@
}
}
+ /// Get the paths to all of the compiler's codegen backends.
+ fn codegen_backends(&self, compiler: Compiler) -> impl Iterator<Item = PathBuf> {
+ fs::read_dir(self.sysroot_codegen_backends(compiler))
+ .into_iter()
+ .flatten()
+ .filter_map(Result::ok)
+ .map(|entry| entry.path())
+ }
+
pub fn rustdoc(&self, host: Interned<String>) -> PathBuf {
self.ensure(tool::Rustdoc { host })
}
@@ -750,6 +759,9 @@
match mode {
Mode::Std => {
self.clear_if_dirty(&my_out, &self.rustc(compiler));
+ for backend in self.codegen_backends(compiler) {
+ self.clear_if_dirty(&my_out, &backend);
+ }
},
Mode::Test => {
self.clear_if_dirty(&my_out, &libstd_stamp);
diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs
index 08b5ac0..203be54 100644
--- a/src/libcore/convert.rs
+++ b/src/libcore/convert.rs
@@ -463,11 +463,11 @@
// Infallible conversions are semantically equivalent to fallible conversions
// with an uninhabited error type.
#[unstable(feature = "try_from", issue = "33417")]
-impl<T, U> TryFrom<U> for T where T: From<U> {
+impl<T, U> TryFrom<U> for T where U: Into<T> {
type Error = !;
fn try_from(value: U) -> Result<Self, Self::Error> {
- Ok(T::from(value))
+ Ok(U::into(value))
}
}
diff --git a/src/librustc/ich/impls_syntax.rs b/src/librustc/ich/impls_syntax.rs
index de56718..7e48554 100644
--- a/src/librustc/ich/impls_syntax.rs
+++ b/src/librustc/ich/impls_syntax.rs
@@ -164,6 +164,7 @@
impl_stable_hash_for_spanned!(::syntax::ast::LitKind);
impl_stable_hash_for!(enum ::syntax::ast::LitKind {
Str(value, style),
+ Err(value),
ByteStr(value),
Byte(value),
Char(value),
@@ -329,6 +330,7 @@
match *lit {
token::Lit::Byte(val) |
token::Lit::Char(val) |
+ token::Lit::Err(val) |
token::Lit::Integer(val) |
token::Lit::Float(val) |
token::Lit::Str_(val) |
diff --git a/src/librustc/infer/lexical_region_resolve/mod.rs b/src/librustc/infer/lexical_region_resolve/mod.rs
index 545192a..c0952fe 100644
--- a/src/librustc/infer/lexical_region_resolve/mod.rs
+++ b/src/librustc/infer/lexical_region_resolve/mod.rs
@@ -241,6 +241,14 @@
match *b_data {
VarValue::Value(cur_region) => {
+ // Identical scopes can show up quite often, if the fixed point
+ // iteration converges slowly, skip them
+ if let (ReScope(a_scope), ReScope(cur_scope)) = (a_region, cur_region) {
+ if a_scope == cur_scope {
+ return false;
+ }
+ }
+
let mut lub = self.lub_concrete_regions(a_region, cur_region);
if lub == cur_region {
return false;
@@ -280,12 +288,6 @@
fn lub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> Region<'tcx> {
let tcx = self.tcx();
- // Equal scopes can show up quite often, if the fixed point
- // iteration converges slowly, skip them
- if a == b {
- return a;
- }
-
match (a, b) {
(&ty::ReClosureBound(..), _)
| (_, &ty::ReClosureBound(..))
diff --git a/src/librustc_mir/hair/constant.rs b/src/librustc_mir/hair/constant.rs
index 37d741d..f63c3e2 100644
--- a/src/librustc_mir/hair/constant.rs
+++ b/src/librustc_mir/hair/constant.rs
@@ -37,6 +37,14 @@
let id = tcx.allocate_bytes(s.as_bytes());
ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &tcx)
},
+ LitKind::Err(ref s) => {
+ let s = s.as_str();
+ let id = tcx.allocate_bytes(s.as_bytes());
+ return Ok(ty::Const {
+ val: ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &tcx),
+ ty: tcx.types.err,
+ });
+ },
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
ConstValue::Scalar(Scalar::Ptr(id.into()))
diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs
index c9173df..ad4bc3c 100644
--- a/src/librustc_typeck/check/mod.rs
+++ b/src/librustc_typeck/check/mod.rs
@@ -3121,7 +3121,8 @@
opt_ty.unwrap_or_else(
|| tcx.mk_float_var(self.next_float_var_id()))
}
- ast::LitKind::Bool(_) => tcx.types.bool
+ ast::LitKind::Bool(_) => tcx.types.bool,
+ ast::LitKind::Err(_) => tcx.types.err,
}
}
diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs
index 87e979b..e43251b 100644
--- a/src/librustdoc/html/highlight.rs
+++ b/src/librustdoc/html/highlight.rs
@@ -25,40 +25,51 @@
tooltip: Option<(&str, &str)>,
) -> String {
debug!("highlighting: ================\n{}\n==============", src);
- let sess = parse::ParseSess::new(FilePathMapping::empty());
- let fm = sess.source_map().new_source_file(FileName::Custom("stdin".to_string()),
- src.to_string());
-
let mut out = Vec::new();
if let Some((tooltip, class)) = tooltip {
write!(out, "<div class='information'><div class='tooltip {}'>ⓘ<span \
class='tooltiptext'>{}</span></div></div>",
class, tooltip).unwrap();
}
- write_header(class, &mut out).unwrap();
- let lexer = match lexer::StringReader::new_without_err(&sess, fm, None, "Output from rustc:") {
- Ok(l) => l,
- Err(_) => {
- let first_line = src.lines().next().unwrap_or_else(|| "");
- let mut err = sess.span_diagnostic
- .struct_warn(&format!("Invalid doc comment starting with: `{}`\n\
- (Ignoring this codeblock)",
- first_line));
- err.emit();
- return String::new();
+ let sess = parse::ParseSess::new(FilePathMapping::empty());
+ let fm = sess.source_map().new_source_file(
+ FileName::Custom(String::from("rustdoc-highlighting")),
+ src.to_owned(),
+ );
+ let highlight_result =
+ lexer::StringReader::new_or_buffered_errs(&sess, fm, None).and_then(|lexer| {
+ let mut classifier = Classifier::new(lexer, sess.source_map());
+
+ let mut highlighted_source = vec![];
+ if classifier.write_source(&mut highlighted_source).is_err() {
+ Err(classifier.lexer.buffer_fatal_errors())
+ } else {
+ Ok(String::from_utf8_lossy(&highlighted_source).into_owned())
+ }
+ });
+
+ match highlight_result {
+ Ok(highlighted_source) => {
+ write_header(class, &mut out).unwrap();
+ write!(out, "{}", highlighted_source).unwrap();
+ if let Some(extension) = extension {
+ write!(out, "{}", extension).unwrap();
+ }
+ write_footer(&mut out).unwrap();
}
- };
- let mut classifier = Classifier::new(lexer, sess.source_map());
- if classifier.write_source(&mut out).is_err() {
- classifier.lexer.emit_fatal_errors();
- return format!("<pre>{}</pre>", src);
+ Err(errors) => {
+ // If errors are encountered while trying to highlight, cancel the errors and just emit
+ // the unhighlighted source. The errors will have already been reported in the
+ // `check-code-block-syntax` pass.
+ for mut error in errors {
+ error.cancel();
+ }
+
+ write!(out, "<pre><code>{}</code></pre>", src).unwrap();
+ }
}
- if let Some(extension) = extension {
- write!(out, "{}", extension).unwrap();
- }
- write_footer(&mut out).unwrap();
String::from_utf8_lossy(&out[..]).into_owned()
}
@@ -151,6 +162,17 @@
}
}
+enum HighlightError {
+ LexError,
+ IoError(io::Error),
+}
+
+impl From<io::Error> for HighlightError {
+ fn from(err: io::Error) -> Self {
+ HighlightError::IoError(err)
+ }
+}
+
impl<'a> Classifier<'a> {
fn new(lexer: lexer::StringReader<'a>, source_map: &'a SourceMap) -> Classifier<'a> {
Classifier {
@@ -162,17 +184,11 @@
}
}
- /// Gets the next token out of the lexer, emitting fatal errors if lexing fails.
- fn try_next_token(&mut self) -> io::Result<TokenAndSpan> {
+ /// Gets the next token out of the lexer.
+ fn try_next_token(&mut self) -> Result<TokenAndSpan, HighlightError> {
match self.lexer.try_next_token() {
Ok(tas) => Ok(tas),
- Err(_) => {
- let mut err = self.lexer.sess.span_diagnostic
- .struct_warn("Backing out of syntax highlighting");
- err.note("You probably did not intend to render this as a rust code-block");
- err.emit();
- Err(io::Error::new(io::ErrorKind::Other, ""))
- }
+ Err(_) => Err(HighlightError::LexError),
}
}
@@ -185,7 +201,7 @@
/// source.
fn write_source<W: Writer>(&mut self,
out: &mut W)
- -> io::Result<()> {
+ -> Result<(), HighlightError> {
loop {
let next = self.try_next_token()?;
if next.tok == token::Eof {
@@ -202,7 +218,7 @@
fn write_token<W: Writer>(&mut self,
out: &mut W,
tas: TokenAndSpan)
- -> io::Result<()> {
+ -> Result<(), HighlightError> {
let klass = match tas.tok {
token::Shebang(s) => {
out.string(Escape(&s.as_str()), Class::None)?;
@@ -296,7 +312,7 @@
token::Literal(lit, _suf) => {
match lit {
// Text literals.
- token::Byte(..) | token::Char(..) |
+ token::Byte(..) | token::Char(..) | token::Err(..) |
token::ByteStr(..) | token::ByteStrRaw(..) |
token::Str_(..) | token::StrRaw(..) => Class::String,
@@ -341,7 +357,9 @@
// Anything that didn't return above is the simple case where we the
// class just spans a single token, so we can use the `string` method.
- out.string(Escape(&self.snip(tas.sp)), klass)
+ out.string(Escape(&self.snip(tas.sp)), klass)?;
+
+ Ok(())
}
// Helper function to get a snippet from the source_map.
diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs
index 05a9a2d..6b7f540 100644
--- a/src/librustdoc/html/markdown.rs
+++ b/src/librustdoc/html/markdown.rs
@@ -919,6 +919,115 @@
links
}
+#[derive(Debug)]
+crate struct RustCodeBlock {
+ /// The range in the markdown that the code block occupies. Note that this includes the fences
+ /// for fenced code blocks.
+ pub range: Range<usize>,
+ /// The range in the markdown that the code within the code block occupies.
+ pub code: Range<usize>,
+ pub is_fenced: bool,
+ pub syntax: Option<String>,
+}
+
+/// Returns a range of bytes for each code block in the markdown that is tagged as `rust` or
+/// untagged (and assumed to be rust).
+crate fn rust_code_blocks(md: &str) -> Vec<RustCodeBlock> {
+ let mut code_blocks = vec![];
+
+ if md.is_empty() {
+ return code_blocks;
+ }
+
+ let mut opts = Options::empty();
+ opts.insert(OPTION_ENABLE_TABLES);
+ opts.insert(OPTION_ENABLE_FOOTNOTES);
+ let mut p = Parser::new_ext(md, opts);
+
+ let mut code_block_start = 0;
+ let mut code_start = 0;
+ let mut is_fenced = false;
+ let mut previous_offset = 0;
+ let mut in_rust_code_block = false;
+ while let Some(event) = p.next() {
+ let offset = p.get_offset();
+
+ match event {
+ Event::Start(Tag::CodeBlock(syntax)) => {
+ let lang_string = if syntax.is_empty() {
+ LangString::all_false()
+ } else {
+ LangString::parse(&*syntax, ErrorCodes::Yes)
+ };
+
+ if lang_string.rust {
+ in_rust_code_block = true;
+
+ code_start = offset;
+ code_block_start = match md[previous_offset..offset].find("```") {
+ Some(fence_idx) => {
+ is_fenced = true;
+ previous_offset + fence_idx
+ }
+ None => offset,
+ };
+ }
+ }
+ Event::End(Tag::CodeBlock(syntax)) if in_rust_code_block => {
+ in_rust_code_block = false;
+
+ let code_block_end = if is_fenced {
+ let fence_str = &md[previous_offset..offset]
+ .chars()
+ .rev()
+ .collect::<String>();
+ fence_str
+ .find("```")
+ .map(|fence_idx| offset - fence_idx)
+ .unwrap_or_else(|| offset)
+ } else if md
+ .as_bytes()
+ .get(offset)
+ .map(|b| *b == b'\n')
+ .unwrap_or_default()
+ {
+ offset - 1
+ } else {
+ offset
+ };
+
+ let code_end = if is_fenced {
+ previous_offset
+ } else {
+ code_block_end
+ };
+
+ code_blocks.push(RustCodeBlock {
+ is_fenced,
+ range: Range {
+ start: code_block_start,
+ end: code_block_end,
+ },
+ code: Range {
+ start: code_start,
+ end: code_end,
+ },
+ syntax: if !syntax.is_empty() {
+ Some(syntax.into_owned())
+ } else {
+ None
+ },
+ });
+ }
+ _ => (),
+ }
+
+ previous_offset = offset;
+ }
+
+ code_blocks
+}
+
#[derive(Clone, Default, Debug)]
pub struct IdMap {
map: FxHashMap<String, usize>,
diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs
index 4bbc01d..f4149b5 100644
--- a/src/librustdoc/lib.rs
+++ b/src/librustdoc/lib.rs
@@ -3,6 +3,7 @@
html_root_url = "https://doc.rust-lang.org/nightly/",
html_playground_url = "https://play.rust-lang.org/")]
+#![feature(bind_by_move_pattern_guards)]
#![feature(rustc_private)]
#![feature(box_patterns)]
#![feature(box_syntax)]
diff --git a/src/librustdoc/passes/check_code_block_syntax.rs b/src/librustdoc/passes/check_code_block_syntax.rs
new file mode 100644
index 0000000..a013cc3
--- /dev/null
+++ b/src/librustdoc/passes/check_code_block_syntax.rs
@@ -0,0 +1,109 @@
+use errors::Applicability;
+use syntax::parse::lexer::{TokenAndSpan, StringReader as Lexer};
+use syntax::parse::{ParseSess, token};
+use syntax::source_map::FilePathMapping;
+use syntax_pos::FileName;
+
+use clean;
+use core::DocContext;
+use fold::DocFolder;
+use html::markdown::{self, RustCodeBlock};
+use passes::Pass;
+
+pub const CHECK_CODE_BLOCK_SYNTAX: Pass =
+ Pass::early("check-code-block-syntax", check_code_block_syntax,
+ "validates syntax inside Rust code blocks");
+
+pub fn check_code_block_syntax(krate: clean::Crate, cx: &DocContext) -> clean::Crate {
+ SyntaxChecker { cx }.fold_crate(krate)
+}
+
+struct SyntaxChecker<'a, 'tcx: 'a, 'rcx: 'a> {
+ cx: &'a DocContext<'a, 'tcx, 'rcx>,
+}
+
+impl<'a, 'tcx, 'rcx> SyntaxChecker<'a, 'tcx, 'rcx> {
+ fn check_rust_syntax(&self, item: &clean::Item, dox: &str, code_block: RustCodeBlock) {
+ let sess = ParseSess::new(FilePathMapping::empty());
+ let source_file = sess.source_map().new_source_file(
+ FileName::Custom(String::from("doctest")),
+ dox[code_block.code].to_owned(),
+ );
+
+ let errors = Lexer::new_or_buffered_errs(&sess, source_file, None).and_then(|mut lexer| {
+ while let Ok(TokenAndSpan { tok, .. }) = lexer.try_next_token() {
+ if tok == token::Eof {
+ break;
+ }
+ }
+
+ let errors = lexer.buffer_fatal_errors();
+
+ if !errors.is_empty() {
+ Err(errors)
+ } else {
+ Ok(())
+ }
+ });
+
+ if let Err(errors) = errors {
+ let mut diag = if let Some(sp) =
+ super::source_span_for_markdown_range(self.cx, &dox, &code_block.range, &item.attrs)
+ {
+ let mut diag = self
+ .cx
+ .sess()
+ .struct_span_warn(sp, "could not parse code block as Rust code");
+
+ for mut err in errors {
+ diag.note(&format!("error from rustc: {}", err.message()));
+ err.cancel();
+ }
+
+ if code_block.syntax.is_none() && code_block.is_fenced {
+ let sp = sp.from_inner_byte_pos(0, 3);
+ diag.span_suggestion_with_applicability(
+ sp,
+ "mark blocks that do not contain Rust code as text",
+ String::from("```text"),
+ Applicability::MachineApplicable,
+ );
+ }
+
+ diag
+ } else {
+ // We couldn't calculate the span of the markdown block that had the error, so our
+ // diagnostics are going to be a bit lacking.
+ let mut diag = self.cx.sess().struct_span_warn(
+ super::span_of_attrs(&item.attrs),
+ "doc comment contains an invalid Rust code block",
+ );
+
+ for mut err in errors {
+ // Don't bother reporting the error, because we can't show where it happened.
+ err.cancel();
+ }
+
+ if code_block.syntax.is_none() && code_block.is_fenced {
+ diag.help("mark blocks that do not contain Rust code as text: ```text");
+ }
+
+ diag
+ };
+
+ diag.emit();
+ }
+ }
+}
+
+impl<'a, 'tcx, 'rcx> DocFolder for SyntaxChecker<'a, 'tcx, 'rcx> {
+ fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> {
+ if let Some(dox) = &item.attrs.collapsed_doc_value() {
+ for code_block in markdown::rust_code_blocks(&dox) {
+ self.check_rust_syntax(&item, &dox, code_block);
+ }
+ }
+
+ self.fold_item_recur(item)
+ }
+}
diff --git a/src/librustdoc/passes/collect_intra_doc_links.rs b/src/librustdoc/passes/collect_intra_doc_links.rs
index fdc1c06..3d6096b 100644
--- a/src/librustdoc/passes/collect_intra_doc_links.rs
+++ b/src/librustdoc/passes/collect_intra_doc_links.rs
@@ -6,7 +6,7 @@
use syntax::ast::{self, Ident, NodeId};
use syntax::feature_gate::UnstableFeatures;
use syntax::symbol::Symbol;
-use syntax_pos::{self, DUMMY_SP};
+use syntax_pos::DUMMY_SP;
use std::ops::Range;
@@ -16,6 +16,7 @@
use clean::*;
use passes::{look_for_tests, Pass};
+use super::span_of_attrs;
pub const COLLECT_INTRA_DOC_LINKS: Pass =
Pass::early("collect-intra-doc-links", collect_intra_doc_links,
@@ -440,28 +441,11 @@
None
}
-pub fn span_of_attrs(attrs: &Attributes) -> syntax_pos::Span {
- if attrs.doc_strings.is_empty() {
- return DUMMY_SP;
- }
- let start = attrs.doc_strings[0].span();
- let end = attrs.doc_strings.last().expect("No doc strings provided").span();
- start.to(end)
-}
-
/// Reports a resolution failure diagnostic.
///
-/// Ideally we can report the diagnostic with the actual span in the source where the link failure
-/// occurred. However, there's a mismatch between the span in the source code and the span in the
-/// markdown, so we have to do a bit of work to figure out the correspondence.
-///
-/// It's not too hard to find the span for sugared doc comments (`///` and `/**`), because the
-/// source will match the markdown exactly, excluding the comment markers. However, it's much more
-/// difficult to calculate the spans for unsugared docs, because we have to deal with escaping and
-/// other source features. So, we attempt to find the exact source span of the resolution failure
-/// in sugared docs, but use the span of the documentation attributes themselves for unsugared
-/// docs. Because this span might be overly large, we display the markdown line containing the
-/// failure as a note.
+/// If we cannot find the exact source span of the resolution failure, we use the span of the
+/// documentation attributes themselves. This is a little heavy-handed, so we display the markdown
+/// line containing the failure as a note as well.
fn resolution_failure(
cx: &DocContext,
attrs: &Attributes,
@@ -473,54 +457,7 @@
let msg = format!("`[{}]` cannot be resolved, ignoring it...", path_str);
let mut diag = if let Some(link_range) = link_range {
- let src = cx.sess().source_map().span_to_snippet(sp);
- let is_all_sugared_doc = attrs.doc_strings.iter().all(|frag| match frag {
- DocFragment::SugaredDoc(..) => true,
- _ => false,
- });
-
- if let (Ok(src), true) = (src, is_all_sugared_doc) {
- // The number of markdown lines up to and including the resolution failure.
- let num_lines = dox[..link_range.start].lines().count();
-
- // We use `split_terminator('\n')` instead of `lines()` when counting bytes to ensure
- // that DOS-style line endings do not cause the spans to be calculated incorrectly.
- let mut src_lines = src.split_terminator('\n');
- let mut md_lines = dox.split_terminator('\n').take(num_lines).peekable();
-
- // The number of bytes from the start of the source span to the resolution failure that
- // are *not* part of the markdown, like comment markers.
- let mut extra_src_bytes = 0;
-
- while let Some(md_line) = md_lines.next() {
- loop {
- let source_line = src_lines
- .next()
- .expect("could not find markdown line in source");
-
- match source_line.find(md_line) {
- Some(offset) => {
- extra_src_bytes += if md_lines.peek().is_some() {
- source_line.len() - md_line.len()
- } else {
- offset
- };
- break;
- }
- None => {
- // Since this is a source line that doesn't include a markdown line,
- // we have to count the newline that we split from earlier.
- extra_src_bytes += source_line.len() + 1;
- }
- }
- }
- }
-
- let sp = sp.from_inner_byte_pos(
- link_range.start + extra_src_bytes,
- link_range.end + extra_src_bytes,
- );
-
+ if let Some(sp) = super::source_span_for_markdown_range(cx, dox, &link_range, attrs) {
let mut diag = cx.tcx.struct_span_lint_node(
lint::builtin::INTRA_DOC_LINK_RESOLUTION_FAILURE,
NodeId::from_u32(0),
diff --git a/src/librustdoc/passes/mod.rs b/src/librustdoc/passes/mod.rs
index e897b9a..c9a3a2c 100644
--- a/src/librustdoc/passes/mod.rs
+++ b/src/librustdoc/passes/mod.rs
@@ -8,6 +8,8 @@
use std::mem;
use std::fmt;
use syntax::ast::NodeId;
+use syntax_pos::{DUMMY_SP, Span};
+use std::ops::Range;
use clean::{self, GetDefId, Item};
use core::{DocContext, DocAccessLevels};
@@ -16,8 +18,6 @@
use html::markdown::{find_testable_code, ErrorCodes, LangString};
-use self::collect_intra_doc_links::span_of_attrs;
-
mod collapse_docs;
pub use self::collapse_docs::COLLAPSE_DOCS;
@@ -45,6 +45,9 @@
mod collect_trait_impls;
pub use self::collect_trait_impls::COLLECT_TRAIT_IMPLS;
+mod check_code_block_syntax;
+pub use self::check_code_block_syntax::CHECK_CODE_BLOCK_SYNTAX;
+
/// Represents a single pass.
#[derive(Copy, Clone)]
pub enum Pass {
@@ -135,6 +138,7 @@
STRIP_PRIV_IMPORTS,
PROPAGATE_DOC_CFG,
COLLECT_INTRA_DOC_LINKS,
+ CHECK_CODE_BLOCK_SYNTAX,
COLLECT_TRAIT_IMPLS,
];
@@ -145,6 +149,7 @@
"strip-hidden",
"strip-private",
"collect-intra-doc-links",
+ "check-code-block-syntax",
"collapse-docs",
"unindent-comments",
"propagate-doc-cfg",
@@ -156,6 +161,7 @@
"check-private-items-doc-tests",
"strip-priv-imports",
"collect-intra-doc-links",
+ "check-code-block-syntax",
"collapse-docs",
"unindent-comments",
"propagate-doc-cfg",
@@ -396,3 +402,94 @@
}
}
}
+
+/// Return a span encompassing all the given attributes.
+crate fn span_of_attrs(attrs: &clean::Attributes) -> Span {
+ if attrs.doc_strings.is_empty() {
+ return DUMMY_SP;
+ }
+ let start = attrs.doc_strings[0].span();
+ let end = attrs.doc_strings.last().expect("No doc strings provided").span();
+ start.to(end)
+}
+
+/// Attempts to match a range of bytes from parsed markdown to a `Span` in the source code.
+///
+/// This method will return `None` if we cannot construct a span from the source map or if the
+/// attributes are not all sugared doc comments. It's difficult to calculate the correct span in
+/// that case due to escaping and other source features.
+crate fn source_span_for_markdown_range(
+ cx: &DocContext,
+ markdown: &str,
+ md_range: &Range<usize>,
+ attrs: &clean::Attributes,
+) -> Option<Span> {
+ let is_all_sugared_doc = attrs.doc_strings.iter().all(|frag| match frag {
+ clean::DocFragment::SugaredDoc(..) => true,
+ _ => false,
+ });
+
+ if !is_all_sugared_doc {
+ return None;
+ }
+
+ let snippet = cx
+ .sess()
+ .source_map()
+ .span_to_snippet(span_of_attrs(attrs))
+ .ok()?;
+
+ let starting_line = markdown[..md_range.start].lines().count() - 1;
+ let ending_line = markdown[..md_range.end].lines().count() - 1;
+
+ // We use `split_terminator('\n')` instead of `lines()` when counting bytes so that
+ // we can treat CRLF and LF line endings the same way.
+ let mut src_lines = snippet.split_terminator('\n');
+ let md_lines = markdown.split_terminator('\n');
+
+ // The number of bytes from the source span to the markdown span that are not part
+ // of the markdown, like comment markers.
+ let mut start_bytes = 0;
+ let mut end_bytes = 0;
+
+ 'outer: for (line_no, md_line) in md_lines.enumerate() {
+ loop {
+ let source_line = src_lines.next().expect("could not find markdown in source");
+ match source_line.find(md_line) {
+ Some(offset) => {
+ if line_no == starting_line {
+ start_bytes += offset;
+
+ if starting_line == ending_line {
+ break 'outer;
+ }
+ } else if line_no == ending_line {
+ end_bytes += offset;
+ break 'outer;
+ } else if line_no < starting_line {
+ start_bytes += source_line.len() - md_line.len();
+ } else {
+ end_bytes += source_line.len() - md_line.len();
+ }
+ break;
+ }
+ None => {
+ // Since this is a source line that doesn't include a markdown line,
+ // we have to count the newline that we split from earlier.
+ if line_no <= starting_line {
+ start_bytes += source_line.len() + 1;
+ } else {
+ end_bytes += source_line.len() + 1;
+ }
+ }
+ }
+ }
+ }
+
+ let sp = span_of_attrs(attrs).from_inner_byte_pos(
+ md_range.start + start_bytes,
+ md_range.end + start_bytes + end_bytes,
+ );
+
+ Some(sp)
+}
diff --git a/src/libstd/sys/unix/fast_thread_local.rs b/src/libstd/sys/unix/fast_thread_local.rs
index d48d701d..742ffd1 100644
--- a/src/libstd/sys/unix/fast_thread_local.rs
+++ b/src/libstd/sys/unix/fast_thread_local.rs
@@ -33,30 +33,57 @@
register_dtor_fallback(t, dtor);
}
-// macOS's analog of the above linux function is this _tlv_atexit function.
-// The disassembly of thread_local globals in C++ (at least produced by
-// clang) will have this show up in the output.
+// This implementation is very similar to register_dtor_fallback in
+// sys_common/thread_local.rs. The main difference is that we want to hook into
+// macOS's analog of the above linux function, _tlv_atexit. OSX will run the
+// registered dtors before any TLS slots get freed, and when the main thread
+// exits.
+//
+// Unfortunately, calling _tlv_atexit while tls dtors are running is UB. The
+// workaround below is to register, via _tlv_atexit, a custom DTOR list once per
+// thread. thread_local dtors are pushed to the DTOR list without calling
+// _tlv_atexit.
#[cfg(target_os = "macos")]
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
+ use cell::Cell;
+ use ptr;
+
+ #[thread_local]
+ static REGISTERED: Cell<bool> = Cell::new(false);
+ if !REGISTERED.get() {
+ _tlv_atexit(run_dtors, ptr::null_mut());
+ REGISTERED.set(true);
+ }
+
+ type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>;
+
+ #[thread_local]
+ static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut());
+ if DTORS.get().is_null() {
+ let v: Box<List> = box Vec::new();
+ DTORS.set(Box::into_raw(v));
+ }
+
extern {
fn _tlv_atexit(dtor: unsafe extern fn(*mut u8),
arg: *mut u8);
}
- _tlv_atexit(dtor, t);
+
+ let list: &mut List = &mut *DTORS.get();
+ list.push((t, dtor));
+
+ unsafe extern fn run_dtors(_: *mut u8) {
+ let mut ptr = DTORS.replace(ptr::null_mut());
+ while !ptr.is_null() {
+ let list = Box::from_raw(ptr);
+ for (ptr, dtor) in list.into_iter() {
+ dtor(ptr);
+ }
+ ptr = DTORS.replace(ptr::null_mut());
+ }
+ }
}
pub fn requires_move_before_drop() -> bool {
- // The macOS implementation of TLS apparently had an odd aspect to it
- // where the pointer we have may be overwritten while this destructor
- // is running. Specifically if a TLS destructor re-accesses TLS it may
- // trigger a re-initialization of all TLS variables, paving over at
- // least some destroyed ones with initial values.
- //
- // This means that if we drop a TLS value in place on macOS that we could
- // revert the value to its original state halfway through the
- // destructor, which would be bad!
- //
- // Hence, we use `ptr::read` on macOS (to move to a "safe" location)
- // instead of drop_in_place.
- cfg!(target_os = "macos")
+ false
}
diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs
index efd231e..5d2eb5f 100644
--- a/src/libstd/thread/local.rs
+++ b/src/libstd/thread/local.rs
@@ -69,9 +69,6 @@
/// destroyed, but not all platforms have this guard. Those platforms that do
/// not guard typically have a synthetic limit after which point no more
/// destructors are run.
-/// 3. On macOS, initializing TLS during destruction of other TLS slots can
-/// sometimes cancel *all* destructors for the current thread, whether or not
-/// the slots have already had their destructors run or not.
///
/// [`with`]: ../../std/thread/struct.LocalKey.html#method.with
/// [`thread_local!`]: ../../std/macro.thread_local.html
@@ -604,11 +601,8 @@
}
// Note that this test will deadlock if TLS destructors aren't run (this
- // requires the destructor to be run to pass the test). macOS has a known bug
- // where dtors-in-dtors may cancel other destructors, so we just ignore this
- // test on macOS.
+ // requires the destructor to be run to pass the test).
#[test]
- #[cfg_attr(target_os = "macos", ignore)]
fn dtors_in_dtors_in_dtors() {
struct S1(Sender<()>);
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs
index d57f924..405cf61 100644
--- a/src/libsyntax/ast.rs
+++ b/src/libsyntax/ast.rs
@@ -1295,6 +1295,8 @@
FloatUnsuffixed(Symbol),
/// A boolean literal.
Bool(bool),
+ /// A recovered character literal that contains multiple `char`s, most likely a typo.
+ Err(Symbol),
}
impl LitKind {
@@ -1331,6 +1333,7 @@
| LitKind::ByteStr(..)
| LitKind::Byte(..)
| LitKind::Char(..)
+ | LitKind::Err(..)
| LitKind::Int(_, LitIntType::Unsuffixed)
| LitKind::FloatUnsuffixed(..)
| LitKind::Bool(..) => true,
diff --git a/src/libsyntax/attr/mod.rs b/src/libsyntax/attr/mod.rs
index f6d7590..e5ce6a3 100644
--- a/src/libsyntax/attr/mod.rs
+++ b/src/libsyntax/attr/mod.rs
@@ -666,6 +666,7 @@
} else {
"false"
})), false),
+ LitKind::Err(val) => Token::Literal(token::Lit::Err(val), None),
}
}
diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs
index c01e7f5..df3b49c 100644
--- a/src/libsyntax/ext/quote.rs
+++ b/src/libsyntax/ext/quote.rs
@@ -646,6 +646,7 @@
token::Literal(token::Byte(i), suf) => return mk_lit!("Byte", suf, i),
token::Literal(token::Char(i), suf) => return mk_lit!("Char", suf, i),
+ token::Literal(token::Err(_i), _suf) => return cx.expr(sp, ast::ExprKind::Err),
token::Literal(token::Integer(i), suf) => return mk_lit!("Integer", suf, i),
token::Literal(token::Float(i), suf) => return mk_lit!("Float", suf, i),
token::Literal(token::Str_(i), suf) => return mk_lit!("Str_", suf, i),
diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs
index 0e1c3b4..8827e04 100644
--- a/src/libsyntax/parse/lexer/mod.rs
+++ b/src/libsyntax/parse/lexer/mod.rs
@@ -238,19 +238,6 @@
sr
}
- pub fn new_without_err(sess: &'a ParseSess,
- source_file: Lrc<syntax_pos::SourceFile>,
- override_span: Option<Span>,
- prepend_error_text: &str) -> Result<Self, ()> {
- let mut sr = StringReader::new_raw(sess, source_file, override_span);
- if sr.advance_token().is_err() {
- eprintln!("{}", prepend_error_text);
- sr.emit_fatal_errors();
- return Err(());
- }
- Ok(sr)
- }
-
pub fn new_or_buffered_errs(sess: &'a ParseSess,
source_file: Lrc<syntax_pos::SourceFile>,
override_span: Option<Span>) -> Result<Self, Vec<Diagnostic>> {
@@ -1408,9 +1395,10 @@
// lifetimes shouldn't end with a single quote
// if we find one, then this is an invalid character literal
if self.ch_is('\'') {
- self.fatal_span_verbose(start_with_quote, self.next_pos,
- String::from("character literal may only contain one codepoint"))
- .raise();
+ self.err_span_(start_with_quote, self.next_pos,
+ "character literal may only contain one codepoint");
+ self.bump();
+ return Ok(token::Literal(token::Err(Symbol::intern("??")), None))
}
@@ -1445,7 +1433,7 @@
format!("\"{}\"", &self.src[start..end]),
Applicability::MachineApplicable
).emit();
- return Ok(token::Literal(token::Str_(Symbol::intern("??")), None))
+ return Ok(token::Literal(token::Err(Symbol::intern("??")), None))
}
if self.ch_is('\n') || self.is_eof() || self.ch_is('/') {
// Only attempt to infer single line string literals. If we encounter
diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs
index ddb350f..8d03969 100644
--- a/src/libsyntax/parse/mod.rs
+++ b/src/libsyntax/parse/mod.rs
@@ -466,6 +466,7 @@
match lit {
token::Byte(i) => (true, Some(LitKind::Byte(byte_lit(&i.as_str()).0))),
token::Char(i) => (true, Some(LitKind::Char(char_lit(&i.as_str(), diag).0))),
+ token::Err(i) => (true, Some(LitKind::Err(i))),
// There are some valid suffixes for integer and float literals,
// so all the handling is done internally.
diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs
index 25a4da3..f06e975 100644
--- a/src/libsyntax/parse/token.rs
+++ b/src/libsyntax/parse/token.rs
@@ -60,6 +60,7 @@
pub enum Lit {
Byte(ast::Name),
Char(ast::Name),
+ Err(ast::Name),
Integer(ast::Name),
Float(ast::Name),
Str_(ast::Name),
@@ -73,6 +74,7 @@
match *self {
Byte(_) => "byte literal",
Char(_) => "char literal",
+ Err(_) => "invalid literal",
Integer(_) => "integer literal",
Float(_) => "float literal",
Str_(_) | StrRaw(..) => "string literal",
@@ -471,8 +473,7 @@
Le | EqEq | Ne | Ge | AndAnd | OrOr | Tilde | BinOpEq(..) | At | DotDotDot |
DotDotEq | Comma | Semi | ModSep | RArrow | LArrow | FatArrow | Pound | Dollar |
- Question | OpenDelim(..) | CloseDelim(..) => return None,
-
+ Question | OpenDelim(..) | CloseDelim(..) |
Literal(..) | Ident(..) | Lifetime(..) | Interpolated(..) | DocComment(..) |
Whitespace | Comment | Shebang(..) | Eof => return None,
})
diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs
index c535940..7cecf4b 100644
--- a/src/libsyntax/print/pprust.rs
+++ b/src/libsyntax/print/pprust.rs
@@ -224,6 +224,7 @@
let mut out = match lit {
token::Byte(b) => format!("b'{}'", b),
token::Char(c) => format!("'{}'", c),
+ token::Err(c) => format!("'{}'", c),
token::Float(c) |
token::Integer(c) => c.to_string(),
token::Str_(s) => format!("\"{}\"", s),
@@ -603,6 +604,14 @@
}
match lit.node {
ast::LitKind::Str(st, style) => self.print_string(&st.as_str(), style),
+ ast::LitKind::Err(st) => {
+ let st = st.as_str().escape_debug();
+ let mut res = String::with_capacity(st.len() + 2);
+ res.push('\'');
+ res.push_str(&st);
+ res.push('\'');
+ self.writer().word(res)
+ }
ast::LitKind::Byte(byte) => {
let mut res = String::from("b'");
res.extend(ascii::escape_default(byte).map(|c| c as char));
diff --git a/src/libsyntax_ext/concat.rs b/src/libsyntax_ext/concat.rs
index 807f190..f148f8e 100644
--- a/src/libsyntax_ext/concat.rs
+++ b/src/libsyntax_ext/concat.rs
@@ -23,6 +23,7 @@
match e.node {
ast::ExprKind::Lit(ref lit) => match lit.node {
ast::LitKind::Str(ref s, _)
+ | ast::LitKind::Err(ref s)
| ast::LitKind::Float(ref s, _)
| ast::LitKind::FloatUnsuffixed(ref s) => {
accumulator.push_str(&s.as_str());
diff --git a/src/test/run-pass/try_from.rs b/src/test/run-pass/try_from.rs
new file mode 100644
index 0000000..4522ce3
--- /dev/null
+++ b/src/test/run-pass/try_from.rs
@@ -0,0 +1,37 @@
+// This test relies on `TryFrom` being blanket impl for all `T: Into`
+// and `TryInto` being blanket impl for all `U: TryFrom`
+
+// This test was added to show the motivation for doing this
+// over `TryFrom` being blanket impl for all `T: From`
+
+#![feature(try_from, never_type)]
+
+use std::convert::TryInto;
+
+struct Foo<T> {
+ t: T,
+}
+
+// This fails to compile due to coherence restrictions
+// as of Rust version 1.32.x, therefore it could not be used
+// instead of the `Into` version of the impl, and serves as
+// motivation for a blanket impl for all `T: Into`, instead
+// of a blanket impl for all `T: From`
+/*
+impl<T> From<Foo<T>> for Box<T> {
+ fn from(foo: Foo<T>) -> Box<T> {
+ Box::new(foo.t)
+ }
+}
+*/
+
+impl<T> Into<Vec<T>> for Foo<T> {
+ fn into(self) -> Vec<T> {
+ vec![self.t]
+ }
+}
+
+pub fn main() {
+ let _: Result<Vec<i32>, !> = Foo { t: 10 }.try_into();
+}
+
diff --git a/src/test/rustdoc-ui/invalid-syntax.rs b/src/test/rustdoc-ui/invalid-syntax.rs
index 537816b..924e038 100644
--- a/src/test/rustdoc-ui/invalid-syntax.rs
+++ b/src/test/rustdoc-ui/invalid-syntax.rs
@@ -1,7 +1,66 @@
// compile-pass
-// compile-flags: --error-format=human
/// ```
/// \__________pkt->size___________/ \_result->size_/ \__pkt->size__/
/// ```
pub fn foo() {}
+
+/// ```
+/// |
+/// LL | use foobar::Baz;
+/// | ^^^^^^ did you mean `baz::foobar`?
+/// ```
+pub fn bar() {}
+
+/// ```
+/// valid
+/// ```
+///
+/// ```
+/// \_
+/// ```
+///
+/// ```text
+/// "invalid
+/// ```
+pub fn valid_and_invalid() {}
+
+/// This is a normal doc comment, but...
+///
+/// There's a code block with bad syntax in it:
+///
+/// ```rust
+/// \_
+/// ```
+///
+/// Good thing we tested it!
+pub fn baz() {}
+
+/// Indented block start
+///
+/// code with bad syntax
+/// \_
+///
+/// Indented block end
+pub fn quux() {}
+
+/// Unclosed fence
+///
+/// ```
+/// slkdjf
+pub fn xyzzy() {}
+
+/// Indented code that contains a fence
+///
+/// ```
+pub fn blah() {}
+
+/// ```edition2018
+/// \_
+/// ```
+pub fn blargh() {}
+
+#[doc = "```"]
+/// \_
+#[doc = "```"]
+pub fn crazy_attrs() {}
diff --git a/src/test/rustdoc-ui/invalid-syntax.stderr b/src/test/rustdoc-ui/invalid-syntax.stderr
index b566133..1080038 100644
--- a/src/test/rustdoc-ui/invalid-syntax.stderr
+++ b/src/test/rustdoc-ui/invalid-syntax.stderr
@@ -1,10 +1,97 @@
-Output from rustc:
-error: unknown start of token: /
- --> <stdin>:1:1
- |
-1 | /__________pkt->size___________/ /_result->size_/ /__pkt->size__/
- | ^
+warning: could not parse code block as Rust code
+ --> $DIR/invalid-syntax.rs:3:5
+ |
+LL | /// ```
+ | _____^
+LL | | /// /__________pkt->size___________/ /_result->size_/ /__pkt->size__/
+LL | | /// ```
+ | |_______^
+ |
+ = note: error from rustc: unknown start of token: /
+help: mark blocks that do not contain Rust code as text
+ |
+LL | /// ```text
+ | ^^^^^^^
-warning: Invalid doc comment starting with: `/__________pkt->size___________/ /_result->size_/ /__pkt->size__/`
-(Ignoring this codeblock)
+warning: could not parse code block as Rust code
+ --> $DIR/invalid-syntax.rs:8:5
+ |
+LL | /// ```
+ | _____^
+LL | | /// |
+LL | | /// LL | use foobar::Baz;
+LL | | /// | ^^^^^^ did you mean `baz::foobar`?
+LL | | /// ```
+ | |_______^
+ |
+ = note: error from rustc: unknown start of token: `
+help: mark blocks that do not contain Rust code as text
+ |
+LL | /// ```text
+ | ^^^^^^^
+
+warning: could not parse code block as Rust code
+ --> $DIR/invalid-syntax.rs:19:5
+ |
+LL | /// ```
+ | _____^
+LL | | /// /_
+LL | | /// ```
+ | |_______^
+ |
+ = note: error from rustc: unknown start of token: /
+help: mark blocks that do not contain Rust code as text
+ |
+LL | /// ```text
+ | ^^^^^^^
+
+warning: could not parse code block as Rust code
+ --> $DIR/invalid-syntax.rs:32:5
+ |
+LL | /// ```rust
+ | _____^
+LL | | /// /_
+LL | | /// ```
+ | |_______^
+ |
+ = note: error from rustc: unknown start of token: /
+
+warning: could not parse code block as Rust code
+ --> $DIR/invalid-syntax.rs:41:9
+ |
+LL | /// code with bad syntax
+ | _________^
+LL | | /// /_
+ | |__________^
+ |
+ = note: error from rustc: unknown start of token: /
+
+warning: could not parse code block as Rust code
+ --> $DIR/invalid-syntax.rs:55:9
+ |
+LL | /// ```
+ | ^^^
+ |
+ = note: error from rustc: unknown start of token: `
+
+warning: could not parse code block as Rust code
+ --> $DIR/invalid-syntax.rs:58:5
+ |
+LL | /// ```edition2018
+ | _____^
+LL | | /// /_
+LL | | /// ```
+ | |_______^
+ |
+ = note: error from rustc: unknown start of token: /
+
+warning: doc comment contains an invalid Rust code block
+ --> $DIR/invalid-syntax.rs:63:1
+ |
+LL | / #[doc = "```"]
+LL | | /// /_
+LL | | #[doc = "```"]
+ | |______________^
+ |
+ = help: mark blocks that do not contain Rust code as text: ```text
diff --git a/src/test/rustdoc/bad-codeblock-syntax.rs b/src/test/rustdoc/bad-codeblock-syntax.rs
new file mode 100644
index 0000000..0ab2f68
--- /dev/null
+++ b/src/test/rustdoc/bad-codeblock-syntax.rs
@@ -0,0 +1,27 @@
+// @has bad_codeblock_syntax/fn.foo.html
+// @has - '//*[@class="docblock"]/pre/code' '\_'
+/// ```
+/// \_
+/// ```
+pub fn foo() {}
+
+// @has bad_codeblock_syntax/fn.bar.html
+// @has - '//*[@class="docblock"]/pre/code' '`baz::foobar`'
+/// ```
+/// `baz::foobar`
+/// ```
+pub fn bar() {}
+
+// @has bad_codeblock_syntax/fn.quux.html
+// @has - '//*[@class="docblock"]/pre/code' '\_'
+/// ```rust
+/// \_
+/// ```
+pub fn quux() {}
+
+// @has bad_codeblock_syntax/fn.ok.html
+// @has - '//*[@class="docblock"]/pre/code[@class="language-text"]' '\_'
+/// ```text
+/// \_
+/// ```
+pub fn ok() {}
diff --git a/src/test/ui/e0119/conflict-with-std.stderr b/src/test/ui/e0119/conflict-with-std.stderr
index d94e4dc..c2ae321 100644
--- a/src/test/ui/e0119/conflict-with-std.stderr
+++ b/src/test/ui/e0119/conflict-with-std.stderr
@@ -25,7 +25,7 @@
|
= note: conflicting implementation in crate `core`:
- impl<T, U> std::convert::TryFrom<U> for T
- where T: std::convert::From<U>;
+ where U: std::convert::Into<T>;
error: aborting due to 3 previous errors
diff --git a/src/test/ui/parser/lex-bad-char-literals-2.rs b/src/test/ui/parser/lex-bad-char-literals-2.rs
index 7f85999..1e180f8 100644
--- a/src/test/ui/parser/lex-bad-char-literals-2.rs
+++ b/src/test/ui/parser/lex-bad-char-literals-2.rs
@@ -1,4 +1,4 @@
// This test needs to the last one appearing in this file as it kills the parser
static c: char =
- 'nope' //~ ERROR: character literal may only contain one codepoint: 'nope'
+ 'nope' //~ ERROR: character literal may only contain one codepoint
;
diff --git a/src/test/ui/parser/lex-bad-char-literals-2.stderr b/src/test/ui/parser/lex-bad-char-literals-2.stderr
index a7075b7..7eadb8e 100644
--- a/src/test/ui/parser/lex-bad-char-literals-2.stderr
+++ b/src/test/ui/parser/lex-bad-char-literals-2.stderr
@@ -1,8 +1,13 @@
-error: character literal may only contain one codepoint: 'nope'
+error: character literal may only contain one codepoint
--> $DIR/lex-bad-char-literals-2.rs:3:5
|
-LL | 'nope' //~ ERROR: character literal may only contain one codepoint: 'nope'
+LL | 'nope' //~ ERROR: character literal may only contain one codepoint
| ^^^^^^
-error: aborting due to previous error
+error[E0601]: `main` function not found in crate `lex_bad_char_literals_2`
+ |
+ = note: consider adding a `main` function to `$DIR/lex-bad-char-literals-2.rs`
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0601`.
diff --git a/src/test/ui/parser/lex-bad-char-literals-3.rs b/src/test/ui/parser/lex-bad-char-literals-3.rs
index f874970..5194ff4 100644
--- a/src/test/ui/parser/lex-bad-char-literals-3.rs
+++ b/src/test/ui/parser/lex-bad-char-literals-3.rs
@@ -1,7 +1,7 @@
-// This test needs to the last one appearing in this file as it kills the parser
-static c: char =
- '●●' //~ ERROR: character literal may only contain one codepoint
- //~| ERROR: mismatched types
-;
+static c: char = '●●';
+//~^ ERROR: character literal may only contain one codepoint
-fn main() {}
+fn main() {
+ let ch: &str = '●●';
+ //~^ ERROR: character literal may only contain one codepoint
+}
diff --git a/src/test/ui/parser/lex-bad-char-literals-3.stderr b/src/test/ui/parser/lex-bad-char-literals-3.stderr
index 89f18e3..6462a3c 100644
--- a/src/test/ui/parser/lex-bad-char-literals-3.stderr
+++ b/src/test/ui/parser/lex-bad-char-literals-3.stderr
@@ -1,22 +1,22 @@
error: character literal may only contain one codepoint
- --> $DIR/lex-bad-char-literals-3.rs:3:5
+ --> $DIR/lex-bad-char-literals-3.rs:1:18
|
-LL | '●●' //~ ERROR: character literal may only contain one codepoint
- | ^^^^
+LL | static c: char = '●●';
+ | ^^^^
help: if you meant to write a `str` literal, use double quotes
|
-LL | "●●" //~ ERROR: character literal may only contain one codepoint
- | ^^^^
+LL | static c: char = "●●";
+ | ^^^^
-error[E0308]: mismatched types
- --> $DIR/lex-bad-char-literals-3.rs:3:5
+error: character literal may only contain one codepoint
+ --> $DIR/lex-bad-char-literals-3.rs:5:20
|
-LL | '●●' //~ ERROR: character literal may only contain one codepoint
- | ^^^^ expected char, found reference
+LL | let ch: &str = '●●';
+ | ^^^^
+help: if you meant to write a `str` literal, use double quotes
|
- = note: expected type `char`
- found type `&'static str`
+LL | let ch: &str = "●●";
+ | ^^^^
error: aborting due to 2 previous errors
-For more information about this error, try `rustc --explain E0308`.
diff --git a/src/test/ui/parser/lex-bad-char-literals-4.rs b/src/test/ui/parser/lex-bad-char-literals-4.rs
index 966e2bb..e13f11f 100644
--- a/src/test/ui/parser/lex-bad-char-literals-4.rs
+++ b/src/test/ui/parser/lex-bad-char-literals-4.rs
@@ -1,5 +1,5 @@
//
// This test needs to the last one appearing in this file as it kills the parser
static c: char =
- '● //~ ERROR: character literal may only contain one codepoint: '●
+ '● //~ ERROR: character literal may only contain one codepoint
;
diff --git a/src/test/ui/parser/lex-bad-char-literals-4.stderr b/src/test/ui/parser/lex-bad-char-literals-4.stderr
index 550cb54..881e3d5 100644
--- a/src/test/ui/parser/lex-bad-char-literals-4.stderr
+++ b/src/test/ui/parser/lex-bad-char-literals-4.stderr
@@ -1,7 +1,7 @@
error: character literal may only contain one codepoint: '●
--> $DIR/lex-bad-char-literals-4.rs:4:5
|
-LL | '● //~ ERROR: character literal may only contain one codepoint: '●
+LL | '● //~ ERROR: character literal may only contain one codepoint
| ^^
error: aborting due to previous error
diff --git a/src/test/ui/parser/lex-bad-char-literals-5.rs b/src/test/ui/parser/lex-bad-char-literals-5.rs
index 247289e..0c4339e 100644
--- a/src/test/ui/parser/lex-bad-char-literals-5.rs
+++ b/src/test/ui/parser/lex-bad-char-literals-5.rs
@@ -1,8 +1,7 @@
-//
-// This test needs to the last one appearing in this file as it kills the parser
-static c: char =
- '\x10\x10' //~ ERROR: character literal may only contain one codepoint
- //~| ERROR: mismatched types
-;
+static c: char = '\x10\x10';
+//~^ ERROR: character literal may only contain one codepoint
-fn main() {}
+fn main() {
+ let ch: &str = '\x10\x10';
+ //~^ ERROR: character literal may only contain one codepoint
+}
diff --git a/src/test/ui/parser/lex-bad-char-literals-5.stderr b/src/test/ui/parser/lex-bad-char-literals-5.stderr
index 523d71f..ef02973 100644
--- a/src/test/ui/parser/lex-bad-char-literals-5.stderr
+++ b/src/test/ui/parser/lex-bad-char-literals-5.stderr
@@ -1,22 +1,22 @@
error: character literal may only contain one codepoint
- --> $DIR/lex-bad-char-literals-5.rs:4:5
+ --> $DIR/lex-bad-char-literals-5.rs:1:18
|
-LL | '/x10/x10' //~ ERROR: character literal may only contain one codepoint
- | ^^^^^^^^^^
+LL | static c: char = '/x10/x10';
+ | ^^^^^^^^^^
help: if you meant to write a `str` literal, use double quotes
|
-LL | "/x10/x10" //~ ERROR: character literal may only contain one codepoint
- | ^^^^^^^^^^
+LL | static c: char = "/x10/x10";
+ | ^^^^^^^^^^
-error[E0308]: mismatched types
- --> $DIR/lex-bad-char-literals-5.rs:4:5
+error: character literal may only contain one codepoint
+ --> $DIR/lex-bad-char-literals-5.rs:5:20
|
-LL | '/x10/x10' //~ ERROR: character literal may only contain one codepoint
- | ^^^^^^^^^^ expected char, found reference
+LL | let ch: &str = '/x10/x10';
+ | ^^^^^^^^^^
+help: if you meant to write a `str` literal, use double quotes
|
- = note: expected type `char`
- found type `&'static str`
+LL | let ch: &str = "/x10/x10";
+ | ^^^^^^^^^^
error: aborting due to 2 previous errors
-For more information about this error, try `rustc --explain E0308`.
diff --git a/src/test/ui/parser/lex-bad-char-literals-6.rs b/src/test/ui/parser/lex-bad-char-literals-6.rs
new file mode 100644
index 0000000..4379b4f
--- /dev/null
+++ b/src/test/ui/parser/lex-bad-char-literals-6.rs
@@ -0,0 +1,17 @@
+fn main() {
+ let x: &str = 'ab';
+ //~^ ERROR: character literal may only contain one codepoint
+ let y: char = 'cd';
+ //~^ ERROR: character literal may only contain one codepoint
+ let z = 'ef';
+ //~^ ERROR: character literal may only contain one codepoint
+
+ if x == y {}
+ //~^ ERROR: can't compare `&str` with `char`
+ if y == z {} // no error here
+ if x == z {}
+ //~^ ERROR: can't compare `&str` with `char`
+
+ let a: usize = "";
+ //~^ ERROR: mismatched types
+}
diff --git a/src/test/ui/parser/lex-bad-char-literals-6.stderr b/src/test/ui/parser/lex-bad-char-literals-6.stderr
new file mode 100644
index 0000000..df99726
--- /dev/null
+++ b/src/test/ui/parser/lex-bad-char-literals-6.stderr
@@ -0,0 +1,47 @@
+error: character literal may only contain one codepoint
+ --> $DIR/lex-bad-char-literals-6.rs:2:19
+ |
+LL | let x: &str = 'ab';
+ | ^^^^
+
+error: character literal may only contain one codepoint
+ --> $DIR/lex-bad-char-literals-6.rs:4:19
+ |
+LL | let y: char = 'cd';
+ | ^^^^
+
+error: character literal may only contain one codepoint
+ --> $DIR/lex-bad-char-literals-6.rs:6:13
+ |
+LL | let z = 'ef';
+ | ^^^^
+
+error[E0277]: can't compare `&str` with `char`
+ --> $DIR/lex-bad-char-literals-6.rs:9:10
+ |
+LL | if x == y {}
+ | ^^ no implementation for `&str == char`
+ |
+ = help: the trait `std::cmp::PartialEq<char>` is not implemented for `&str`
+
+error[E0308]: mismatched types
+ --> $DIR/lex-bad-char-literals-6.rs:15:20
+ |
+LL | let a: usize = "";
+ | ^^ expected usize, found reference
+ |
+ = note: expected type `usize`
+ found type `&'static str`
+
+error[E0277]: can't compare `&str` with `char`
+ --> $DIR/lex-bad-char-literals-6.rs:12:10
+ |
+LL | if x == z {}
+ | ^^ no implementation for `&str == char`
+ |
+ = help: the trait `std::cmp::PartialEq<char>` is not implemented for `&str`
+
+error: aborting due to 6 previous errors
+
+Some errors occurred: E0277, E0308.
+For more information about an error, try `rustc --explain E0277`.
diff --git a/src/test/ui/str/str-as-char.fixed b/src/test/ui/str/str-as-char.fixed
index 9d4297b..accead5 100644
--- a/src/test/ui/str/str-as-char.fixed
+++ b/src/test/ui/str/str-as-char.fixed
@@ -1,6 +1,6 @@
// run-rustfix
fn main() {
- println!("●●");
- //~^ ERROR character literal may only contain one codepoint
+ println!("{}", "●●"); //~ ERROR character literal may only contain one codepoint
+ //~^ ERROR format argument must be a string literal
}
diff --git a/src/test/ui/str/str-as-char.rs b/src/test/ui/str/str-as-char.rs
index 710fa74..fb179ec 100644
--- a/src/test/ui/str/str-as-char.rs
+++ b/src/test/ui/str/str-as-char.rs
@@ -1,6 +1,6 @@
// run-rustfix
fn main() {
- println!('●●');
- //~^ ERROR character literal may only contain one codepoint
+ println!('●●'); //~ ERROR character literal may only contain one codepoint
+ //~^ ERROR format argument must be a string literal
}
diff --git a/src/test/ui/str/str-as-char.stderr b/src/test/ui/str/str-as-char.stderr
index 540a1b5..4ca430a 100644
--- a/src/test/ui/str/str-as-char.stderr
+++ b/src/test/ui/str/str-as-char.stderr
@@ -1,12 +1,22 @@
error: character literal may only contain one codepoint
--> $DIR/str-as-char.rs:4:14
|
-LL | println!('●●');
+LL | println!('●●'); //~ ERROR character literal may only contain one codepoint
| ^^^^
help: if you meant to write a `str` literal, use double quotes
|
-LL | println!("●●");
+LL | println!("●●"); //~ ERROR character literal may only contain one codepoint
| ^^^^
-error: aborting due to previous error
+error: format argument must be a string literal
+ --> $DIR/str-as-char.rs:4:14
+ |
+LL | println!('●●'); //~ ERROR character literal may only contain one codepoint
+ | ^^^^
+help: you might be missing a string literal to format with
+ |
+LL | println!("{}", '●●'); //~ ERROR character literal may only contain one codepoint
+ | ^^^^^
+
+error: aborting due to 2 previous errors