| use crate::ast::{AngleBracketedArgs, ParenthesizedArgs, AttrStyle, BareFnTy}; |
| use crate::ast::{GenericBound, TraitBoundModifier}; |
| use crate::ast::Unsafety; |
| use crate::ast::{Mod, AnonConst, Arg, Arm, Guard, Attribute, BindingMode, TraitItemKind}; |
| use crate::ast::Block; |
| use crate::ast::{BlockCheckMode, CaptureBy, Movability}; |
| use crate::ast::{Constness, Crate}; |
| use crate::ast::Defaultness; |
| use crate::ast::EnumDef; |
| use crate::ast::{Expr, ExprKind, RangeLimits}; |
| use crate::ast::{Field, FnDecl, FnHeader}; |
| use crate::ast::{ForeignItem, ForeignItemKind, FunctionRetTy}; |
| use crate::ast::{GenericParam, GenericParamKind}; |
| use crate::ast::GenericArg; |
| use crate::ast::{Ident, ImplItem, IsAsync, IsAuto, Item, ItemKind}; |
| use crate::ast::{Label, Lifetime, Lit, LitKind}; |
| use crate::ast::Local; |
| use crate::ast::MacStmtStyle; |
| use crate::ast::{Mac, Mac_, MacDelimiter}; |
| use crate::ast::{MutTy, Mutability}; |
| use crate::ast::{Pat, PatKind, PathSegment}; |
| use crate::ast::{PolyTraitRef, QSelf}; |
| use crate::ast::{Stmt, StmtKind}; |
| use crate::ast::{VariantData, StructField}; |
| use crate::ast::StrStyle; |
| use crate::ast::SelfKind; |
| use crate::ast::{TraitItem, TraitRef, TraitObjectSyntax}; |
| use crate::ast::{Ty, TyKind, TypeBinding, GenericBounds}; |
| use crate::ast::{Visibility, VisibilityKind, WhereClause, CrateSugar}; |
| use crate::ast::{UseTree, UseTreeKind}; |
| use crate::ast::{BinOpKind, UnOp}; |
| use crate::ast::{RangeEnd, RangeSyntax}; |
| use crate::{ast, attr}; |
| use crate::ext::base::DummyResult; |
| use crate::source_map::{self, SourceMap, Spanned, respan}; |
| use crate::parse::{self, SeqSep, classify, token}; |
| use crate::parse::lexer::{TokenAndSpan, UnmatchedBrace}; |
| use crate::parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration}; |
| use crate::parse::token::DelimToken; |
| use crate::parse::{new_sub_parser_from_file, ParseSess, Directory, DirectoryOwnership}; |
| use crate::util::parser::{AssocOp, Fixity}; |
| use crate::print::pprust; |
| use crate::ptr::P; |
| use crate::parse::PResult; |
| use crate::ThinVec; |
| use crate::tokenstream::{self, DelimSpan, TokenTree, TokenStream, TreeAndJoint}; |
| use crate::symbol::{Symbol, keywords}; |
| |
| use errors::{Applicability, DiagnosticBuilder, DiagnosticId}; |
| use rustc_target::spec::abi::{self, Abi}; |
| use syntax_pos::{Span, MultiSpan, BytePos, FileName}; |
| use log::{debug, trace}; |
| |
| use std::borrow::Cow; |
| use std::cmp; |
| use std::mem; |
| use std::path::{self, Path, PathBuf}; |
| use std::slice; |
| |
#[derive(Debug)]
/// Whether the type alias or associated type is a concrete type or an existential type
pub enum AliasKind {
    /// Just a new name for the same type; the aliased type is stored directly.
    Weak(P<Ty>),
    /// Only trait impls of the type will be usable, not the actual type itself;
    /// only the trait bounds are recorded, not a concrete type.
    Existential(GenericBounds),
}
| |
bitflags::bitflags! {
    /// Parsing restrictions, stored as a bit set so that several can be in
    /// force at once. NOTE(review): the sites that consult these flags are
    /// elsewhere in the file, not visible in this chunk.
    struct Restrictions: u8 {
        const STMT_EXPR = 1 << 0;
        const NO_STRUCT_LITERAL = 1 << 1;
    }
}
| |
/// The (identifier, item kind, optional extra attributes) triple produced while parsing an item.
type ItemInfo = (Ident, ItemKind, Option<Vec<Attribute>>);
| |
/// Specifies how to parse a path.
///
/// The style chiefly controls whether generic arguments require the `::`
/// disambiguator in the current position.
#[derive(Copy, Clone, PartialEq)]
pub enum PathStyle {
    /// In some contexts, notably in expressions, paths with generic arguments are ambiguous
    /// with something else. For example, in expressions `segment < ....` can be interpreted
    /// as a comparison and `segment ( ....` can be interpreted as a function call.
    /// In all such contexts the non-path interpretation is preferred by default for practical
    /// reasons, but the path interpretation can be forced by the disambiguator `::`, e.g.
    /// `x<y>` - comparisons, `x::<y>` - unambiguously a path.
    Expr,
    /// In other contexts, notably in types, no ambiguity exists and paths can be written
    /// without the disambiguator, e.g., `x<y>` - unambiguously a path.
    /// Paths with disambiguators are still accepted, `x::<Y>` - unambiguously a path too.
    Type,
    /// A path with generic arguments disallowed, e.g., `foo::bar::Baz`, used in imports,
    /// visibilities or attributes.
    /// Technically, this variant is unnecessary and e.g., `Expr` can be used instead
    /// (paths in "mod" contexts have to be checked later for absence of generic arguments
    /// anyway, due to macros), but it is used to avoid weird suggestions about expected
    /// tokens when something goes wrong.
    Mod,
}
| |
/// How error recovery should treat statement boundaries.
/// NOTE(review): the consuming recovery routines are not visible in this
/// chunk; variant semantics should be confirmed against them.
#[derive(Clone, Copy, PartialEq, Debug)]
enum SemiColonMode {
    Break,
    Ignore,
    Comma,
}
| |
/// How error recovery should treat block boundaries.
/// NOTE(review): consumers are elsewhere in the file — confirm semantics there.
#[derive(Clone, Copy, PartialEq, Debug)]
enum BlockMode {
    Break,
    Ignore,
}
| |
/// Possibly accepts an `token::Interpolated` expression (a pre-parsed expression
/// dropped into the token stream, which happens while parsing the result of
/// macro expansion). Placement of these is not as complex as I feared it would
/// be. The important thing is to make sure that lookahead doesn't balk at
/// `token::Interpolated` tokens.
macro_rules! maybe_whole_expr {
    ($p:expr) => {
        if let token::Interpolated(nt) = $p.token.clone() {
            match *nt {
                // A pre-parsed expression or literal is returned as-is.
                token::NtExpr(ref e) | token::NtLiteral(ref e) => {
                    $p.bump();
                    return Ok((*e).clone());
                }
                // A pre-parsed path is wrapped into a path expression.
                token::NtPath(ref path) => {
                    $p.bump();
                    let span = $p.span;
                    let kind = ExprKind::Path(None, (*path).clone());
                    return Ok($p.mk_expr(span, kind, ThinVec::new()));
                }
                // A pre-parsed block is wrapped into an unlabeled block expression.
                token::NtBlock(ref block) => {
                    $p.bump();
                    let span = $p.span;
                    let kind = ExprKind::Block((*block).clone(), None);
                    return Ok($p.mk_expr(span, kind, ThinVec::new()));
                }
                // Any other interpolated fragment: fall through to normal parsing.
                _ => {},
            };
        }
    }
}
| |
/// As maybe_whole_expr, but for things other than expressions.
/// If the current token is an interpolated `token::$constructor` fragment,
/// consumes it, binds its payload to `$x`, and early-returns `Ok($e)`.
macro_rules! maybe_whole {
    ($p:expr, $constructor:ident, |$x:ident| $e:expr) => {
        if let token::Interpolated(nt) = $p.token.clone() {
            if let token::$constructor($x) = (*nt).clone() {
                $p.bump();
                return Ok($e);
            }
        }
    };
}
| |
| fn maybe_append(mut lhs: Vec<Attribute>, mut rhs: Option<Vec<Attribute>>) -> Vec<Attribute> { |
| if let Some(ref mut rhs) = rhs { |
| lhs.append(rhs); |
| } |
| lhs |
| } |
| |
/// A coarse classification of the previously-consumed token, consulted by
/// diagnostics and span logic (e.g. `parse_ident_common` checks `DocComment`,
/// `interpolated_or_expr_span` checks `Interpolated`).
#[derive(Debug, Clone, Copy, PartialEq)]
enum PrevTokenKind {
    DocComment,
    Comma,
    Plus,
    Interpolated,
    Eof,
    Ident,
    /// Any token not covered by the other variants.
    Other,
}
| |
/// AST nodes that can be rebuilt as a (possibly qualified) path during error
/// recovery.
trait RecoverQPath: Sized {
    /// Path style to use when re-parsing; `Ty` overrides this to `PathStyle::Type`.
    const PATH_STYLE: PathStyle = PathStyle::Expr;
    /// The type equivalent of `self`, if one can be formed.
    fn to_ty(&self) -> Option<P<Ty>>;
    /// Rebuilds `self` as a plain path node, keeping the original node id.
    fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self;
    /// Pretty-prints `self` for use in diagnostics.
    fn to_string(&self) -> String;
}
| |
impl RecoverQPath for Ty {
    // Types use type-style paths (no `::` needed before generic args).
    const PATH_STYLE: PathStyle = PathStyle::Type;
    fn to_ty(&self) -> Option<P<Ty>> {
        // A type is trivially its own type equivalent.
        Some(P(self.clone()))
    }
    fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self {
        Self { span: path.span, node: TyKind::Path(qself, path), id: self.id }
    }
    fn to_string(&self) -> String {
        pprust::ty_to_string(self)
    }
}
| |
impl RecoverQPath for Pat {
    fn to_ty(&self) -> Option<P<Ty>> {
        // Delegates to `Pat`'s *inherent* `to_ty` method (inherent methods
        // take precedence over trait methods, so this is not self-recursion).
        self.to_ty()
    }
    fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self {
        Self { span: path.span, node: PatKind::Path(qself, path), id: self.id }
    }
    fn to_string(&self) -> String {
        pprust::pat_to_string(self)
    }
}
| |
impl RecoverQPath for Expr {
    fn to_ty(&self) -> Option<P<Ty>> {
        // Delegates to `Expr`'s *inherent* `to_ty` method, not this trait method.
        self.to_ty()
    }
    fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self {
        // Unlike `Ty`/`Pat`, expressions also carry attributes, which are preserved.
        Self { span: path.span, node: ExprKind::Path(qself, path),
               id: self.id, attrs: self.attrs.clone() }
    }
    fn to_string(&self) -> String {
        pprust::expr_to_string(self)
    }
}
| |
| /* ident is handled by common.rs */ |
| |
#[derive(Clone)]
pub struct Parser<'a> {
    pub sess: &'a ParseSess,
    /// the current token:
    pub token: token::Token,
    /// the span of the current token:
    pub span: Span,
    /// span of the current macro metavariable, if the current token came from
    /// one. NOTE(review): set in `process_potential_macro_variable`, which is
    /// not visible in this chunk — confirm there.
    meta_var_span: Option<Span>,
    /// the span of the previous token:
    pub prev_span: Span,
    /// the previous token kind
    prev_token_kind: PrevTokenKind,
    /// restrictions currently in force (see `Restrictions`)
    restrictions: Restrictions,
    /// Used to determine the path to externally loaded source files
    crate directory: Directory<'a>,
    /// Whether to parse sub-modules in other files.
    pub recurse_into_file_modules: bool,
    /// Name of the root module this parser originated from. If `None`, then the
    /// name is not known. This does not change while the parser is descending
    /// into modules, and sub-parsers have new values for this name.
    pub root_module_name: Option<String>,
    /// tokens the parser expected at the current position; used to build
    /// "expected one of ..." diagnostics (see `expect_one_of`)
    crate expected_tokens: Vec<TokenType>,
    /// source of tokens (see `TokenCursor`)
    token_cursor: TokenCursor,
    /// whether doc comments are rewritten into `#[doc]` attribute tokens as
    /// they are read (see `TokenCursor::next_desugared`)
    desugar_doc_comments: bool,
    /// Whether we should configure out of line modules as we parse.
    pub cfg_mods: bool,
    /// This field is used to keep track of how many left angle brackets we have seen. This is
    /// required in order to detect extra leading left angle brackets (`<` characters) and error
    /// appropriately.
    ///
    /// See the comments in the `parse_path_segment` function for more details.
    crate unmatched_angle_bracket_count: u32,
    crate max_angle_bracket_count: u32,
    /// List of all unclosed delimiters found by the lexer. If an entry is used for error recovery
    /// it gets removed from here. Every entry left at the end gets emitted as an independent
    /// error.
    crate unclosed_delims: Vec<UnmatchedBrace>,
}
| |
| |
/// A cursor over a stream of token trees: the innermost frame currently being
/// read, plus the stack of suspended outer frames to return to when the
/// current one is exhausted.
#[derive(Clone)]
struct TokenCursor {
    frame: TokenCursorFrame,
    stack: Vec<TokenCursorFrame>,
}
| |
/// One level of token-tree nesting being iterated by `TokenCursor`.
#[derive(Clone)]
struct TokenCursorFrame {
    /// Delimiter surrounding this frame's trees (`NoDelim` for the top level).
    delim: token::DelimToken,
    /// Spans of the opening and closing delimiters.
    span: DelimSpan,
    /// Whether the synthetic open-delimiter token has been produced yet.
    open_delim: bool,
    tree_cursor: tokenstream::Cursor,
    /// Whether the synthetic close-delimiter token has been produced yet.
    close_delim: bool,
    last_token: LastToken,
}
| |
/// This is used in `TokenCursorFrame` above to track tokens that are consumed
/// by the parser, and then that's transitively used to record the tokens that
/// each parse AST item is created with.
///
/// Right now this has two states, either collecting tokens or not collecting
/// tokens. If we're collecting tokens we just save everything off into a local
/// `Vec`. This should eventually though likely save tokens from the original
/// token stream and just use slicing of token streams to avoid creation of a
/// whole new vector.
///
/// The second state is where we're passively not recording tokens, but the last
/// token is still tracked for when we want to start recording tokens. This
/// "last token" means that when we start recording tokens we'll want to ensure
/// that this, the first token, is included in the output.
///
/// You can find some more example usage of this in the `collect_tokens` method
/// on the parser.
#[derive(Clone)]
enum LastToken {
    /// Actively recording: every consumed tree is appended.
    Collecting(Vec<TreeAndJoint>),
    /// Not recording: only the most recently consumed tree is remembered.
    Was(Option<TreeAndJoint>),
}
| |
| impl TokenCursorFrame { |
| fn new(sp: DelimSpan, delim: DelimToken, tts: &TokenStream) -> Self { |
| TokenCursorFrame { |
| delim: delim, |
| span: sp, |
| open_delim: delim == token::NoDelim, |
| tree_cursor: tts.clone().into_trees(), |
| close_delim: delim == token::NoDelim, |
| last_token: LastToken::Was(None), |
| } |
| } |
| } |
| |
| impl TokenCursor { |
| fn next(&mut self) -> TokenAndSpan { |
| loop { |
| let tree = if !self.frame.open_delim { |
| self.frame.open_delim = true; |
| TokenTree::open_tt(self.frame.span.open, self.frame.delim) |
| } else if let Some(tree) = self.frame.tree_cursor.next() { |
| tree |
| } else if !self.frame.close_delim { |
| self.frame.close_delim = true; |
| TokenTree::close_tt(self.frame.span.close, self.frame.delim) |
| } else if let Some(frame) = self.stack.pop() { |
| self.frame = frame; |
| continue |
| } else { |
| return TokenAndSpan { tok: token::Eof, sp: syntax_pos::DUMMY_SP } |
| }; |
| |
| match self.frame.last_token { |
| LastToken::Collecting(ref mut v) => v.push(tree.clone().into()), |
| LastToken::Was(ref mut t) => *t = Some(tree.clone().into()), |
| } |
| |
| match tree { |
| TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp }, |
| TokenTree::Delimited(sp, delim, tts) => { |
| let frame = TokenCursorFrame::new(sp, delim, &tts); |
| self.stack.push(mem::replace(&mut self.frame, frame)); |
| } |
| } |
| } |
| } |
| |
| fn next_desugared(&mut self) -> TokenAndSpan { |
| let (sp, name) = match self.next() { |
| TokenAndSpan { sp, tok: token::DocComment(name) } => (sp, name), |
| tok => return tok, |
| }; |
| |
| let stripped = strip_doc_comment_decoration(&name.as_str()); |
| |
| // Searches for the occurrences of `"#*` and returns the minimum number of `#`s |
| // required to wrap the text. |
| let mut num_of_hashes = 0; |
| let mut count = 0; |
| for ch in stripped.chars() { |
| count = match ch { |
| '"' => 1, |
| '#' if count > 0 => count + 1, |
| _ => 0, |
| }; |
| num_of_hashes = cmp::max(num_of_hashes, count); |
| } |
| |
| let delim_span = DelimSpan::from_single(sp); |
| let body = TokenTree::Delimited( |
| delim_span, |
| token::Bracket, |
| [TokenTree::Token(sp, token::Ident(ast::Ident::from_str("doc"), false)), |
| TokenTree::Token(sp, token::Eq), |
| TokenTree::Token(sp, token::Literal( |
| token::StrRaw(Symbol::intern(&stripped), num_of_hashes), None)) |
| ] |
| .iter().cloned().collect::<TokenStream>().into(), |
| ); |
| |
| self.stack.push(mem::replace(&mut self.frame, TokenCursorFrame::new( |
| delim_span, |
| token::NoDelim, |
| &if doc_comment_style(&name.as_str()) == AttrStyle::Inner { |
| [TokenTree::Token(sp, token::Pound), TokenTree::Token(sp, token::Not), body] |
| .iter().cloned().collect::<TokenStream>().into() |
| } else { |
| [TokenTree::Token(sp, token::Pound), body] |
| .iter().cloned().collect::<TokenStream>().into() |
| }, |
| ))); |
| |
| self.next() |
| } |
| } |
| |
/// A token (or class of tokens) the parser expected at some position; used to
/// build the "expected one of ..." diagnostics in `expect_one_of`.
#[derive(Clone, PartialEq)]
crate enum TokenType {
    /// A specific concrete token.
    Token(token::Token),
    /// A specific keyword.
    Keyword(keywords::Keyword),
    Operator,
    Lifetime,
    Ident,
    Path,
    Type,
    Const,
}
| |
impl TokenType {
    /// Renders the expectation for a diagnostic message; concrete tokens and
    /// keywords are backtick-quoted, token classes get an English description.
    fn to_string(&self) -> String {
        match *self {
            TokenType::Token(ref t) => format!("`{}`", pprust::token_to_string(t)),
            TokenType::Keyword(kw) => format!("`{}`", kw.name()),
            TokenType::Operator => "an operator".to_string(),
            TokenType::Lifetime => "lifetime".to_string(),
            TokenType::Ident => "identifier".to_string(),
            TokenType::Path => "path".to_string(),
            TokenType::Type => "type".to_string(),
            TokenType::Const => "const".to_string(),
        }
    }
}
| |
| /// Returns `true` if `IDENT t` can start a type -- `IDENT::a::b`, `IDENT<u8, u8>`, |
| /// `IDENT<<u8 as Trait>::AssocTy>`. |
| /// |
| /// Types can also be of the form `IDENT(u8, u8) -> u8`, however this assumes |
| /// that `IDENT` is not the ident of a fn trait. |
| fn can_continue_type_after_non_fn_ident(t: &token::Token) -> bool { |
| t == &token::ModSep || t == &token::Lt || |
| t == &token::BinOp(token::Shl) |
| } |
| |
/// Information about the path to a module.
pub struct ModulePath {
    /// The module's name as written in source.
    name: String,
    /// Whether a candidate file for the module exists on disk.
    path_exists: bool,
    /// Either the resolved file, or a structured error describing why
    /// resolution failed.
    pub result: Result<ModulePathSuccess, Error>,
}
| |
/// A successfully resolved out-of-line module file.
pub struct ModulePathSuccess {
    pub path: PathBuf,
    pub directory_ownership: DirectoryOwnership,
    // NOTE(review): the code consuming this flag is not visible in this chunk;
    // presumably it marks paths that should produce a warning — confirm.
    warn: bool,
}
| |
/// Structured parser errors; each is rendered into a diagnostic by
/// `Error::span_err` below.
pub enum Error {
    /// E0583: no file was found for an out-of-line module.
    FileNotFoundForModule {
        mod_name: String,
        default_path: String,
        secondary_path: String,
        dir_path: String,
    },
    /// E0584: both candidate files for a module exist.
    DuplicatePaths {
        mod_name: String,
        default_path: String,
        secondary_path: String,
    },
    /// E0585: a doc comment that documents nothing.
    UselessDocComment,
    /// E0586: `..=` with no right-hand bound.
    InclusiveRangeWithNoEnd,
}
| |
impl Error {
    /// Builds (but does not emit) the diagnostic for this error at span `sp`.
    fn span_err<S: Into<MultiSpan>>(self,
                                        sp: S,
                                        handler: &errors::Handler) -> DiagnosticBuilder<'_> {
        match self {
            Error::FileNotFoundForModule { ref mod_name,
                                           ref default_path,
                                           ref secondary_path,
                                           ref dir_path } => {
                let mut err = struct_span_err!(handler, sp, E0583,
                                               "file not found for module `{}`", mod_name);
                err.help(&format!("name the file either {} or {} inside the directory \"{}\"",
                                  default_path,
                                  secondary_path,
                                  dir_path));
                err
            }
            Error::DuplicatePaths { ref mod_name, ref default_path, ref secondary_path } => {
                let mut err = struct_span_err!(handler, sp, E0584,
                                               "file for module `{}` found at both {} and {}",
                                               mod_name,
                                               default_path,
                                               secondary_path);
                err.help("delete or rename one of them to remove the ambiguity");
                err
            }
            Error::UselessDocComment => {
                let mut err = struct_span_err!(handler, sp, E0585,
                                  "found a documentation comment that doesn't document anything");
                err.help("doc comments must come before what they document, maybe a comment was \
                          intended with `//`?");
                err
            }
            Error::InclusiveRangeWithNoEnd => {
                let mut err = struct_span_err!(handler, sp, E0586,
                                               "inclusive range with no end");
                err.help("inclusive ranges must be bounded at the end (`..=b` or `a..=b`)");
                err
            }
        }
    }
}
| |
/// How much of a left-hand-side expression has been parsed so far (see the
/// `From` impls below for how each state is constructed).
#[derive(Debug)]
enum LhsExpr {
    /// Nothing parsed yet.
    NotYetParsed,
    /// Only the expression's outer attributes have been parsed.
    AttributesParsed(ThinVec<Attribute>),
    /// The whole expression is available.
    AlreadyParsed(P<Expr>),
}
| |
| impl From<Option<ThinVec<Attribute>>> for LhsExpr { |
| fn from(o: Option<ThinVec<Attribute>>) -> Self { |
| if let Some(attrs) = o { |
| LhsExpr::AttributesParsed(attrs) |
| } else { |
| LhsExpr::NotYetParsed |
| } |
| } |
| } |
| |
impl From<P<Expr>> for LhsExpr {
    /// A complete expression is wrapped directly as `AlreadyParsed`.
    fn from(expr: P<Expr>) -> Self {
        LhsExpr::AlreadyParsed(expr)
    }
}
| |
| /// Creates a placeholder argument. |
| fn dummy_arg(span: Span) -> Arg { |
| let ident = Ident::new(keywords::Invalid.name(), span); |
| let pat = P(Pat { |
| id: ast::DUMMY_NODE_ID, |
| node: PatKind::Ident(BindingMode::ByValue(Mutability::Immutable), ident, None), |
| span, |
| }); |
| let ty = Ty { |
| node: TyKind::Err, |
| span, |
| id: ast::DUMMY_NODE_ID |
| }; |
| Arg { ty: P(ty), pat: pat, id: ast::DUMMY_NODE_ID } |
| } |
| |
/// Whether an unexpected token should be recorded into `expected_tokens`.
/// NOTE(review): consumers are elsewhere in the file, not in this chunk.
#[derive(Copy, Clone, Debug)]
enum TokenExpectType {
    Expect,
    NoExpect,
}
| |
| impl<'a> Parser<'a> { |
    /// Creates a parser over `tokens`. When `directory` is `None`, the module
    /// directory is derived from the file containing the first token's span.
    pub fn new(sess: &'a ParseSess,
               tokens: TokenStream,
               directory: Option<Directory<'a>>,
               recurse_into_file_modules: bool,
               desugar_doc_comments: bool)
               -> Self {
        let mut parser = Parser {
            sess,
            // Placeholder token/span; replaced by the first real token below.
            token: token::Whitespace,
            span: syntax_pos::DUMMY_SP,
            prev_span: syntax_pos::DUMMY_SP,
            meta_var_span: None,
            prev_token_kind: PrevTokenKind::Other,
            restrictions: Restrictions::empty(),
            recurse_into_file_modules,
            // Placeholder directory; possibly overwritten below.
            directory: Directory {
                path: Cow::from(PathBuf::new()),
                ownership: DirectoryOwnership::Owned { relative: None }
            },
            root_module_name: None,
            expected_tokens: Vec::new(),
            token_cursor: TokenCursor {
                frame: TokenCursorFrame::new(
                    DelimSpan::dummy(),
                    token::NoDelim,
                    &tokens.into(),
                ),
                stack: Vec::new(),
            },
            desugar_doc_comments,
            cfg_mods: true,
            unmatched_angle_bracket_count: 0,
            max_angle_bracket_count: 0,
            unclosed_delims: Vec::new(),
        };

        // Prime the parser with the first token.
        let tok = parser.next_tok();
        parser.token = tok.tok;
        parser.span = tok.sp;

        if let Some(directory) = directory {
            parser.directory = directory;
        } else if !parser.span.is_dummy() {
            // Derive the directory from the file the first token came from.
            if let FileName::Real(mut path) = sess.source_map().span_to_unmapped_path(parser.span) {
                path.pop();
                parser.directory.path = Cow::from(path);
            }
        }

        parser.process_potential_macro_variable();
        parser
    }
| |
| fn next_tok(&mut self) -> TokenAndSpan { |
| let mut next = if self.desugar_doc_comments { |
| self.token_cursor.next_desugared() |
| } else { |
| self.token_cursor.next() |
| }; |
| if next.sp.is_dummy() { |
| // Tweak the location for better diagnostics, but keep syntactic context intact. |
| next.sp = self.prev_span.with_ctxt(next.sp.ctxt()); |
| } |
| next |
| } |
| |
    /// Converts the current token to a string using `self`'s reader.
    pub fn this_token_to_string(&self) -> String {
        pprust::token_to_string(&self.token)
    }
| |
    /// A short description of the current token's class ("keyword",
    /// "doc comment", ...), or `None` for an ordinary token.
    fn token_descr(&self) -> Option<&'static str> {
        Some(match &self.token {
            // Guard order matters: special idents are also reserved, so they
            // are matched first.
            t if t.is_special_ident() => "reserved identifier",
            t if t.is_used_keyword() => "keyword",
            t if t.is_unused_keyword() => "reserved keyword",
            token::DocComment(..) => "doc comment",
            _ => return None,
        })
    }
| |
| fn this_token_descr(&self) -> String { |
| if let Some(prefix) = self.token_descr() { |
| format!("{} `{}`", prefix, self.this_token_to_string()) |
| } else { |
| format!("`{}`", self.this_token_to_string()) |
| } |
| } |
| |
    /// Reports token `t` as unexpected, pointing the diagnostic at the span of
    /// the *previous* token.
    fn unexpected_last<T>(&self, t: &token::Token) -> PResult<'a, T> {
        let token_str = pprust::token_to_string(t);
        Err(self.span_fatal(self.prev_span, &format!("unexpected token: `{}`", token_str)))
    }
| |
    /// Reports the current token as unexpected; always returns `Err` (calling
    /// `expect_one_of` with no expectations cannot succeed).
    crate fn unexpected<T>(&mut self) -> PResult<'a, T> {
        match self.expect_one_of(&[], &[]) {
            Err(e) => Err(e),
            Ok(_) => unreachable!(),
        }
    }
| |
    /// Expects and consumes the token `t`. Signals an error if the next token is not `t`.
    /// Returns `Ok(true)` when an error was recovered from, `Ok(false)` on a
    /// clean match.
    pub fn expect(&mut self, t: &token::Token) -> PResult<'a, bool /* recovered */> {
        if self.expected_tokens.is_empty() {
            if self.token == *t {
                self.bump();
                Ok(false)
            } else {
                let token_str = pprust::token_to_string(t);
                let this_token_str = self.this_token_descr();
                let mut err = self.fatal(&format!("expected `{}`, found {}",
                                                  token_str,
                                                  this_token_str));

                let sp = if self.token == token::Token::Eof {
                    // EOF, don't want to point at the following char, but rather the last token
                    self.prev_span
                } else {
                    self.sess.source_map().next_point(self.prev_span)
                };
                let label_exp = format!("expected `{}`", token_str);
                // Try recovering by assuming a lexer-detected unclosed delimiter
                // belongs here; on success, the helper emits the diagnostic.
                match self.recover_closing_delimiter(&[t.clone()], err) {
                    Err(e) => err = e,
                    Ok(recovered) => {
                        return Ok(recovered);
                    }
                }
                let cm = self.sess.source_map();
                match (cm.lookup_line(self.span.lo()), cm.lookup_line(sp.lo())) {
                    (Ok(ref a), Ok(ref b)) if a.line == b.line => {
                        // When the spans are in the same line, it means that the only content
                        // between them is whitespace, point only at the found token.
                        err.span_label(self.span, label_exp);
                    }
                    _ => {
                        err.span_label(sp, label_exp);
                        err.span_label(self.span, "unexpected token");
                    }
                }
                Err(err)
            }
        } else {
            // There are outstanding expectations; defer to the richer
            // "expected one of ..." machinery.
            self.expect_one_of(slice::from_ref(t), &[])
        }
    }
| |
    /// Attempts to recover from a missing closing delimiter by consuming one of
    /// the lexer's recorded unclosed delimiters. On success, emits `err` with a
    /// suggestion and returns `Ok(true)`; otherwise hands `err` back unemitted.
    fn recover_closing_delimiter(
        &mut self,
        tokens: &[token::Token],
        mut err: DiagnosticBuilder<'a>,
    ) -> PResult<'a, bool> {
        let mut pos = None;
        // we want to use the last closing delim that would apply
        for (i, unmatched) in self.unclosed_delims.iter().enumerate().rev() {
            if tokens.contains(&token::CloseDelim(unmatched.expected_delim))
                && Some(self.span) > unmatched.unclosed_span
            {
                pos = Some(i);
            }
        }
        match pos {
            Some(pos) => {
                // Recover and assume that the detected unclosed delimiter was meant for
                // this location. Emit the diagnostic and act as if the delimiter was
                // present for the parser's sake.

                // Don't attempt to recover from this unclosed delimiter more than once.
                let unmatched = self.unclosed_delims.remove(pos);
                let delim = TokenType::Token(token::CloseDelim(unmatched.expected_delim));

                // We want to suggest the inclusion of the closing delimiter where it makes
                // the most sense, which is immediately after the last token:
                //
                //  {foo(bar {}}
                //      -      ^
                //      |      |
                //      |      help: `)` may belong here (FIXME: #58270)
                //      |
                //      unclosed delimiter
                if let Some(sp) = unmatched.unclosed_span {
                    err.span_label(sp, "unclosed delimiter");
                }
                err.span_suggestion_short(
                    self.sess.source_map().next_point(self.prev_span),
                    &format!("{} may belong here", delim.to_string()),
                    delim.to_string(),
                    Applicability::MaybeIncorrect,
                );
                err.emit();
                self.expected_tokens.clear();  // reduce errors
                Ok(true)
            }
            _ => Err(err),
        }
    }
| |
    /// Expect next token to be edible or inedible token. If edible,
    /// then consume it; if inedible, then return without consuming
    /// anything. Signal a fatal error if next token is unexpected.
    pub fn expect_one_of(
        &mut self,
        edible: &[token::Token],
        inedible: &[token::Token],
    ) -> PResult<'a, bool /* recovered */> {
        // Renders the expectation list as "`a`, `b`, or `c`" for diagnostics.
        fn tokens_to_string(tokens: &[TokenType]) -> String {
            let mut i = tokens.iter();
            // This might be a sign we need a connect method on Iterator.
            let b = i.next()
                     .map_or(String::new(), |t| t.to_string());
            i.enumerate().fold(b, |mut b, (i, a)| {
                if tokens.len() > 2 && i == tokens.len() - 2 {
                    b.push_str(", or ");
                } else if tokens.len() == 2 && i == tokens.len() - 2 {
                    b.push_str(" or ");
                } else {
                    b.push_str(", ");
                }
                b.push_str(&a.to_string());
                b
            })
        }
        if edible.contains(&self.token) {
            self.bump();
            Ok(false)
        } else if inedible.contains(&self.token) {
            // leave it in the input
            Ok(false)
        } else {
            // Combine the caller's tokens with every expectation recorded so
            // far, deduplicated by rendered text, for one unified message.
            let mut expected = edible.iter()
                .map(|x| TokenType::Token(x.clone()))
                .chain(inedible.iter().map(|x| TokenType::Token(x.clone())))
                .chain(self.expected_tokens.iter().cloned())
                .collect::<Vec<_>>();
            expected.sort_by_cached_key(|x| x.to_string());
            expected.dedup();
            let expect = tokens_to_string(&expected[..]);
            let actual = self.this_token_to_string();
            let (msg_exp, (label_sp, label_exp)) = if expected.len() > 1 {
                // Summarize long expectation lists instead of spelling them out.
                let short_expect = if expected.len() > 6 {
                    format!("{} possible tokens", expected.len())
                } else {
                    expect.clone()
                };
                (format!("expected one of {}, found `{}`", expect, actual),
                 (self.sess.source_map().next_point(self.prev_span),
                  format!("expected one of {} here", short_expect)))
            } else if expected.is_empty() {
                (format!("unexpected token: `{}`", actual),
                 (self.prev_span, "unexpected token after this".to_string()))
            } else {
                (format!("expected {}, found `{}`", expect, actual),
                 (self.sess.source_map().next_point(self.prev_span),
                  format!("expected {} here", expect)))
            };
            let mut err = self.fatal(&msg_exp);
            // Suggest Rust's `&&`/`||` to users writing `and`/`or`.
            if self.token.is_ident_named("and") {
                err.span_suggestion_short(
                    self.span,
                    "use `&&` instead of `and` for the boolean operator",
                    "&&".to_string(),
                    Applicability::MaybeIncorrect,
                );
            }
            if self.token.is_ident_named("or") {
                err.span_suggestion_short(
                    self.span,
                    "use `||` instead of `or` for the boolean operator",
                    "||".to_string(),
                    Applicability::MaybeIncorrect,
                );
            }
            let sp = if self.token == token::Token::Eof {
                // This is EOF, don't want to point at the following char, but rather the last token
                self.prev_span
            } else {
                label_sp
            };
            // Try unclosed-delimiter recovery before emitting the error.
            match self.recover_closing_delimiter(&expected.iter().filter_map(|tt| match tt {
                TokenType::Token(t) => Some(t.clone()),
                _ => None,
            }).collect::<Vec<_>>(), err) {
                Err(e) => err = e,
                Ok(recovered) => {
                    return Ok(recovered);
                }
            }

            let cm = self.sess.source_map();
            match (cm.lookup_line(self.span.lo()), cm.lookup_line(sp.lo())) {
                (Ok(ref a), Ok(ref b)) if a.line == b.line => {
                    // When the spans are in the same line, it means that the only content between
                    // them is whitespace, point at the found token in that case:
                    //
                    // X |     () => { syntax error };
                    //   |                    ^^^^^ expected one of 8 possible tokens here
                    //
                    // instead of having:
                    //
                    // X |     () => { syntax error };
                    //   |                   -^^^^^ unexpected token
                    //   |                   |
                    //   |                   expected one of 8 possible tokens here
                    err.span_label(self.span, label_exp);
                }
                _ if self.prev_span == syntax_pos::DUMMY_SP => {
                    // Account for macro context where the previous span might not be
                    // available to avoid incorrect output (#54841).
                    err.span_label(self.span, "unexpected token");
                }
                _ => {
                    err.span_label(sp, label_exp);
                    err.span_label(self.span, "unexpected token");
                }
            }
            Err(err)
        }
    }
| |
| /// Returns the span of expr, if it was not interpolated or the span of the interpolated token. |
| fn interpolated_or_expr_span(&self, |
| expr: PResult<'a, P<Expr>>) |
| -> PResult<'a, (Span, P<Expr>)> { |
| expr.map(|e| { |
| if self.prev_token_kind == PrevTokenKind::Interpolated { |
| (self.prev_span, e) |
| } else { |
| (e.span, e) |
| } |
| }) |
| } |
| |
    /// Builds (without emitting) an "expected identifier" diagnostic for the
    /// current token, attaching applicable suggestions.
    fn expected_ident_found(&self) -> DiagnosticBuilder<'a> {
        let mut err = self.struct_span_err(self.span,
                                           &format!("expected identifier, found {}",
                                                    self.this_token_descr()));
        if let token::Ident(ident, false) = &self.token {
            // A non-raw reserved word can be escaped as `r#word` — suggest
            // that, except for path-segment keywords and `_`, which cannot be
            // raw identifiers.
            if ident.is_reserved() && !ident.is_path_segment_keyword() &&
                ident.name != keywords::Underscore.name()
            {
                err.span_suggestion(
                    self.span,
                    "you can escape reserved keywords to use them as identifiers",
                    format!("r#{}", ident),
                    Applicability::MaybeIncorrect,
                );
            }
        }
        if let Some(token_descr) = self.token_descr() {
            err.span_label(self.span, format!("expected identifier, found {}", token_descr));
        } else {
            err.span_label(self.span, "expected identifier");
            // A stray comma directly followed by an identifier is likely a typo.
            if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
                err.span_suggestion(
                    self.span,
                    "remove this comma",
                    String::new(),
                    Applicability::MachineApplicable,
                );
            }
        }
        err
    }
| |
    /// Parses an identifier, recovering (emitting an error but still accepting
    /// the token) when it is a reserved identifier.
    pub fn parse_ident(&mut self) -> PResult<'a, ast::Ident> {
        self.parse_ident_common(true)
    }
| |
    /// Parses an identifier. With `recover == true`, a reserved identifier is
    /// reported but still consumed and returned; otherwise it is an error.
    fn parse_ident_common(&mut self, recover: bool) -> PResult<'a, ast::Ident> {
        match self.token {
            token::Ident(ident, _) => {
                if self.token.is_reserved_ident() {
                    let mut err = self.expected_ident_found();
                    if recover {
                        err.emit();
                    } else {
                        return Err(err);
                    }
                }
                let span = self.span;
                self.bump();
                Ok(Ident::new(ident.name, span))
            }
            _ => {
                Err(if self.prev_token_kind == PrevTokenKind::DocComment {
                    // A doc comment immediately before a non-identifier cannot
                    // document anything — report that instead of the generic error.
                    self.span_fatal_err(self.prev_span, Error::UselessDocComment)
                } else {
                    self.expected_ident_found()
                })
            }
        }
    }
| |
| /// Checks if the next token is `tok`, and returns `true` if so. |
| /// |
| /// This method will automatically add `tok` to `expected_tokens` if `tok` is not |
| /// encountered. |
| crate fn check(&mut self, tok: &token::Token) -> bool { |
| let is_present = self.token == *tok; |
| if !is_present { self.expected_tokens.push(TokenType::Token(tok.clone())); } |
| is_present |
| } |
| |
| /// Consumes a token 'tok' if it exists. Returns whether the given token was present. |
| pub fn eat(&mut self, tok: &token::Token) -> bool { |
| let is_present = self.check(tok); |
| if is_present { self.bump() } |
| is_present |
| } |
| |
| fn check_keyword(&mut self, kw: keywords::Keyword) -> bool { |
| self.expected_tokens.push(TokenType::Keyword(kw)); |
| self.token.is_keyword(kw) |
| } |
| |
| /// If the next token is the given keyword, eats it and returns |
| /// `true`. Otherwise, returns `false`. |
| pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> bool { |
| if self.check_keyword(kw) { |
| self.bump(); |
| true |
| } else { |
| false |
| } |
| } |
| |
| fn eat_keyword_noexpect(&mut self, kw: keywords::Keyword) -> bool { |
| if self.token.is_keyword(kw) { |
| self.bump(); |
| true |
| } else { |
| false |
| } |
| } |
| |
| /// If the given word is not a keyword, signals an error. |
| /// If the next token is not the given word, signals an error. |
| /// Otherwise, eats it. |
| fn expect_keyword(&mut self, kw: keywords::Keyword) -> PResult<'a, ()> { |
| if !self.eat_keyword(kw) { |
| self.unexpected() |
| } else { |
| Ok(()) |
| } |
| } |
| |
| fn check_ident(&mut self) -> bool { |
| if self.token.is_ident() { |
| true |
| } else { |
| self.expected_tokens.push(TokenType::Ident); |
| false |
| } |
| } |
| |
| fn check_path(&mut self) -> bool { |
| if self.token.is_path_start() { |
| true |
| } else { |
| self.expected_tokens.push(TokenType::Path); |
| false |
| } |
| } |
| |
| fn check_type(&mut self) -> bool { |
| if self.token.can_begin_type() { |
| true |
| } else { |
| self.expected_tokens.push(TokenType::Type); |
| false |
| } |
| } |
| |
| fn check_const_arg(&mut self) -> bool { |
| if self.token.can_begin_const_arg() { |
| true |
| } else { |
| self.expected_tokens.push(TokenType::Const); |
| false |
| } |
| } |
| |
| /// Expects and consumes a `+`. if `+=` is seen, replaces it with a `=` |
| /// and continues. If a `+` is not seen, returns `false`. |
| /// |
| /// This is used when token-splitting `+=` into `+`. |
| /// See issue #47856 for an example of when this may occur. |
| fn eat_plus(&mut self) -> bool { |
| self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus))); |
| match self.token { |
| token::BinOp(token::Plus) => { |
| self.bump(); |
| true |
| } |
| token::BinOpEq(token::Plus) => { |
| let span = self.span.with_lo(self.span.lo() + BytePos(1)); |
| self.bump_with(token::Eq, span); |
| true |
| } |
| _ => false, |
| } |
| } |
| |
| |
| /// Checks to see if the next token is either `+` or `+=`. |
| /// Otherwise returns `false`. |
| fn check_plus(&mut self) -> bool { |
| if self.token.is_like_plus() { |
| true |
| } |
| else { |
| self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus))); |
| false |
| } |
| } |
| |
| /// Expects and consumes an `&`. If `&&` is seen, replaces it with a single |
| /// `&` and continues. If an `&` is not seen, signals an error. |
| fn expect_and(&mut self) -> PResult<'a, ()> { |
| self.expected_tokens.push(TokenType::Token(token::BinOp(token::And))); |
| match self.token { |
| token::BinOp(token::And) => { |
| self.bump(); |
| Ok(()) |
| } |
| token::AndAnd => { |
| let span = self.span.with_lo(self.span.lo() + BytePos(1)); |
| Ok(self.bump_with(token::BinOp(token::And), span)) |
| } |
| _ => self.unexpected() |
| } |
| } |
| |
| /// Expects and consumes an `|`. If `||` is seen, replaces it with a single |
| /// `|` and continues. If an `|` is not seen, signals an error. |
| fn expect_or(&mut self) -> PResult<'a, ()> { |
| self.expected_tokens.push(TokenType::Token(token::BinOp(token::Or))); |
| match self.token { |
| token::BinOp(token::Or) => { |
| self.bump(); |
| Ok(()) |
| } |
| token::OrOr => { |
| let span = self.span.with_lo(self.span.lo() + BytePos(1)); |
| Ok(self.bump_with(token::BinOp(token::Or), span)) |
| } |
| _ => self.unexpected() |
| } |
| } |
| |
| fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<ast::Name>) { |
| match suffix { |
| None => {/* everything ok */} |
| Some(suf) => { |
| let text = suf.as_str(); |
| if text.is_empty() { |
| self.span_bug(sp, "found empty literal suffix in Some") |
| } |
| let msg = format!("{} with a suffix is invalid", kind); |
| self.struct_span_err(sp, &msg) |
| .span_label(sp, msg) |
| .emit(); |
| } |
| } |
| } |
| |
    /// Attempts to consume a `<`. If `<<` is seen, replaces it with a single
    /// `<` and continue. If `<-` is seen, replaces it with a single `<`
    /// and continue. If a `<` is not seen, returns false.
    ///
    /// This is meant to be used when parsing generics on a path to get the
    /// starting token.
    fn eat_lt(&mut self) -> bool {
        self.expected_tokens.push(TokenType::Token(token::Lt));
        let ate = match self.token {
            token::Lt => {
                self.bump();
                true
            }
            token::BinOp(token::Shl) => {
                // Split `<<`: consume one `<` and keep a `<` spanning the
                // second byte as the current token.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                self.bump_with(token::Lt, span);
                true
            }
            token::LArrow => {
                // Split `<-`: consume the `<` and keep a `-` spanning the
                // second byte as the current token.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                self.bump_with(token::BinOp(token::Minus), span);
                true
            }
            _ => false,
        };

        if ate {
            // See doc comment for `unmatched_angle_bracket_count`.
            self.unmatched_angle_bracket_count += 1;
            self.max_angle_bracket_count += 1;
            debug!("eat_lt: (increment) count={:?}", self.unmatched_angle_bracket_count);
        }

        ate
    }
| |
| fn expect_lt(&mut self) -> PResult<'a, ()> { |
| if !self.eat_lt() { |
| self.unexpected() |
| } else { |
| Ok(()) |
| } |
| } |
| |
    /// Expects and consumes a single `>` token. if a `>>` is seen, replaces it
    /// with a single `>` and continues. If a `>` is not seen, signals an error.
    fn expect_gt(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::Gt));
        let ate = match self.token {
            token::Gt => {
                self.bump();
                Some(())
            }
            token::BinOp(token::Shr) => {
                // Split `>>`: the second `>` becomes the current token.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Some(self.bump_with(token::Gt, span))
            }
            token::BinOpEq(token::Shr) => {
                // Split `>>=` into `>` and a remaining `>=`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Some(self.bump_with(token::Ge, span))
            }
            token::Ge => {
                // Split `>=` into `>` and a remaining `=`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Some(self.bump_with(token::Eq, span))
            }
            _ => None,
        };

        match ate {
            Some(_) => {
                // See doc comment for `unmatched_angle_bracket_count`.
                if self.unmatched_angle_bracket_count > 0 {
                    self.unmatched_angle_bracket_count -= 1;
                    debug!("expect_gt: (decrement) count={:?}", self.unmatched_angle_bracket_count);
                }

                Ok(())
            },
            None => self.unexpected(),
        }
    }
| |
| /// Eats and discards tokens until one of `kets` is encountered. Respects token trees, |
| /// passes through any errors encountered. Used for error recovery. |
| fn eat_to_tokens(&mut self, kets: &[&token::Token]) { |
| let handler = self.diagnostic(); |
| |
| if let Err(ref mut err) = self.parse_seq_to_before_tokens(kets, |
| SeqSep::none(), |
| TokenExpectType::Expect, |
| |p| Ok(p.parse_token_tree())) { |
| handler.cancel(err); |
| } |
| } |
| |
| /// Parses a sequence, including the closing delimiter. The function |
| /// `f` must consume tokens until reaching the next separator or |
| /// closing bracket. |
| pub fn parse_seq_to_end<T, F>(&mut self, |
| ket: &token::Token, |
| sep: SeqSep, |
| f: F) |
| -> PResult<'a, Vec<T>> where |
| F: FnMut(&mut Parser<'a>) -> PResult<'a, T>, |
| { |
| let (val, recovered) = self.parse_seq_to_before_end(ket, sep, f)?; |
| if !recovered { |
| self.bump(); |
| } |
| Ok(val) |
| } |
| |
| /// Parses a sequence, not including the closing delimiter. The function |
| /// `f` must consume tokens until reaching the next separator or |
| /// closing bracket. |
| pub fn parse_seq_to_before_end<T, F>( |
| &mut self, |
| ket: &token::Token, |
| sep: SeqSep, |
| f: F, |
| ) -> PResult<'a, (Vec<T>, bool)> |
| where F: FnMut(&mut Parser<'a>) -> PResult<'a, T> |
| { |
| self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f) |
| } |
| |
    /// Parses a separated sequence of elements until one of the `kets` tokens
    /// (or a closing delimiter / EOF) is reached; the terminating token is
    /// *not* consumed.
    ///
    /// Returns the parsed elements plus a `recovered` flag: when `true`,
    /// separator error recovery already moved past the point where the caller
    /// would expect to find the closing token.
    fn parse_seq_to_before_tokens<T, F>(
        &mut self,
        kets: &[&token::Token],
        sep: SeqSep,
        expect: TokenExpectType,
        mut f: F,
    ) -> PResult<'a, (Vec<T>, bool /* recovered */)>
        where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>
    {
        let mut first = true;
        let mut recovered = false;
        let mut v = vec![];
        while !kets.iter().any(|k| {
            match expect {
                // `Expect` also records `k` in `expected_tokens` for better
                // diagnostics; `NoExpect` is a plain comparison.
                TokenExpectType::Expect => self.check(k),
                TokenExpectType::NoExpect => self.token == **k,
            }
        }) {
            match self.token {
                // Never run past the enclosing token tree or the end of file.
                token::CloseDelim(..) | token::Eof => break,
                _ => {}
            };
            if let Some(ref t) = sep.sep {
                if first {
                    // No separator is required before the first element.
                    first = false;
                } else {
                    match self.expect(t) {
                        Ok(false) => {}
                        Ok(true) => {
                            // `expect` already performed its own recovery;
                            // stop parsing the sequence here.
                            recovered = true;
                            break;
                        }
                        Err(mut e) => {
                            // Attempt to keep parsing if it was a similar separator
                            if let Some(ref tokens) = t.similar_tokens() {
                                if tokens.contains(&self.token) {
                                    self.bump();
                                }
                            }
                            e.emit();
                            // Attempt to keep parsing if it was an omitted separator
                            match f(self) {
                                Ok(t) => {
                                    v.push(t);
                                    continue;
                                },
                                Err(mut e) => {
                                    e.cancel();
                                    break;
                                }
                            }
                        }
                    }
                }
            }
            // A trailing separator may be immediately followed by one of the
            // terminating tokens.
            if sep.trailing_sep_allowed && kets.iter().any(|k| {
                match expect {
                    TokenExpectType::Expect => self.check(k),
                    TokenExpectType::NoExpect => self.token == **k,
                }
            }) {
                break;
            }

            let t = f(self)?;
            v.push(t);
        }

        Ok((v, recovered))
    }
| |
| /// Parses a sequence, including the closing delimiter. The function |
| /// `f` must consume tokens until reaching the next separator or |
| /// closing bracket. |
| fn parse_unspanned_seq<T, F>( |
| &mut self, |
| bra: &token::Token, |
| ket: &token::Token, |
| sep: SeqSep, |
| f: F, |
| ) -> PResult<'a, Vec<T>> where |
| F: FnMut(&mut Parser<'a>) -> PResult<'a, T>, |
| { |
| self.expect(bra)?; |
| let (result, recovered) = self.parse_seq_to_before_end(ket, sep, f)?; |
| if !recovered { |
| self.eat(ket); |
| } |
| Ok(result) |
| } |
| |
    /// Advance the parser by one token
    pub fn bump(&mut self) {
        if self.prev_token_kind == PrevTokenKind::Eof {
            // Bumping after EOF is a bad sign, usually an infinite loop.
            self.bug("attempted to bump the parser past EOF (may be stuck in a loop)");
        }

        // `meta_var_span` (when set — presumably for tokens that came from a
        // macro metavariable; confirm at the assignment sites) takes
        // precedence over the token's own span.
        self.prev_span = self.meta_var_span.take().unwrap_or(self.span);

        // Record last token kind for possible error recovery.
        self.prev_token_kind = match self.token {
            token::DocComment(..) => PrevTokenKind::DocComment,
            token::Comma => PrevTokenKind::Comma,
            token::BinOp(token::Plus) => PrevTokenKind::Plus,
            token::Interpolated(..) => PrevTokenKind::Interpolated,
            token::Eof => PrevTokenKind::Eof,
            token::Ident(..) => PrevTokenKind::Ident,
            _ => PrevTokenKind::Other,
        };

        let next = self.next_tok();
        self.span = next.sp;
        self.token = next.tok;
        // The expectation set describes only the token just consumed, so it
        // is reset on every advance.
        self.expected_tokens.clear();
        // check after each token
        self.process_potential_macro_variable();
    }
| |
    /// Advance the parser using provided token as a next one. Use this when
    /// consuming a part of a token. For example a single `<` from `<<`.
    fn bump_with(&mut self, next: token::Token, span: Span) {
        // The previous span ends where the remainder (`span`) begins.
        self.prev_span = self.span.with_hi(span.lo());
        // It would be incorrect to record the kind of the current token, but
        // fortunately for tokens currently using `bump_with`, the
        // prev_token_kind will be of no use anyway.
        self.prev_token_kind = PrevTokenKind::Other;
        self.span = span;
        self.token = next;
        self.expected_tokens.clear();
    }
| |
    /// Applies `f` to the token `dist` positions ahead of the current one,
    /// without consuming anything.
    ///
    /// Lookahead only walks the current token tree: a delimited subtree is
    /// seen as its opening delimiter, and running off the end of the tree
    /// yields the enclosing frame's closing delimiter.
    pub fn look_ahead<R, F>(&self, dist: usize, f: F) -> R where
        F: FnOnce(&token::Token) -> R,
    {
        if dist == 0 {
            return f(&self.token)
        }

        // `dist - 1` because the current token was already taken out of the
        // cursor.
        f(&match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
            Some(tree) => match tree {
                TokenTree::Token(_, tok) => tok,
                TokenTree::Delimited(_, delim, _) => token::OpenDelim(delim),
            },
            None => token::CloseDelim(self.token_cursor.frame.delim),
        })
    }
| |
    /// Returns the span of the token `dist` positions ahead of the current one.
    fn look_ahead_span(&self, dist: usize) -> Span {
        if dist == 0 {
            return self.span
        }

        match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
            Some(TokenTree::Token(span, _)) => span,
            Some(TokenTree::Delimited(span, ..)) => span.entire(),
            // Ran off the end of the current token tree: fall back to the
            // span of the closest preceding token.
            None => self.look_ahead_span(dist - 1),
        }
    }
| pub fn fatal(&self, m: &str) -> DiagnosticBuilder<'a> { |
| self.sess.span_diagnostic.struct_span_fatal(self.span, m) |
| } |
| pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> DiagnosticBuilder<'a> { |
| self.sess.span_diagnostic.struct_span_fatal(sp, m) |
| } |
    /// Builds (without emitting) a diagnostic for a structured parser `Error`
    /// at the given span(s).
    fn span_fatal_err<S: Into<MultiSpan>>(&self, sp: S, err: Error) -> DiagnosticBuilder<'a> {
        err.span_err(sp, self.diagnostic())
    }
| fn bug(&self, m: &str) -> ! { |
| self.sess.span_diagnostic.span_bug(self.span, m) |
| } |
| fn span_err<S: Into<MultiSpan>>(&self, sp: S, m: &str) { |
| self.sess.span_diagnostic.span_err(sp, m) |
| } |
| fn struct_span_err<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> DiagnosticBuilder<'a> { |
| self.sess.span_diagnostic.struct_span_err(sp, m) |
| } |
| crate fn span_bug<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> ! { |
| self.sess.span_diagnostic.span_bug(sp, m) |
| } |
| |
| fn cancel(&self, err: &mut DiagnosticBuilder<'_>) { |
| self.sess.span_diagnostic.cancel(err) |
| } |
| |
    /// Returns the session's diagnostic handler, through which all parser
    /// errors are reported.
    crate fn diagnostic(&self) -> &'a errors::Handler {
        &self.sess.span_diagnostic
    }
| |
| /// Is the current token one of the keywords that signals a bare function type? |
| fn token_is_bare_fn_keyword(&mut self) -> bool { |
| self.check_keyword(keywords::Fn) || |
| self.check_keyword(keywords::Unsafe) || |
| self.check_keyword(keywords::Extern) |
| } |
| |
| /// Parses a `TyKind::BareFn` type. |
| fn parse_ty_bare_fn(&mut self, generic_params: Vec<GenericParam>) -> PResult<'a, TyKind> { |
| /* |
| |
| [unsafe] [extern "ABI"] fn (S) -> T |
| ^~~~^ ^~~~^ ^~^ ^ |
| | | | | |
| | | | Return type |
| | | Argument types |
| | | |
| | ABI |
| Function Style |
| */ |
| |
| let unsafety = self.parse_unsafety(); |
| let abi = if self.eat_keyword(keywords::Extern) { |
| self.parse_opt_abi()?.unwrap_or(Abi::C) |
| } else { |
| Abi::Rust |
| }; |
| |
| self.expect_keyword(keywords::Fn)?; |
| let (inputs, variadic) = self.parse_fn_args(false, true)?; |
| let ret_ty = self.parse_ret_ty(false)?; |
| let decl = P(FnDecl { |
| inputs, |
| output: ret_ty, |
| variadic, |
| }); |
| Ok(TyKind::BareFn(P(BareFnTy { |
| abi, |
| unsafety, |
| generic_params, |
| decl, |
| }))) |
| } |
| |
| /// Parses asyncness: `async` or nothing. |
| fn parse_asyncness(&mut self) -> IsAsync { |
| if self.eat_keyword(keywords::Async) { |
| IsAsync::Async { |
| closure_id: ast::DUMMY_NODE_ID, |
| return_impl_trait_id: ast::DUMMY_NODE_ID, |
| } |
| } else { |
| IsAsync::NotAsync |
| } |
| } |
| |
| /// Parses unsafety: `unsafe` or nothing. |
| fn parse_unsafety(&mut self) -> Unsafety { |
| if self.eat_keyword(keywords::Unsafe) { |
| Unsafety::Unsafe |
| } else { |
| Unsafety::Normal |
| } |
| } |
| |
    /// Parses the items in a trait declaration.
    pub fn parse_trait_item(&mut self, at_end: &mut bool) -> PResult<'a, TraitItem> {
        // An interpolated `NtTraitItem` nonterminal is returned directly.
        maybe_whole!(self, NtTraitItem, |x| x);
        let attrs = self.parse_outer_attributes()?;
        // Capture the token stream of the item so it can be attached to the
        // AST node below.
        let (mut item, tokens) = self.collect_tokens(|this| {
            this.parse_trait_item_(at_end, attrs)
        })?;
        // See `parse_item` for why this clause is here.
        if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
            item.tokens = Some(tokens);
        }
        Ok(item)
    }
| |
    /// Parses the body of a single trait item: an associated type, an
    /// associated const, a trait item macro invocation, or a required or
    /// provided method. Outer attributes are passed in via `attrs`; `at_end`
    /// is set once the item has been consumed to its end.
    fn parse_trait_item_(&mut self,
                         at_end: &mut bool,
                         mut attrs: Vec<Attribute>) -> PResult<'a, TraitItem> {
        let lo = self.span;

        let (name, node, generics) = if self.eat_keyword(keywords::Type) {
            // Associated type.
            self.parse_trait_item_assoc_ty()?
        } else if self.is_const_item() {
            // Associated const: `const NAME: TY [= EXPR];`
            self.expect_keyword(keywords::Const)?;
            let ident = self.parse_ident()?;
            self.expect(&token::Colon)?;
            let ty = self.parse_ty()?;
            let default = if self.eat(&token::Eq) {
                let expr = self.parse_expr()?;
                self.expect(&token::Semi)?;
                Some(expr)
            } else {
                self.expect(&token::Semi)?;
                None
            };
            (ident, TraitItemKind::Const(ty, default), ast::Generics::default())
        } else if let Some(mac) = self.parse_assoc_macro_invoc("trait", None, &mut false)? {
            // trait item macro.
            (keywords::Invalid.ident(), ast::TraitItemKind::Macro(mac), ast::Generics::default())
        } else {
            // Method: front matter, name, generics, signature, where-clause,
            // then either `;` (required) or a block (provided).
            let (constness, unsafety, asyncness, abi) = self.parse_fn_front_matter()?;

            let ident = self.parse_ident()?;
            let mut generics = self.parse_generics()?;

            let d = self.parse_fn_decl_with_self(|p: &mut Parser<'a>| {
                // This is somewhat dubious; We don't want to allow
                // argument names to be left off if there is a
                // definition...

                // We don't allow argument names to be left off in edition 2018.
                p.parse_arg_general(p.span.rust_2018(), true)
            })?;
            generics.where_clause = self.parse_where_clause()?;

            let sig = ast::MethodSig {
                header: FnHeader {
                    unsafety,
                    constness,
                    abi,
                    asyncness,
                },
                decl: d,
            };

            let body = match self.token {
                token::Semi => {
                    self.bump();
                    *at_end = true;
                    debug!("parse_trait_methods(): parsing required method");
                    None
                }
                token::OpenDelim(token::Brace) => {
                    debug!("parse_trait_methods(): parsing provided method");
                    *at_end = true;
                    let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
                    attrs.extend(inner_attrs.iter().cloned());
                    Some(body)
                }
                token::Interpolated(ref nt) => {
                    // An interpolated block also counts as a provided body.
                    match **nt {
                        token::NtBlock(..) => {
                            *at_end = true;
                            let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
                            attrs.extend(inner_attrs.iter().cloned());
                            Some(body)
                        }
                        _ => {
                            let token_str = self.this_token_descr();
                            let mut err = self.fatal(&format!("expected `;` or `{{`, found {}",
                                                              token_str));
                            err.span_label(self.span, "expected `;` or `{`");
                            return Err(err);
                        }
                    }
                }
                _ => {
                    let token_str = self.this_token_descr();
                    let mut err = self.fatal(&format!("expected `;` or `{{`, found {}",
                                                      token_str));
                    err.span_label(self.span, "expected `;` or `{`");
                    return Err(err);
                }
            };
            (ident, ast::TraitItemKind::Method(sig, body), generics)
        };

        Ok(TraitItem {
            id: ast::DUMMY_NODE_ID,
            ident: name,
            attrs,
            generics,
            node,
            span: lo.to(self.prev_span),
            tokens: None,
        })
    }
| |
| /// Parses an optional return type `[ -> TY ]` in a function declaration. |
| fn parse_ret_ty(&mut self, allow_plus: bool) -> PResult<'a, FunctionRetTy> { |
| if self.eat(&token::RArrow) { |
| Ok(FunctionRetTy::Ty(self.parse_ty_common(allow_plus, true)?)) |
| } else { |
| Ok(FunctionRetTy::Default(self.span.shrink_to_lo())) |
| } |
| } |
| |
| /// Parses a type. |
| pub fn parse_ty(&mut self) -> PResult<'a, P<Ty>> { |
| self.parse_ty_common(true, true) |
| } |
| |
| /// Parses a type in restricted contexts where `+` is not permitted. |
| /// |
| /// Example 1: `&'a TYPE` |
| /// `+` is prohibited to maintain operator priority (P(+) < P(&)). |
| /// Example 2: `value1 as TYPE + value2` |
| /// `+` is prohibited to avoid interactions with expression grammar. |
| fn parse_ty_no_plus(&mut self) -> PResult<'a, P<Ty>> { |
| self.parse_ty_common(false, true) |
| } |
| |
    /// Parses any type, dispatching on the leading token.
    ///
    /// `allow_plus` controls whether a following `+` may start a bound list;
    /// `allow_qpath_recovery` enables rewriting `[T]::Assoc`-style paths into
    /// qualified form as error recovery.
    fn parse_ty_common(&mut self, allow_plus: bool, allow_qpath_recovery: bool)
                       -> PResult<'a, P<Ty>> {
        // An interpolated `NtTy` nonterminal is returned directly.
        maybe_whole!(self, NtTy, |x| x);

        let lo = self.span;
        let mut impl_dyn_multi = false;
        let node = if self.eat(&token::OpenDelim(token::Paren)) {
            // `(TYPE)` is a parenthesized type.
            // `(TYPE,)` is a tuple with a single field of type TYPE.
            let mut ts = vec![];
            let mut last_comma = false;
            while self.token != token::CloseDelim(token::Paren) {
                ts.push(self.parse_ty()?);
                if self.eat(&token::Comma) {
                    last_comma = true;
                } else {
                    last_comma = false;
                    break;
                }
            }
            // Whether the last element itself ended in a `+` (relevant for
            // the trait-object disambiguation below).
            let trailing_plus = self.prev_token_kind == PrevTokenKind::Plus;
            self.expect(&token::CloseDelim(token::Paren))?;

            if ts.len() == 1 && !last_comma {
                let ty = ts.into_iter().nth(0).unwrap().into_inner();
                let maybe_bounds = allow_plus && self.token.is_like_plus();
                match ty.node {
                    // `(TY_BOUND_NOPAREN) + BOUND + ...`.
                    TyKind::Path(None, ref path) if maybe_bounds => {
                        self.parse_remaining_bounds(Vec::new(), path.clone(), lo, true)?
                    }
                    TyKind::TraitObject(ref bounds, TraitObjectSyntax::None)
                            if maybe_bounds && bounds.len() == 1 && !trailing_plus => {
                        let path = match bounds[0] {
                            GenericBound::Trait(ref pt, ..) => pt.trait_ref.path.clone(),
                            GenericBound::Outlives(..) => self.bug("unexpected lifetime bound"),
                        };
                        self.parse_remaining_bounds(Vec::new(), path, lo, true)?
                    }
                    // `(TYPE)`
                    _ => TyKind::Paren(P(ty))
                }
            } else {
                TyKind::Tup(ts)
            }
        } else if self.eat(&token::Not) {
            // Never type `!`
            TyKind::Never
        } else if self.eat(&token::BinOp(token::Star)) {
            // Raw pointer
            TyKind::Ptr(self.parse_ptr()?)
        } else if self.eat(&token::OpenDelim(token::Bracket)) {
            // Array or slice
            let t = self.parse_ty()?;
            // Parse optional `; EXPR` in `[TYPE; EXPR]`
            let t = match self.maybe_parse_fixed_length_of_vec()? {
                None => TyKind::Slice(t),
                Some(length) => TyKind::Array(t, AnonConst {
                    id: ast::DUMMY_NODE_ID,
                    value: length,
                }),
            };
            self.expect(&token::CloseDelim(token::Bracket))?;
            t
        } else if self.check(&token::BinOp(token::And)) || self.check(&token::AndAnd) {
            // Reference
            self.expect_and()?;
            self.parse_borrowed_pointee()?
        } else if self.eat_keyword_noexpect(keywords::Typeof) {
            // `typeof(EXPR)`
            // In order to not be ambiguous, the type must be surrounded by parens.
            self.expect(&token::OpenDelim(token::Paren))?;
            let e = AnonConst {
                id: ast::DUMMY_NODE_ID,
                value: self.parse_expr()?,
            };
            self.expect(&token::CloseDelim(token::Paren))?;
            TyKind::Typeof(e)
        } else if self.eat_keyword(keywords::Underscore) {
            // A type to be inferred `_`
            TyKind::Infer
        } else if self.token_is_bare_fn_keyword() {
            // Function pointer type
            self.parse_ty_bare_fn(Vec::new())?
        } else if self.check_keyword(keywords::For) {
            // Function pointer type or bound list (trait object type) starting with a poly-trait.
            // `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
            // `for<'lt> Trait1<'lt> + Trait2 + 'a`
            let lo = self.span;
            let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
            if self.token_is_bare_fn_keyword() {
                self.parse_ty_bare_fn(lifetime_defs)?
            } else {
                let path = self.parse_path(PathStyle::Type)?;
                let parse_plus = allow_plus && self.check_plus();
                self.parse_remaining_bounds(lifetime_defs, path, lo, parse_plus)?
            }
        } else if self.eat_keyword(keywords::Impl) {
            // Always parse bounds greedily for better error recovery.
            let bounds = self.parse_generic_bounds(None)?;
            impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
            TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)
        } else if self.check_keyword(keywords::Dyn) &&
                  (self.span.rust_2018() ||
                   self.look_ahead(1, |t| t.can_begin_bound() &&
                                          !can_continue_type_after_non_fn_ident(t))) {
            self.bump(); // `dyn`
            // Always parse bounds greedily for better error recovery.
            let bounds = self.parse_generic_bounds(None)?;
            impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
            TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn)
        } else if self.check(&token::Question) ||
                  self.check_lifetime() && self.look_ahead(1, |t| t.is_like_plus()) {
            // Bound list (trait object type)
            TyKind::TraitObject(self.parse_generic_bounds_common(allow_plus, None)?,
                                TraitObjectSyntax::None)
        } else if self.eat_lt() {
            // Qualified path
            let (qself, path) = self.parse_qpath(PathStyle::Type)?;
            TyKind::Path(Some(qself), path)
        } else if self.token.is_path_start() {
            // Simple path
            let path = self.parse_path(PathStyle::Type)?;
            if self.eat(&token::Not) {
                // Macro invocation in type position
                let (delim, tts) = self.expect_delimited_token_tree()?;
                let node = Mac_ { path, tts, delim };
                TyKind::Mac(respan(lo.to(self.prev_span), node))
            } else {
                // Just a type path or bound list (trait object type) starting with a trait.
                // `Type`
                // `Trait1 + Trait2 + 'a`
                if allow_plus && self.check_plus() {
                    self.parse_remaining_bounds(Vec::new(), path, lo, true)?
                } else {
                    TyKind::Path(None, path)
                }
            }
        } else {
            let msg = format!("expected type, found {}", self.this_token_descr());
            return Err(self.fatal(&msg));
        };

        let span = lo.to(self.prev_span);
        let ty = Ty { node, span, id: ast::DUMMY_NODE_ID };

        // Try to recover from use of `+` with incorrect priority.
        self.maybe_report_ambiguous_plus(allow_plus, impl_dyn_multi, &ty);
        self.maybe_recover_from_bad_type_plus(allow_plus, &ty)?;
        let ty = self.maybe_recover_from_bad_qpath(ty, allow_qpath_recovery)?;

        Ok(P(ty))
    }
| |
| fn parse_remaining_bounds(&mut self, generic_params: Vec<GenericParam>, path: ast::Path, |
| lo: Span, parse_plus: bool) -> PResult<'a, TyKind> { |
| let poly_trait_ref = PolyTraitRef::new(generic_params, path, lo.to(self.prev_span)); |
| let mut bounds = vec![GenericBound::Trait(poly_trait_ref, TraitBoundModifier::None)]; |
| if parse_plus { |
| self.eat_plus(); // `+`, or `+=` gets split and `+` is discarded |
| bounds.append(&mut self.parse_generic_bounds(None)?); |
| } |
| Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None)) |
| } |
| |
| fn maybe_report_ambiguous_plus(&mut self, allow_plus: bool, impl_dyn_multi: bool, ty: &Ty) { |
| if !allow_plus && impl_dyn_multi { |
| let sum_with_parens = format!("({})", pprust::ty_to_string(&ty)); |
| self.struct_span_err(ty.span, "ambiguous `+` in a type") |
| .span_suggestion( |
| ty.span, |
| "use parentheses to disambiguate", |
| sum_with_parens, |
| Applicability::MachineApplicable |
| ).emit(); |
| } |
| } |
| |
    /// Recovers from `TYPE + BOUNDS` written where only a path may carry `+`
    /// bounds (e.g. `&Foo + Send`), emitting E0178 with a parenthesization
    /// suggestion where one can be generated.
    fn maybe_recover_from_bad_type_plus(&mut self, allow_plus: bool, ty: &Ty) -> PResult<'a, ()> {
        // Do not add `+` to expected tokens.
        if !allow_plus || !self.token.is_like_plus() {
            return Ok(())
        }

        self.bump(); // `+`
        let bounds = self.parse_generic_bounds(None)?;
        let sum_span = ty.span.to(self.prev_span);

        let mut err = struct_span_err!(self.sess.span_diagnostic, sum_span, E0178,
            "expected a path on the left-hand side of `+`, not `{}`", pprust::ty_to_string(ty));

        match ty.node {
            TyKind::Rptr(ref lifetime, ref mut_ty) => {
                // For `&TY + BOUNDS`, pretty-print `&(TY + BOUNDS)` as the
                // suggested replacement.
                let sum_with_parens = pprust::to_string(|s| {
                    use crate::print::pprust::PrintState;

                    s.s.word("&")?;
                    s.print_opt_lifetime(lifetime)?;
                    s.print_mutability(mut_ty.mutbl)?;
                    s.popen()?;
                    s.print_type(&mut_ty.ty)?;
                    s.print_type_bounds(" +", &bounds)?;
                    s.pclose()
                });
                err.span_suggestion(
                    sum_span,
                    "try adding parentheses",
                    sum_with_parens,
                    Applicability::MachineApplicable
                );
            }
            TyKind::Ptr(..) | TyKind::BareFn(..) => {
                // No automatic rewrite offered here; just point at the spot.
                err.span_label(sum_span, "perhaps you forgot parentheses?");
            }
            _ => {
                err.span_label(sum_span, "expected a path");
            },
        }
        err.emit();
        Ok(())
    }
| |
    // Try to recover from associated item paths like `[T]::AssocItem`/`(T, U)::AssocItem`.
    //
    // The item is rewritten into the qualified form (a `QSelf` with
    // `position == 0` and an empty leading path) and a best-effort
    // suggestion is emitted.
    fn maybe_recover_from_bad_qpath<T: RecoverQPath>(&mut self, base: T, allow_recovery: bool)
                                                    -> PResult<'a, T> {
        // Do not add `::` to expected tokens.
        if !allow_recovery || self.token != token::ModSep {
            return Ok(base);
        }
        // Recovery is only possible when `base` can be viewed as a type.
        let ty = match base.to_ty() {
            Some(ty) => ty,
            None => return Ok(base),
        };

        self.bump(); // `::`
        let mut segments = Vec::new();
        self.parse_path_segments(&mut segments, T::PATH_STYLE, true)?;

        let span = ty.span.to(self.prev_span);
        let path_span = span.to(span); // use an empty path since `position` == 0
        let recovered = base.to_recovered(
            Some(QSelf { ty, path_span, position: 0 }),
            ast::Path { segments, span },
        );

        self.diagnostic()
            .struct_span_err(span, "missing angle brackets in associated item path")
            .span_suggestion( // this is a best-effort recovery
                span, "try", recovered.to_string(), Applicability::MaybeIncorrect
            ).emit();

        Ok(recovered)
    }
| |
| fn parse_borrowed_pointee(&mut self) -> PResult<'a, TyKind> { |
| let opt_lifetime = if self.check_lifetime() { Some(self.expect_lifetime()) } else { None }; |
| let mutbl = self.parse_mutability(); |
| let ty = self.parse_ty_no_plus()?; |
| return Ok(TyKind::Rptr(opt_lifetime, MutTy { ty: ty, mutbl: mutbl })); |
| } |
| |
| fn parse_ptr(&mut self) -> PResult<'a, MutTy> { |
| let mutbl = if self.eat_keyword(keywords::Mut) { |
| Mutability::Mutable |
| } else if self.eat_keyword(keywords::Const) { |
| Mutability::Immutable |
| } else { |
| let span = self.prev_span; |
| let msg = "expected mut or const in raw pointer type"; |
| self.struct_span_err(span, msg) |
| .span_label(span, msg) |
| .help("use `*mut T` or `*const T` as appropriate") |
| .emit(); |
| Mutability::Immutable |
| }; |
| let t = self.parse_ty_no_plus()?; |
| Ok(MutTy { ty: t, mutbl: mutbl }) |
| } |
| |
    /// Returns whether the parser is positioned at a named argument, i.e. a
    /// pattern followed by a colon (possibly behind a leading `&`, `&&`, or
    /// `mut`), as opposed to a bare type.
    fn is_named_argument(&mut self) -> bool {
        let offset = match self.token {
            token::Interpolated(ref nt) => match **nt {
                // An interpolated pattern is "named" iff a `:` follows it.
                token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon),
                _ => 0,
            }
            // Look past a leading `&`/`&&` or `mut` for the identifier.
            token::BinOp(token::And) | token::AndAnd => 1,
            _ if self.token.is_keyword(keywords::Mut) => 1,
            _ => 0,
        };

        self.look_ahead(offset, |t| t.is_ident()) &&
            self.look_ahead(offset + 1, |t| t == &token::Colon)
    }
| |
| /// Skips unexpected attributes and doc comments in this position and emits an appropriate |
| /// error. |
| fn eat_incorrect_doc_comment(&mut self, applied_to: &str) { |
| if let token::DocComment(_) = self.token { |
| let mut err = self.diagnostic().struct_span_err( |
| self.span, |
| &format!("documentation comments cannot be applied to {}", applied_to), |
| ); |
| err.span_label(self.span, "doc comments are not allowed here"); |
| err.emit(); |
| self.bump(); |
| } else if self.token == token::Pound && self.look_ahead(1, |t| { |
| *t == token::OpenDelim(token::Bracket) |
| }) { |
| let lo = self.span; |
| // Skip every token until next possible arg. |
| while self.token != token::CloseDelim(token::Bracket) { |
| self.bump(); |
| } |
| let sp = lo.to(self.span); |
| self.bump(); |
| let mut err = self.diagnostic().struct_span_err( |
| sp, |
| &format!("attributes cannot be applied to {}", applied_to), |
| ); |
| err.span_label(sp, "attributes are not allowed here"); |
| err.emit(); |
| } |
| } |
| |
    /// This version of parse arg doesn't necessarily require identifier names.
    ///
    /// When `require_name` is false, a bare type is accepted and a dummy
    /// (invalid-ident) pattern is synthesized for it. `is_trait_item` only
    /// tailors diagnostics (anonymous parameters were accepted in trait
    /// methods before the 2018 edition).
    fn parse_arg_general(&mut self, require_name: bool, is_trait_item: bool) -> PResult<'a, Arg> {
        maybe_whole!(self, NtArg, |x| x);

        // `self` is not a valid argument in this position; report and bail.
        if let Ok(Some(_)) = self.parse_self_arg() {
            let mut err = self.struct_span_err(self.prev_span,
                "unexpected `self` argument in function");
            err.span_label(self.prev_span,
                "`self` is only valid as the first argument of an associated function");
            return Err(err);
        }

        let (pat, ty) = if require_name || self.is_named_argument() {
            debug!("parse_arg_general parse_pat (require_name:{})",
                   require_name);
            self.eat_incorrect_doc_comment("method arguments");
            let pat = self.parse_pat(Some("argument name"))?;

            if let Err(mut err) = self.expect(&token::Colon) {
                // If we find a pattern followed by an identifier, it could be an (incorrect)
                // C-style parameter declaration.
                if self.check_ident() && self.look_ahead(1, |t| {
                    *t == token::Comma || *t == token::CloseDelim(token::Paren)
                }) {
                    let ident = self.parse_ident().unwrap();
                    let span = pat.span.with_hi(ident.span.hi());

                    err.span_suggestion(
                        span,
                        "declare the type after the parameter binding",
                        "<identifier>: <type>",
                        Applicability::HasPlaceholders,
                    );
                } else if require_name && is_trait_item {
                    if let PatKind::Ident(_, ident, _) = pat.node {
                        err.span_suggestion(
                            pat.span,
                            "explicitly ignore parameter",
                            format!("_: {}", ident),
                            Applicability::MachineApplicable,
                        );
                    }

                    err.note("anonymous parameters are removed in the 2018 edition (see RFC 1685)");
                }

                return Err(err);
            }

            self.eat_incorrect_doc_comment("a method argument's type");
            (pat, self.parse_ty()?)
        } else {
            debug!("parse_arg_general ident_to_pat");
            // Snapshot the parser so we can roll back if the "type" turns
            // out to be a pattern after all.
            let parser_snapshot_before_ty = self.clone();
            self.eat_incorrect_doc_comment("a method argument's type");
            let mut ty = self.parse_ty();
            if ty.is_ok() && self.token != token::Comma &&
               self.token != token::CloseDelim(token::Paren) {
                // This wasn't actually a type, but a pattern looking like a type,
                // so we are going to rollback and re-parse for recovery.
                ty = self.unexpected();
            }
            match ty {
                Ok(ty) => {
                    // Anonymous parameter: synthesize an invalid-ident binding.
                    let ident = Ident::new(keywords::Invalid.name(), self.prev_span);
                    let pat = P(Pat {
                        id: ast::DUMMY_NODE_ID,
                        node: PatKind::Ident(
                            BindingMode::ByValue(Mutability::Immutable), ident, None),
                        span: ty.span,
                    });
                    (pat, ty)
                }
                Err(mut err) => {
                    // Recover from attempting to parse the argument as a type without pattern.
                    err.cancel();
                    mem::replace(self, parser_snapshot_before_ty);
                    let pat = self.parse_pat(Some("argument name"))?;
                    self.expect(&token::Colon)?;
                    let ty = self.parse_ty()?;

                    let mut err = self.diagnostic().struct_span_err_with_code(
                        pat.span,
                        "patterns aren't allowed in methods without bodies",
                        DiagnosticId::Error("E0642".into()),
                    );
                    err.span_suggestion_short(
                        pat.span,
                        "give this argument a name or use an underscore to ignore it",
                        "_".to_owned(),
                        Applicability::MachineApplicable,
                    );
                    err.emit();

                    // Pretend the pattern is `_`, to avoid duplicate errors from AST validation.
                    let pat = P(Pat {
                        node: PatKind::Wild,
                        span: pat.span,
                        id: ast::DUMMY_NODE_ID
                    });
                    (pat, ty)
                }
            }
        };

        Ok(Arg { ty, pat, id: ast::DUMMY_NODE_ID })
    }
| |
/// Parses a single function argument.
crate fn parse_arg(&mut self) -> PResult<'a, Arg> {
    // The two flags are presumably (require_name, is_trait_item), given how
    // `parse_arg_general`'s body reads them — confirm against its signature.
    self.parse_arg_general(true, false)
}
| |
| /// Parses an argument in a lambda header (e.g., `|arg, arg|`). |
| fn parse_fn_block_arg(&mut self) -> PResult<'a, Arg> { |
| let pat = self.parse_pat(Some("argument name"))?; |
| let t = if self.eat(&token::Colon) { |
| self.parse_ty()? |
| } else { |
| P(Ty { |
| id: ast::DUMMY_NODE_ID, |
| node: TyKind::Infer, |
| span: self.prev_span, |
| }) |
| }; |
| Ok(Arg { |
| ty: t, |
| pat, |
| id: ast::DUMMY_NODE_ID |
| }) |
| } |
| |
| fn maybe_parse_fixed_length_of_vec(&mut self) -> PResult<'a, Option<P<ast::Expr>>> { |
| if self.eat(&token::Semi) { |
| Ok(Some(self.parse_expr()?)) |
| } else { |
| Ok(None) |
| } |
| } |
| |
/// Matches `token_lit = LIT_INTEGER | ...`.
fn parse_lit_token(&mut self) -> PResult<'a, LitKind> {
    let out = match self.token {
        // An interpolated (`$e`) fragment is accepted only when it is
        // itself a literal expression; anything else is "unexpected".
        token::Interpolated(ref nt) => match **nt {
            token::NtExpr(ref v) | token::NtLiteral(ref v) => match v.node {
                ExprKind::Lit(ref lit) => { lit.node.clone() }
                _ => { return self.unexpected_last(&self.token); }
            },
            _ => { return self.unexpected_last(&self.token); }
        },
        token::Literal(lit, suf) => {
            let diag = Some((self.span, &self.sess.span_diagnostic));
            let (suffix_illegal, result) = parse::lit_token(lit, suf, diag);

            // A suffix on a literal kind that doesn't allow one (e.g.
            // `"x"i32`) is diagnosed but parsing continues with `result`.
            if suffix_illegal {
                let sp = self.span;
                self.expect_no_suffix(sp, lit.literal_name(), suf)
            }

            result.unwrap()
        }
        // Recovery for a float literal missing its integer part, e.g.
        // `let x = .4;`: suggest `0.4` and return the fixed-up literal.
        token::Dot if self.look_ahead(1, |t| match t {
            token::Literal(parse::token::Lit::Integer(_) , _) => true,
            _ => false,
        }) => {
            let lo = self.span;
            self.bump();
            if let token::Literal(
                parse::token::Lit::Integer(val),
                suffix,
            ) = self.token {
                // Only float suffixes make sense on the recovered value;
                // any other suffix is dropped from the suggestion.
                let suffix = suffix.and_then(|s| {
                    let s = s.as_str().get();
                    if ["f32", "f64"].contains(&s) {
                        Some(s)
                    } else {
                        None
                    }
                }).unwrap_or("");
                self.bump();
                let sp = lo.to(self.prev_span);
                let mut err = self.diagnostic()
                    .struct_span_err(sp, "float literals must have an integer part");
                err.span_suggestion(
                    sp,
                    "must have an integer part",
                    format!("0.{}{}", val, suffix),
                    Applicability::MachineApplicable,
                );
                err.emit();
                return Ok(match suffix {
                    "f32" => ast::LitKind::Float(val, ast::FloatTy::F32),
                    "f64" => ast::LitKind::Float(val, ast::FloatTy::F64),
                    _ => ast::LitKind::FloatUnsuffixed(val),
                });
            } else {
                // The `look_ahead` in the guard guaranteed an integer
                // literal follows the dot.
                unreachable!();
            };
        }
        _ => { return self.unexpected_last(&self.token); }
    };

    self.bump();
    Ok(out)
}
| |
| /// Matches `lit = true | false | token_lit`. |
| crate fn parse_lit(&mut self) -> PResult<'a, Lit> { |
| let lo = self.span; |
| let lit = if self.eat_keyword(keywords::True) { |
| LitKind::Bool(true) |
| } else if self.eat_keyword(keywords::False) { |
| LitKind::Bool(false) |
| } else { |
| let lit = self.parse_lit_token()?; |
| lit |
| }; |
| Ok(source_map::Spanned { node: lit, span: lo.to(self.prev_span) }) |
| } |
| |
| /// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`). |
| crate fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> { |
| maybe_whole_expr!(self); |
| |
| let minus_lo = self.span; |
| let minus_present = self.eat(&token::BinOp(token::Minus)); |
| let lo = self.span; |
| let literal = self.parse_lit()?; |
| let hi = self.prev_span; |
| let expr = self.mk_expr(lo.to(hi), ExprKind::Lit(literal), ThinVec::new()); |
| |
| if minus_present { |
| let minus_hi = self.prev_span; |
| let unary = self.mk_unary(UnOp::Neg, expr); |
| Ok(self.mk_expr(minus_lo.to(minus_hi), unary, ThinVec::new())) |
| } else { |
| Ok(expr) |
| } |
| } |
| |
| fn parse_path_segment_ident(&mut self) -> PResult<'a, ast::Ident> { |
| match self.token { |
| token::Ident(ident, _) if self.token.is_path_segment_keyword() => { |
| let span = self.span; |
| self.bump(); |
| Ok(Ident::new(ident.name, span)) |
| } |
| _ => self.parse_ident(), |
| } |
| } |
| |
| fn parse_ident_or_underscore(&mut self) -> PResult<'a, ast::Ident> { |
| match self.token { |
| token::Ident(ident, false) if ident.name == keywords::Underscore.name() => { |
| let span = self.span; |
| self.bump(); |
| Ok(Ident::new(ident.name, span)) |
| } |
| _ => self.parse_ident(), |
| } |
| } |
| |
/// Parses a qualified path.
/// Assumes that the leading `<` has been parsed already.
///
/// `qualified_path = <type [as trait_ref]>::path`
///
/// # Examples
/// `<T>::default`
/// `<T as U>::a`
/// `<T as U>::F::a<S>` (without disambiguator)
/// `<T as U>::F::a::<S>` (with disambiguator)
fn parse_qpath(&mut self, style: PathStyle) -> PResult<'a, (QSelf, ast::Path)> {
    // `prev_span` is the already-consumed `<`, so the result spans it.
    let lo = self.prev_span;
    let ty = self.parse_ty()?;

    // `path` will contain the prefix of the path up to the `>`,
    // if any (e.g., `U` in the `<T as U>::*` examples
    // above). `path_span` has the span of that path, or an empty
    // span in the case of something like `<T>::Bar`.
    let (mut path, path_span);
    if self.eat_keyword(keywords::As) {
        let path_lo = self.span;
        path = self.parse_path(PathStyle::Type)?;
        path_span = path_lo.to(self.prev_span);
    } else {
        path = ast::Path { segments: Vec::new(), span: syntax_pos::DUMMY_SP };
        path_span = self.span.to(self.span);
    }

    // See doc comment for `unmatched_angle_bracket_count`.
    self.expect(&token::Gt)?;
    if self.unmatched_angle_bracket_count > 0 {
        self.unmatched_angle_bracket_count -= 1;
        debug!("parse_qpath: (decrement) count={:?}", self.unmatched_angle_bracket_count);
    }

    self.expect(&token::ModSep)?;

    // `position` records how many leading segments belong to the trait
    // prefix (zero for `<T>::Bar`).
    let qself = QSelf { ty, path_span, position: path.segments.len() };
    self.parse_path_segments(&mut path.segments, style, true)?;

    Ok((qself, ast::Path { segments: path.segments, span: lo.to(self.prev_span) }))
}
| |
/// Parses simple paths.
///
/// `path = [::] segment+`
/// `segment = ident | ident[::]<args> | ident[::](args) [-> type]`
///
/// # Examples
/// `a::b::C<D>` (without disambiguator)
/// `a::b::C::<D>` (with disambiguator)
/// `Fn(Args)` (without disambiguator)
/// `Fn::(Args)` (with disambiguator)
pub fn parse_path(&mut self, style: PathStyle) -> PResult<'a, ast::Path> {
    // The public entry point always enables the "unnecessary path
    // disambiguator" warning; see `parse_path_common`.
    self.parse_path_common(style, true)
}
| |
/// Worker for `parse_path`; `enable_warning` controls whether unnecessary
/// `::` disambiguators are warned about.
crate fn parse_path_common(&mut self, style: PathStyle, enable_warning: bool)
                           -> PResult<'a, ast::Path> {
    // An interpolated `$path` fragment short-circuits parsing; generic
    // arguments in it are rejected when a plain module path was expected.
    maybe_whole!(self, NtPath, |path| {
        if style == PathStyle::Mod &&
           path.segments.iter().any(|segment| segment.args.is_some()) {
            self.diagnostic().span_err(path.span, "unexpected generic arguments in path");
        }
        path
    });

    let lo = self.meta_var_span.unwrap_or(self.span);
    let mut segments = Vec::new();
    let mod_sep_ctxt = self.span.ctxt();
    // A leading `::` becomes a synthetic path-root segment carrying the
    // `::` token's hygiene context.
    if self.eat(&token::ModSep) {
        segments.push(PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt)));
    }
    self.parse_path_segments(&mut segments, style, enable_warning)?;

    Ok(ast::Path { segments, span: lo.to(self.prev_span) })
}
| |
| /// Like `parse_path`, but also supports parsing `Word` meta items into paths for |
| /// backwards-compatibility. This is used when parsing derive macro paths in `#[derive]` |
| /// attributes. |
| pub fn parse_path_allowing_meta(&mut self, style: PathStyle) -> PResult<'a, ast::Path> { |
| let meta_ident = match self.token { |
| token::Interpolated(ref nt) => match **nt { |
| token::NtMeta(ref meta) => match meta.node { |
| ast::MetaItemKind::Word => Some(meta.ident.clone()), |
| _ => None, |
| }, |
| _ => None, |
| }, |
| _ => None, |
| }; |
| if let Some(path) = meta_ident { |
| self.bump(); |
| return Ok(path); |
| } |
| self.parse_path(style) |
| } |
| |
/// Parses one or more `::`-separated path segments into `segments`,
/// stopping when `is_import_coupler` fires (presumably a use-tree `::{`
/// or `::*` — confirm in its definition) or when no `::` follows.
fn parse_path_segments(&mut self,
                       segments: &mut Vec<PathSegment>,
                       style: PathStyle,
                       enable_warning: bool)
                       -> PResult<'a, ()> {
    loop {
        let segment = self.parse_path_segment(style, enable_warning)?;
        if style == PathStyle::Expr {
            // In order to check for trailing angle brackets, we must have finished
            // recursing (`parse_path_segment` can indirectly call this function),
            // that is, the next token must be the highlighted part of the below example:
            //
            // `Foo::<Bar as Baz<T>>::Qux`
            //                      ^ here
            //
            // As opposed to the below highlight (if we had only finished the first
            // recursion):
            //
            // `Foo::<Bar as Baz<T>>::Qux`
            //                     ^ here
            //
            // `PathStyle::Expr` is only provided at the root invocation and never in
            // `parse_path_segment` to recurse and therefore can be checked to maintain
            // this invariant.
            self.check_trailing_angle_brackets(&segment, token::ModSep);
        }
        segments.push(segment);

        if self.is_import_coupler() || !self.eat(&token::ModSep) {
            return Ok(());
        }
    }
}
| |
/// Parses a single path segment: an identifier optionally followed by
/// generic arguments (`<...>` / `::<...>`) or parenthesized arguments
/// (`(...) [-> type]` / `::(...)`, `Fn`-sugar).
fn parse_path_segment(&mut self, style: PathStyle, enable_warning: bool)
                      -> PResult<'a, PathSegment> {
    let ident = self.parse_path_segment_ident()?;

    let is_args_start = |token: &token::Token| match *token {
        token::Lt | token::BinOp(token::Shl) | token::OpenDelim(token::Paren) => true,
        _ => false,
    };
    // Besides answering "do arguments start here?", this records `<` and
    // `(` as expected tokens for better diagnostics.
    let check_args_start = |this: &mut Self| {
        this.expected_tokens.extend_from_slice(
            &[TokenType::Token(token::Lt), TokenType::Token(token::OpenDelim(token::Paren))]
        );
        is_args_start(&this.token)
    };

    Ok(if style == PathStyle::Type && check_args_start(self) ||
          style != PathStyle::Mod && self.check(&token::ModSep)
                                  && self.look_ahead(1, |t| is_args_start(t)) {
        // Generic arguments are found - `<`, `(`, `::<` or `::(`.
        if self.eat(&token::ModSep) && style == PathStyle::Type && enable_warning {
            self.diagnostic().struct_span_warn(self.prev_span, "unnecessary path disambiguator")
                             .span_label(self.prev_span, "try removing `::`").emit();
        }
        let lo = self.span;

        // We use `style == PathStyle::Expr` to check if this is in a recursion or not. If
        // it isn't, then we reset the unmatched angle bracket count as we're about to start
        // parsing a new path.
        if style == PathStyle::Expr {
            self.unmatched_angle_bracket_count = 0;
            self.max_angle_bracket_count = 0;
        }

        let args = if self.eat_lt() {
            // `<'a, T, A = U>`
            let (args, bindings) =
                self.parse_generic_args_with_leaning_angle_bracket_recovery(style, lo)?;
            self.expect_gt()?;
            let span = lo.to(self.prev_span);
            AngleBracketedArgs { args, bindings, span }.into()
        } else {
            // `(T, U) -> R`
            self.bump(); // `(`
            let (inputs, recovered) = self.parse_seq_to_before_tokens(
                &[&token::CloseDelim(token::Paren)],
                SeqSep::trailing_allowed(token::Comma),
                TokenExpectType::Expect,
                |p| p.parse_ty())?;
            // When recovery already consumed up to the `)`, don't bump past it.
            if !recovered {
                self.bump(); // `)`
            }
            let span = lo.to(self.prev_span);
            let output = if self.eat(&token::RArrow) {
                Some(self.parse_ty_common(false, false)?)
            } else {
                None
            };
            ParenthesizedArgs { inputs, output, span }.into()
        };

        PathSegment { ident, args, id: ast::DUMMY_NODE_ID }
    } else {
        // Generic arguments are not found.
        PathSegment::from_ident(ident)
    })
}
| |
/// Returns `true` if the current token is a lifetime, recording that a
/// lifetime was among the expected tokens (for later diagnostics).
crate fn check_lifetime(&mut self) -> bool {
    self.expected_tokens.push(TokenType::Lifetime);
    self.token.is_lifetime()
}
| |
| /// Parses a single lifetime `'a` or panics. |
| crate fn expect_lifetime(&mut self) -> Lifetime { |
| if let Some(ident) = self.token.lifetime() { |
| let span = self.span; |
| self.bump(); |
| Lifetime { ident: Ident::new(ident.name, span), id: ast::DUMMY_NODE_ID } |
| } else { |
| self.span_bug(self.span, "not a lifetime") |
| } |
| } |
| |
| fn eat_label(&mut self) -> Option<Label> { |
| if let Some(ident) = self.token.lifetime() { |
| let span = self.span; |
| self.bump(); |
| Some(Label { ident: Ident::new(ident.name, span) }) |
| } else { |
| None |
| } |
| } |
| |
| /// Parses mutability (`mut` or nothing). |
| fn parse_mutability(&mut self) -> Mutability { |
| if self.eat_keyword(keywords::Mut) { |
| Mutability::Mutable |
| } else { |
| Mutability::Immutable |
| } |
| } |
| |
| fn parse_field_name(&mut self) -> PResult<'a, Ident> { |
| if let token::Literal(token::Integer(name), None) = self.token { |
| self.bump(); |
| Ok(Ident::new(name, self.prev_span)) |
| } else { |
| self.parse_ident_common(false) |
| } |
| } |
| |
/// Parse ident (COLON expr)?
fn parse_field(&mut self) -> PResult<'a, Field> {
    let attrs = self.parse_outer_attributes()?;
    let lo = self.span;

    // Check if a colon exists one ahead. This means we're parsing a fieldname.
    let (fieldname, expr, is_shorthand) = if self.look_ahead(1, |t| {
        t == &token::Colon || t == &token::Eq
    }) {
        let fieldname = self.parse_field_name()?;

        // Check for an equals token. This means the source incorrectly attempts to
        // initialize a field with an eq rather than a colon.
        if self.token == token::Eq {
            self.diagnostic()
                .struct_span_err(self.span, "expected `:`, found `=`")
                .span_suggestion(
                    fieldname.span.shrink_to_hi().to(self.span),
                    "replace equals symbol with a colon",
                    ":".to_string(),
                    Applicability::MachineApplicable,
                )
                .emit();
        }
        self.bump(); // `:`, or the erroneous `=` diagnosed just above
        (fieldname, self.parse_expr()?, false)
    } else {
        let fieldname = self.parse_ident_common(false)?;

        // Mimic `x: x` for the `x` field shorthand.
        let path = ast::Path::from_ident(fieldname);
        let expr = self.mk_expr(fieldname.span, ExprKind::Path(None, path), ThinVec::new());
        (fieldname, expr, true)
    };
    Ok(ast::Field {
        ident: fieldname,
        span: lo.to(expr.span),
        expr,
        is_shorthand,
        attrs: attrs.into(),
    })
}
| |
/// Wraps an expression node with its span and attributes into a `P<Expr>`
/// carrying a dummy node id (assigned properly during later AST passes).
fn mk_expr(&mut self, span: Span, node: ExprKind, attrs: ThinVec<Attribute>) -> P<Expr> {
    P(Expr { node, span, attrs, id: ast::DUMMY_NODE_ID })
}
| |
/// Builds a unary-operator expression node (`-e`, `!e`, `*e`).
fn mk_unary(&mut self, unop: ast::UnOp, expr: P<Expr>) -> ast::ExprKind {
    ExprKind::Unary(unop, expr)
}
| |
/// Builds a binary-operator expression node (`lhs op rhs`).
fn mk_binary(&mut self, binop: ast::BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ast::ExprKind {
    ExprKind::Binary(binop, lhs, rhs)
}
| |
/// Builds a call expression node (`f(args)`).
fn mk_call(&mut self, f: P<Expr>, args: Vec<P<Expr>>) -> ast::ExprKind {
    ExprKind::Call(f, args)
}
| |
/// Builds an indexing expression node (`expr[idx]`).
fn mk_index(&mut self, expr: P<Expr>, idx: P<Expr>) -> ast::ExprKind {
    ExprKind::Index(expr, idx)
}
| |
| fn mk_range(&mut self, |
| start: Option<P<Expr>>, |
| end: Option<P<Expr>>, |
| limits: RangeLimits) |
| -> PResult<'a, ast::ExprKind> { |
| if end.is_none() && limits == RangeLimits::Closed { |
| Err(self.span_fatal_err(self.span, Error::InclusiveRangeWithNoEnd)) |
| } else { |
| Ok(ExprKind::Range(start, end, limits)) |
| } |
| } |
| |
/// Builds a compound-assignment expression node (`lhs op= rhs`).
fn mk_assign_op(&mut self, binop: ast::BinOp,
                lhs: P<Expr>, rhs: P<Expr>) -> ast::ExprKind {
    ExprKind::AssignOp(binop, lhs, rhs)
}
| |
| pub fn mk_mac_expr(&mut self, span: Span, m: Mac_, attrs: ThinVec<Attribute>) -> P<Expr> { |
| P(Expr { |
| id: ast::DUMMY_NODE_ID, |
| node: ExprKind::Mac(source_map::Spanned {node: m, span: span}), |
| span, |
| attrs, |
| }) |
| } |
| |
/// Expects and consumes an entire delimited token tree — `(...)`, `[...]`
/// or `{...}` — returning the delimiter kind and the enclosed tokens.
/// Used for macro invocation bodies.
fn expect_delimited_token_tree(&mut self) -> PResult<'a, (MacDelimiter, TokenStream)> {
    let delim = match self.token {
        token::OpenDelim(delim) => delim,
        _ => {
            let msg = "expected open delimiter";
            let mut err = self.fatal(msg);
            err.span_label(self.span, msg);
            return Err(err)
        }
    };
    // Since we just saw an open delimiter, `parse_token_tree` must yield
    // a `Delimited` tree.
    let tts = match self.parse_token_tree() {
        TokenTree::Delimited(_, _, tts) => tts,
        _ => unreachable!(),
    };
    let delim = match delim {
        token::Paren => MacDelimiter::Parenthesis,
        token::Bracket => MacDelimiter::Bracket,
        token::Brace => MacDelimiter::Brace,
        token::NoDelim => self.bug("unexpected no delimiter"),
    };
    Ok((delim, tts.into()))
}
| |
/// At the bottom (top?) of the precedence hierarchy,
/// Parses things like parenthesized exprs, macros, `return`, etc.
///
/// N.B., this does not parse outer attributes, and is private because it only works
/// correctly if called from `parse_dot_or_call_expr()`.
fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> {
    maybe_whole_expr!(self);

    // Outer attributes are already parsed and will be
    // added to the return value after the fact.
    //
    // Therefore, prevent sub-parser from parsing
    // attributes by giving them a empty "already parsed" list.
    let mut attrs = ThinVec::new();

    let lo = self.span;
    let mut hi = self.span;

    let ex: ExprKind;

    // Note: when adding new syntax here, don't forget to adjust Token::can_begin_expr().
    match self.token {
        // `(...)`: parenthesized expression or tuple.
        token::OpenDelim(token::Paren) => {
            self.bump();

            attrs.extend(self.parse_inner_attributes()?);

            // (e) is parenthesized e
            // (e,) is a tuple with only one field, e
            let mut es = vec![];
            let mut trailing_comma = false;
            let mut recovered = false;
            while self.token != token::CloseDelim(token::Paren) {
                es.push(self.parse_expr()?);
                recovered = self.expect_one_of(
                    &[],
                    &[token::Comma, token::CloseDelim(token::Paren)],
                )?;
                if self.eat(&token::Comma) {
                    trailing_comma = true;
                } else {
                    trailing_comma = false;
                    break;
                }
            }
            // On recovery the `)` was already consumed; don't bump past it.
            if !recovered {
                self.bump();
            }

            hi = self.prev_span;
            ex = if es.len() == 1 && !trailing_comma {
                ExprKind::Paren(es.into_iter().nth(0).unwrap())
            } else {
                ExprKind::Tup(es)
            };
        }
        token::OpenDelim(token::Brace) => {
            return self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs);
        }
        // `|...|` or `||`: closure.
        token::BinOp(token::Or) | token::OrOr => {
            return self.parse_lambda_expr(attrs);
        }
        // `[...]`: array literal or repeat expression.
        token::OpenDelim(token::Bracket) => {
            self.bump();

            attrs.extend(self.parse_inner_attributes()?);

            if self.eat(&token::CloseDelim(token::Bracket)) {
                // Empty vector.
                ex = ExprKind::Array(Vec::new());
            } else {
                // Nonempty vector.
                let first_expr = self.parse_expr()?;
                if self.eat(&token::Semi) {
                    // Repeating array syntax: [ 0; 512 ]
                    let count = AnonConst {
                        id: ast::DUMMY_NODE_ID,
                        value: self.parse_expr()?,
                    };
                    self.expect(&token::CloseDelim(token::Bracket))?;
                    ex = ExprKind::Repeat(first_expr, count);
                } else if self.eat(&token::Comma) {
                    // Vector with two or more elements.
                    let remaining_exprs = self.parse_seq_to_end(
                        &token::CloseDelim(token::Bracket),
                        SeqSep::trailing_allowed(token::Comma),
                        |p| Ok(p.parse_expr()?)
                    )?;
                    let mut exprs = vec![first_expr];
                    exprs.extend(remaining_exprs);
                    ex = ExprKind::Array(exprs);
                } else {
                    // Vector with one element.
                    self.expect(&token::CloseDelim(token::Bracket))?;
                    ex = ExprKind::Array(vec![first_expr]);
                }
            }
            hi = self.prev_span;
        }
        // Everything else: keyword-introduced expressions, paths, macro
        // calls, and literals. Order matters — more specific checks first.
        _ => {
            if self.eat_lt() {
                let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
                hi = path.span;
                return Ok(self.mk_expr(lo.to(hi), ExprKind::Path(Some(qself), path), attrs));
            }
            if self.span.rust_2018() && self.check_keyword(keywords::Async)
            {
                if self.is_async_block() { // check for `async {` and `async move {`
                    return self.parse_async_block(attrs);
                } else {
                    return self.parse_lambda_expr(attrs);
                }
            }
            if self.check_keyword(keywords::Move) || self.check_keyword(keywords::Static) {
                return self.parse_lambda_expr(attrs);
            }
            if self.eat_keyword(keywords::If) {
                return self.parse_if_expr(attrs);
            }
            if self.eat_keyword(keywords::For) {
                let lo = self.prev_span;
                return self.parse_for_expr(None, lo, attrs);
            }
            if self.eat_keyword(keywords::While) {
                let lo = self.prev_span;
                return self.parse_while_expr(None, lo, attrs);
            }
            // Labeled expressions: `'label: loop { ... }` etc.
            if let Some(label) = self.eat_label() {
                let lo = label.ident.span;
                self.expect(&token::Colon)?;
                if self.eat_keyword(keywords::While) {
                    return self.parse_while_expr(Some(label), lo, attrs)
                }
                if self.eat_keyword(keywords::For) {
                    return self.parse_for_expr(Some(label), lo, attrs)
                }
                if self.eat_keyword(keywords::Loop) {
                    return self.parse_loop_expr(Some(label), lo, attrs)
                }
                if self.token == token::OpenDelim(token::Brace) {
                    return self.parse_block_expr(Some(label),
                                                 lo,
                                                 BlockCheckMode::Default,
                                                 attrs);
                }
                let msg = "expected `while`, `for`, `loop` or `{` after a label";
                let mut err = self.fatal(msg);
                err.span_label(self.span, msg);
                return Err(err);
            }
            if self.eat_keyword(keywords::Loop) {
                let lo = self.prev_span;
                return self.parse_loop_expr(None, lo, attrs);
            }
            if self.eat_keyword(keywords::Continue) {
                let label = self.eat_label();
                let ex = ExprKind::Continue(label);
                let hi = self.prev_span;
                return Ok(self.mk_expr(lo.to(hi), ex, attrs));
            }
            if self.eat_keyword(keywords::Match) {
                let match_sp = self.prev_span;
                return self.parse_match_expr(attrs).map_err(|mut err| {
                    err.span_label(match_sp, "while parsing this match expression");
                    err
                });
            }
            if self.eat_keyword(keywords::Unsafe) {
                return self.parse_block_expr(
                    None,
                    lo,
                    BlockCheckMode::Unsafe(ast::UserProvided),
                    attrs);
            }
            if self.is_do_catch_block() {
                let mut db = self.fatal("found removed `do catch` syntax");
                db.help("Following RFC #2388, the new non-placeholder syntax is `try`");
                return Err(db);
            }
            if self.is_try_block() {
                let lo = self.span;
                assert!(self.eat_keyword(keywords::Try));
                return self.parse_try_block(lo, attrs);
            }
            if self.eat_keyword(keywords::Return) {
                if self.token.can_begin_expr() {
                    let e = self.parse_expr()?;
                    hi = e.span;
                    ex = ExprKind::Ret(Some(e));
                } else {
                    ex = ExprKind::Ret(None);
                }
            } else if self.eat_keyword(keywords::Break) {
                let label = self.eat_label();
                // `break { ... }` is ambiguous with a loop body when struct
                // literals are forbidden, so treat the `{` as not ours.
                let e = if self.token.can_begin_expr()
                           && !(self.token == token::OpenDelim(token::Brace)
                                && self.restrictions.contains(
                                       Restrictions::NO_STRUCT_LITERAL)) {
                    Some(self.parse_expr()?)
                } else {
                    None
                };
                ex = ExprKind::Break(label, e);
                hi = self.prev_span;
            } else if self.eat_keyword(keywords::Yield) {
                if self.token.can_begin_expr() {
                    let e = self.parse_expr()?;
                    hi = e.span;
                    ex = ExprKind::Yield(Some(e));
                } else {
                    ex = ExprKind::Yield(None);
                }
            } else if self.token.is_keyword(keywords::Let) {
                // Catch this syntax error here, instead of in `parse_ident`, so
                // that we can explicitly mention that let is not to be used as an expression
                let mut db = self.fatal("expected expression, found statement (`let`)");
                db.span_label(self.span, "expected expression");
                db.note("variable declaration using `let` is a statement");
                return Err(db);
            } else if self.token.is_path_start() {
                let pth = self.parse_path(PathStyle::Expr)?;

                // `!`, as an operator, is prefix, so we know this isn't that
                if self.eat(&token::Not) {
                    // MACRO INVOCATION expression
                    let (delim, tts) = self.expect_delimited_token_tree()?;
                    let hi = self.prev_span;
                    let node = Mac_ { path: pth, tts, delim };
                    return Ok(self.mk_mac_expr(lo.to(hi), node, attrs))
                }
                if self.check(&token::OpenDelim(token::Brace)) {
                    // This is a struct literal, unless we're prohibited
                    // from parsing struct literals here.
                    let prohibited = self.restrictions.contains(
                        Restrictions::NO_STRUCT_LITERAL
                    );
                    if !prohibited {
                        return self.parse_struct_expr(lo, pth, attrs);
                    }
                }

                hi = pth.span;
                ex = ExprKind::Path(None, pth);
            } else {
                if !self.unclosed_delims.is_empty() && self.check(&token::Semi) {
                    // Don't complain about bare semicolons after unclosed braces
                    // recovery in order to keep the error count down. Fixing the
                    // delimiters will possibly also fix the bare semicolon found in
                    // expression context. For example, silence the following error:
                    // ```
                    // error: expected expression, found `;`
                    //  --> file.rs:2:13
                    //   |
                    // 2 |     foo(bar(;
                    //   |             ^ expected expression
                    // ```
                    self.bump();
                    return Ok(self.mk_expr(self.span, ExprKind::Err, ThinVec::new()));
                }
                // Last resort: a (possibly negated) literal.
                match self.parse_literal_maybe_minus() {
                    Ok(expr) => {
                        hi = expr.span;
                        ex = expr.node.clone();
                    }
                    Err(mut err) => {
                        self.cancel(&mut err);
                        let msg = format!("expected expression, found {}",
                                          self.this_token_descr());
                        let mut err = self.fatal(&msg);
                        err.span_label(self.span, "expected expression");
                        return Err(err);
                    }
                }
            }
        }
    }

    let expr = Expr { node: ex, span: lo.to(hi), id: ast::DUMMY_NODE_ID, attrs };
    let expr = self.maybe_recover_from_bad_qpath(expr, true)?;

    return Ok(P(expr));
}
| |
/// Parses a struct literal body `{ field: expr, .., ..base }` after the
/// path `pth` has been parsed; `lo` is where the whole expression began.
/// Contains extensive error recovery so one bad field doesn't abort the
/// rest of the literal.
fn parse_struct_expr(&mut self, lo: Span, pth: ast::Path, mut attrs: ThinVec<Attribute>)
                     -> PResult<'a, P<Expr>> {
    let struct_sp = lo.to(self.prev_span);
    self.bump();
    let mut fields = Vec::new();
    let mut base = None;

    attrs.extend(self.parse_inner_attributes()?);

    while self.token != token::CloseDelim(token::Brace) {
        // `..base`: functional-update base expression; must come last.
        if self.eat(&token::DotDot) {
            let exp_span = self.prev_span;
            match self.parse_expr() {
                Ok(e) => {
                    base = Some(e);
                }
                Err(mut e) => {
                    e.emit();
                    self.recover_stmt();
                }
            }
            if self.token == token::Comma {
                let mut err = self.sess.span_diagnostic.mut_span_err(
                    exp_span.to(self.prev_span),
                    "cannot use a comma after the base struct",
                );
                err.span_suggestion_short(
                    self.span,
                    "remove this comma",
                    String::new(),
                    Applicability::MachineApplicable
                );
                err.note("the base struct must always be the last field");
                err.emit();
                self.recover_stmt();
            }
            break;
        }

        let mut recovery_field = None;
        if let token::Ident(ident, _) = self.token {
            if !self.token.is_reserved_ident() && self.look_ahead(1, |t| *t == token::Colon) {
                // Use in case of error after field-looking code: `S { foo: () with a }`
                let mut ident = ident.clone();
                ident.span = self.span;
                recovery_field = Some(ast::Field {
                    ident,
                    span: self.span,
                    expr: self.mk_expr(self.span, ExprKind::Err, ThinVec::new()),
                    is_shorthand: false,
                    attrs: ThinVec::new(),
                });
            }
        }
        let mut parsed_field = None;
        match self.parse_field() {
            Ok(f) => parsed_field = Some(f),
            Err(mut e) => {
                e.span_label(struct_sp, "while parsing this struct");
                e.emit();

                // If the next token is a comma, then try to parse
                // what comes next as additional fields, rather than
                // bailing out until next `}`.
                if self.token != token::Comma {
                    self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
                    if self.token != token::Comma {
                        break;
                    }
                }
            }
        }

        match self.expect_one_of(&[token::Comma],
                                 &[token::CloseDelim(token::Brace)]) {
            Ok(_) => if let Some(f) = parsed_field.or(recovery_field) {
                // only include the field if there's no parse error for the field name
                fields.push(f);
            }
            Err(mut e) => {
                if let Some(f) = recovery_field {
                    fields.push(f);
                }
                e.span_label(struct_sp, "while parsing this struct");
                e.emit();
                self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
                self.eat(&token::Comma);
            }
        }
    }

    let span = lo.to(self.span);
    self.expect(&token::CloseDelim(token::Brace))?;
    return Ok(self.mk_expr(span, ExprKind::Struct(pth, fields, base), attrs));
}
| |
| fn parse_or_use_outer_attributes(&mut self, |
| already_parsed_attrs: Option<ThinVec<Attribute>>) |
| -> PResult<'a, ThinVec<Attribute>> { |
| if let Some(attrs) = already_parsed_attrs { |
| Ok(attrs) |
| } else { |
| self.parse_outer_attributes().map(|a| a.into()) |
| } |
| } |
| |
| /// Parses a block or unsafe block. |
| fn parse_block_expr(&mut self, opt_label: Option<Label>, |
| lo: Span, blk_mode: BlockCheckMode, |
| outer_attrs: ThinVec<Attribute>) |
| -> PResult<'a, P<Expr>> { |
| self.expect(&token::OpenDelim(token::Brace))?; |
| |
| let mut attrs = outer_attrs; |
| attrs.extend(self.parse_inner_attributes()?); |
| |
| let blk = self.parse_block_tail(lo, blk_mode)?; |
| return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs)); |
| } |
| |
/// Parses `a.b` or `a(13)` or `a[4]` or just `a`.
fn parse_dot_or_call_expr(&mut self,
                          already_parsed_attrs: Option<ThinVec<Attribute>>)
                          -> PResult<'a, P<Expr>> {
    let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;

    let b = self.parse_bottom_expr();
    // Presumably adjusts the span when `b` came from an interpolated
    // (`$expr`) fragment — confirm in `interpolated_or_expr_span`.
    let (span, b) = self.interpolated_or_expr_span(b)?;
    self.parse_dot_or_call_expr_with(b, span, attrs)
}
| |
/// Continues parsing `.field` / `.method(...)` / `(...)` / `[...]`
/// suffixes on an already-parsed expression `e0` starting at `lo`,
/// then attaches `attrs` to the final expression.
fn parse_dot_or_call_expr_with(&mut self,
                               e0: P<Expr>,
                               lo: Span,
                               mut attrs: ThinVec<Attribute>)
                               -> PResult<'a, P<Expr>> {
    // Stitch the list of outer attributes onto the return value.
    // A little bit ugly, but the best way given the current code
    // structure
    self.parse_dot_or_call_expr_with_(e0, lo)
        .map(|expr|
            expr.map(|mut expr| {
                // Outer attrs come first, then whatever the expression
                // already carried.
                attrs.extend::<Vec<_>>(expr.attrs.into());
                expr.attrs = attrs;
                match expr.node {
                    ExprKind::If(..) | ExprKind::IfLet(..) => {
                        if !expr.attrs.is_empty() {
                            // Just point to the first attribute in there...
                            let span = expr.attrs[0].span;

                            self.span_err(span,
                                "attributes are not yet allowed on `if` \
                                expressions");
                        }
                    }
                    _ => {}
                }
                expr
            })
        )
}
| |
/// Assuming we have just parsed `.`, continues parsing into an expression:
/// either a method call `expr.f(args)` / `expr.f::<T>(args)` or a field
/// access `expr.f`.
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
    let segment = self.parse_path_segment(PathStyle::Expr, true)?;
    // Diagnose stray `>`/`>>` left over after turbofish arguments.
    self.check_trailing_angle_brackets(&segment, token::OpenDelim(token::Paren));

    Ok(match self.token {
        token::OpenDelim(token::Paren) => {
            // Method call `expr.f()`
            let mut args = self.parse_unspanned_seq(
                &token::OpenDelim(token::Paren),
                &token::CloseDelim(token::Paren),
                SeqSep::trailing_allowed(token::Comma),
                |p| Ok(p.parse_expr()?)
            )?;
            // The receiver is stored as the first "argument" in the AST.
            args.insert(0, self_arg);

            let span = lo.to(self.prev_span);
            self.mk_expr(span, ExprKind::MethodCall(segment, args), ThinVec::new())
        }
        _ => {
            // Field access `expr.f`
            if let Some(args) = segment.args {
                // Fields cannot take generic arguments: `expr.f::<T>` is an error.
                self.span_err(args.span(),
                              "field expressions may not have generic arguments");
            }

            let span = lo.to(self.prev_span);
            self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), ThinVec::new())
        }
    })
}
| |
/// This function checks if there are trailing angle brackets and produces
/// a diagnostic to suggest removing them.
///
/// ```ignore (diagnostic)
/// let _ = vec![1, 2, 3].into_iter().collect::<Vec<usize>>>>();
///                                                        ^^ help: remove extra angle brackets
/// ```
fn check_trailing_angle_brackets(&mut self, segment: &PathSegment, end: token::Token) {
    // This function is intended to be invoked after parsing a path segment where there are two
    // cases:
    //
    // 1. A specific token is expected after the path segment.
    //    eg. `x.foo(`, `x.foo::<u32>(` (parenthesis - method call),
    //        `Foo::`, or `Foo::<Bar>::` (mod sep - continued path).
    // 2. No specific token is expected after the path segment.
    //    eg. `x.foo` (field access)
    //
    // This function is called after parsing `.foo` and before parsing the token `end` (if
    // present). This includes any angle bracket arguments, such as `.foo::<u32>` or
    // `Foo::<Bar>`.

    // We only care about trailing angle brackets if we previously parsed angle bracket
    // arguments. This helps stop us incorrectly suggesting that extra angle brackets be
    // removed in this case:
    //
    // `x.foo >> (3)` (where `x.foo` is a `u32` for example)
    //
    // This case is particularly tricky as we won't notice it just looking at the tokens -
    // it will appear the same (in terms of upcoming tokens) as below (since the `::<u32>` will
    // have already been parsed):
    //
    // `x.foo::<u32>>>(3)`
    let parsed_angle_bracket_args = segment.args
        .as_ref()
        .map(|args| args.is_angle_bracketed())
        .unwrap_or(false);

    debug!(
        "check_trailing_angle_brackets: parsed_angle_bracket_args={:?}",
        parsed_angle_bracket_args,
    );
    if !parsed_angle_bracket_args {
        return;
    }

    // Keep the span at the start so we can highlight the sequence of `>` characters to be
    // removed.
    let lo = self.span;

    // We need to look-ahead to see if we have `>` characters without moving the cursor forward
    // (since we might have the field access case and the characters we're eating are
    // actual operators and not trailing characters - ie `x.foo >> 3`).
    let mut position = 0;

    // We can encounter `>` or `>>` tokens in any order, so we need to keep track of how
    // many of each (so we can correctly pluralize our error messages) and continue to
    // advance.
    // NOTE: the closures below mutate these counters as a side effect of the
    // look-ahead; `position` only advances while brackets keep matching.
    let mut number_of_shr = 0;
    let mut number_of_gt = 0;
    while self.look_ahead(position, |t| {
        trace!("check_trailing_angle_brackets: t={:?}", t);
        if *t == token::BinOp(token::BinOpToken::Shr) {
            number_of_shr += 1;
            true
        } else if *t == token::Gt {
            number_of_gt += 1;
            true
        } else {
            false
        }
    }) {
        position += 1;
    }

    // If we didn't find any trailing `>` characters, then we have nothing to error about.
    debug!(
        "check_trailing_angle_brackets: number_of_gt={:?} number_of_shr={:?}",
        number_of_gt, number_of_shr,
    );
    if number_of_gt < 1 && number_of_shr < 1 {
        return;
    }

    // Finally, double check that we have our end token as otherwise this is the
    // second case.
    if self.look_ahead(position, |t| {
        trace!("check_trailing_angle_brackets: t={:?}", t);
        *t == end
    }) {
        // Eat from where we started until the end token so that parsing can continue
        // as if we didn't have those extra angle brackets.
        self.eat_to_tokens(&[&end]);
        let span = lo.until(self.span);

        // A single `>>` already represents two extra brackets, so it pluralizes on its own.
        let plural = number_of_gt > 1 || number_of_shr >= 1;
        self.diagnostic()
            .struct_span_err(
                span,
                &format!("unmatched angle bracket{}", if plural { "s" } else { "" }),
            )
            .span_suggestion(
                span,
                &format!("remove extra angle bracket{}", if plural { "s" } else { "" }),
                String::new(),
                Applicability::MachineApplicable,
            )
            .emit();
    }
}
| |
/// Parses the trailing suffix chain of an expression that started at `lo`:
/// any number of `?` operators, `.`-suffixes (named fields, method calls,
/// tuple indices), call parentheses `(...)`, and index brackets `[...]`,
/// folding each suffix into a new expression node.
fn parse_dot_or_call_expr_with_(&mut self, e0: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
    let mut e = e0;
    let mut hi;
    loop {
        // expr?
        while self.eat(&token::Question) {
            let hi = self.prev_span;
            e = self.mk_expr(lo.to(hi), ExprKind::Try(e), ThinVec::new());
        }

        // expr.f
        if self.eat(&token::Dot) {
            match self.token {
                token::Ident(..) => {
                    e = self.parse_dot_suffix(e, lo)?;
                }
                // Tuple-index access, e.g. `expr.0`.
                token::Literal(token::Integer(name), _) => {
                    let span = self.span;
                    self.bump();
                    let field = ExprKind::Field(e, Ident::new(name, span));
                    e = self.mk_expr(lo.to(span), field, ThinVec::new());
                }
                // `expr.1.0` lexes `1.0` as a float literal; recover by
                // suggesting the parenthesized form `(expr.1).0`.
                token::Literal(token::Float(n), _suf) => {
                    self.bump();
                    let fstr = n.as_str();
                    let mut err = self.diagnostic()
                        .struct_span_err(self.prev_span, &format!("unexpected token: `{}`", n));
                    err.span_label(self.prev_span, "unexpected token");
                    if fstr.chars().all(|x| "0123456789.".contains(x)) {
                        let float = match fstr.parse::<f64>().ok() {
                            Some(f) => f,
                            None => continue,
                        };
                        // Pretty-print `(<e>.<trunc>).<fraction digits>` as the suggestion.
                        let sugg = pprust::to_string(|s| {
                            use crate::print::pprust::PrintState;
                            s.popen()?;
                            s.print_expr(&e)?;
                            s.s.word( ".")?;
                            s.print_usize(float.trunc() as usize)?;
                            s.pclose()?;
                            s.s.word(".")?;
                            s.s.word(fstr.splitn(2, ".").last().unwrap().to_string())
                        });
                        err.span_suggestion(
                            lo.to(self.prev_span),
                            "try parenthesizing the first index",
                            sugg,
                            Applicability::MachineApplicable
                        );
                    }
                    return Err(err);

                }
                _ => {
                    // FIXME Could factor this out into non_fatal_unexpected or something.
                    let actual = self.this_token_to_string();
                    self.span_err(self.span, &format!("unexpected token: `{}`", actual));
                }
            }
            continue;
        }
        if self.expr_is_complete(&e) { break; }
        match self.token {
            // expr(...)
            token::OpenDelim(token::Paren) => {
                let es = self.parse_unspanned_seq(
                    &token::OpenDelim(token::Paren),
                    &token::CloseDelim(token::Paren),
                    SeqSep::trailing_allowed(token::Comma),
                    |p| Ok(p.parse_expr()?)
                )?;
                hi = self.prev_span;

                let nd = self.mk_call(e, es);
                e = self.mk_expr(lo.to(hi), nd, ThinVec::new());
            }

            // expr[...]
            // Could be either an index expression or a slicing expression.
            token::OpenDelim(token::Bracket) => {
                self.bump();
                let ix = self.parse_expr()?;
                hi = self.span;
                self.expect(&token::CloseDelim(token::Bracket))?;
                let index = self.mk_index(e, ix);
                e = self.mk_expr(lo.to(hi), index, ThinVec::new())
            }
            _ => return Ok(e)
        }
    }
    return Ok(e);
}
| |
/// Normalizes the current token when it comes out of macro machinery:
/// reports a leftover `$ident` macro variable (a `$` with non-empty hygiene
/// context followed by an identifier) as an error, and rewrites interpolated
/// `NtIdent`/`NtLifetime` tokens into plain identifier/lifetime tokens.
crate fn process_potential_macro_variable(&mut self) {
    let (token, span) = match self.token {
        // `$ident` that survived macro expansion — an unknown macro variable.
        token::Dollar if self.span.ctxt() != syntax_pos::hygiene::SyntaxContext::empty() &&
                         self.look_ahead(1, |t| t.is_ident()) => {
            self.bump();
            let name = match self.token {
                token::Ident(ident, _) => ident,
                _ => unreachable!()
            };
            let mut err = self.fatal(&format!("unknown macro variable `{}`", name));
            err.span_label(self.span, "unknown macro variable");
            err.emit();
            // Skip past the bad identifier and keep parsing.
            self.bump();
            return
        }
        token::Interpolated(ref nt) => {
            self.meta_var_span = Some(self.span);
            // Interpolated identifier and lifetime tokens are replaced with usual identifier
            // and lifetime tokens, so the former are never encountered during normal parsing.
            match **nt {
                token::NtIdent(ident, is_raw) => (token::Ident(ident, is_raw), ident.span),
                token::NtLifetime(ident) => (token::Lifetime(ident), ident.span),
                _ => return,
            }
        }
        _ => return,
    };
    self.token = token;
    self.span = span;
}
| |
/// Parses a single token tree from the input.
///
/// A delimited group (`(...)`, `[...]`, `{...}`) is consumed wholesale by
/// popping the token-cursor frame; any other token becomes a leaf
/// `TokenTree::Token`. Must not be called at a close delimiter or EOF.
crate fn parse_token_tree(&mut self) -> TokenTree {
    match self.token {
        token::OpenDelim(..) => {
            // Swap the current cursor frame for its parent, which skips the
            // whole delimited stream; the frame we took holds that stream.
            let frame = mem::replace(&mut self.token_cursor.frame,
                                     self.token_cursor.stack.pop().unwrap());
            self.span = frame.span.entire();
            self.bump();
            TokenTree::Delimited(
                frame.span,
                frame.delim,
                frame.tree_cursor.stream.into(),
            )
        },
        token::CloseDelim(_) | token::Eof => unreachable!(),
        _ => {
            // Take ownership of the current token, leaving a placeholder behind.
            let (token, span) = (mem::replace(&mut self.token, token::Whitespace), self.span);
            self.bump();
            TokenTree::Token(span, token)
        }
    }
}
| |
| // parse a stream of tokens into a list of TokenTree's, |
| // up to EOF. |
| pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> { |
| let mut tts = Vec::new(); |
| while self.token != token::Eof { |
| tts.push(self.parse_token_tree()); |
| } |
| Ok(tts) |
| } |
| |
| pub fn parse_tokens(&mut self) -> TokenStream { |
| let mut result = Vec::new(); |
| loop { |
| match self.token { |
| token::Eof | token::CloseDelim(..) => break, |
| _ => result.push(self.parse_token_tree().into()), |
| } |
| } |
| TokenStream::new(result) |
| } |
| |
/// Parse a prefix-unary-operator expr: `!e`, `-e`, `*e`, `&e`/`&&e`,
/// `box e`, the obsolete `in PLACE { BLOCK }` emplacement form, plus
/// error recovery for `~e` and `not e`. Falls through to
/// `parse_dot_or_call_expr` when no prefix operator is present.
fn parse_prefix_expr(&mut self,
                     already_parsed_attrs: Option<ThinVec<Attribute>>)
                     -> PResult<'a, P<Expr>> {
    let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
    let lo = self.span;
    // Note: when adding new unary operators, don't forget to adjust Token::can_begin_expr()
    let (hi, ex) = match self.token {
        token::Not => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), self.mk_unary(UnOp::Not, e))
        }
        // Suggest `!` for bitwise negation when encountering a `~`
        token::Tilde => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            let span_of_tilde = lo;
            let mut err = self.diagnostic()
                .struct_span_err(span_of_tilde, "`~` cannot be used as a unary operator");
            err.span_suggestion_short(
                span_of_tilde,
                "use `!` to perform bitwise negation",
                "!".to_owned(),
                Applicability::MachineApplicable
            );
            err.emit();
            // Recover as if the user had written `!e`.
            (lo.to(span), self.mk_unary(UnOp::Not, e))
        }
        token::BinOp(token::Minus) => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), self.mk_unary(UnOp::Neg, e))
        }
        token::BinOp(token::Star) => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), self.mk_unary(UnOp::Deref, e))
        }
        token::BinOp(token::And) | token::AndAnd => {
            // `expect_and` handles splitting a `&&` token into two `&` borrows.
            self.expect_and()?;
            let m = self.parse_mutability();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), ExprKind::AddrOf(m, e))
        }
        token::Ident(..) if self.token.is_keyword(keywords::In) => {
            // Obsolete emplacement syntax `in PLACE { EXPR }`.
            self.bump();
            let place = self.parse_expr_res(
                Restrictions::NO_STRUCT_LITERAL,
                None,
            )?;
            let blk = self.parse_block()?;
            let span = blk.span;
            let blk_expr = self.mk_expr(span, ExprKind::Block(blk, None), ThinVec::new());
            (lo.to(span), ExprKind::ObsoleteInPlace(place, blk_expr))
        }
        token::Ident(..) if self.token.is_keyword(keywords::Box) => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), ExprKind::Box(e))
        }
        token::Ident(..) if self.token.is_ident_named("not") => {
            // `not` is just an ordinary identifier in Rust-the-language,
            // but as `rustc`-the-compiler, we can issue clever diagnostics
            // for confused users who really want to say `!`
            let token_cannot_continue_expr = |t: &token::Token| match *t {
                // These tokens can start an expression after `!`, but
                // can't continue an expression after an ident
                token::Ident(ident, is_raw) => token::ident_can_begin_expr(ident, is_raw),
                token::Literal(..) | token::Pound => true,
                token::Interpolated(ref nt) => match **nt {
                    token::NtIdent(..) | token::NtExpr(..) |
                    token::NtBlock(..) | token::NtPath(..) => true,
                    _ => false,
                },
                _ => false
            };
            let cannot_continue_expr = self.look_ahead(1, token_cannot_continue_expr);
            if cannot_continue_expr {
                self.bump();
                // Emit the error ...
                let mut err = self.diagnostic()
                    .struct_span_err(self.span,
                                     &format!("unexpected {} after identifier",
                                              self.this_token_descr()));
                // span the `not` plus trailing whitespace to avoid
                // trailing whitespace after the `!` in our suggestion
                let to_replace = self.sess.source_map()
                    .span_until_non_whitespace(lo.to(self.span));
                err.span_suggestion_short(
                    to_replace,
                    "use `!` to perform logical negation",
                    "!".to_owned(),
                    Applicability::MachineApplicable
                );
                err.emit();
                // —and recover! (just as if we were in the block
                // for the `token::Not` arm)
                let e = self.parse_prefix_expr(None);
                let (span, e) = self.interpolated_or_expr_span(e)?;
                (lo.to(span), self.mk_unary(UnOp::Not, e))
            } else {
                // `not` is being used as a plain identifier here.
                return self.parse_dot_or_call_expr(Some(attrs));
            }
        }
        _ => { return self.parse_dot_or_call_expr(Some(attrs)); }
    };
    return Ok(self.mk_expr(lo.to(hi), ex, attrs));
}
| |
| /// Parses an associative expression. |
| /// |
| /// This parses an expression accounting for associativity and precedence of the operators in |
| /// the expression. |
| #[inline] |
| fn parse_assoc_expr(&mut self, |
| already_parsed_attrs: Option<ThinVec<Attribute>>) |
| -> PResult<'a, P<Expr>> { |
| self.parse_assoc_expr_with(0, already_parsed_attrs.into()) |
| } |
| |
/// Parses an associative expression with operators of at least `min_prec` precedence.
///
/// Precedence-climbing parser: parses a prefix expression as the LHS, then
/// repeatedly folds binary/assignment/range/cast operators onto it while
/// they bind at least as tightly as `min_prec`.
fn parse_assoc_expr_with(&mut self,
                         min_prec: usize,
                         lhs: LhsExpr)
                         -> PResult<'a, P<Expr>> {
    let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs {
        expr
    } else {
        let attrs = match lhs {
            LhsExpr::AttributesParsed(attrs) => Some(attrs),
            _ => None,
        };
        // A leading `..`/`...`/`..=` is a prefix range, not a unary operator.
        if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token) {
            return self.parse_prefix_range_expr(attrs);
        } else {
            self.parse_prefix_expr(attrs)?
        }
    };

    if self.expr_is_complete(&lhs) {
        // Semi-statement forms are odd. See https://github.com/rust-lang/rust/issues/29071
        return Ok(lhs);
    }
    self.expected_tokens.push(TokenType::Operator);
    while let Some(op) = AssocOp::from_token(&self.token) {

        // Adjust the span for interpolated LHS to point to the `$lhs` token and not to what
        // it refers to. Interpolated identifiers are unwrapped early and never show up here
        // as `PrevTokenKind::Interpolated` so if LHS is a single identifier we always process
        // it as "interpolated", it doesn't change the answer for non-interpolated idents.
        let lhs_span = match (self.prev_token_kind, &lhs.node) {
            (PrevTokenKind::Interpolated, _) => self.prev_span,
            (PrevTokenKind::Ident, &ExprKind::Path(None, ref path))
                if path.segments.len() == 1 => self.prev_span,
            _ => lhs.span,
        };

        let cur_op_span = self.span;
        let restrictions = if op.is_assign_like() {
            self.restrictions & Restrictions::NO_STRUCT_LITERAL
        } else {
            self.restrictions
        };
        // Operator binds more loosely than allowed: leave it for the caller.
        if op.precedence() < min_prec {
            break;
        }
        // Check for deprecated `...` syntax
        if self.token == token::DotDotDot && op == AssocOp::DotDotEq {
            self.err_dotdotdot_syntax(self.span);
        }

        self.bump();
        if op.is_comparison() {
            self.check_no_chained_comparison(&lhs, &op);
        }
        // Special cases:
        if op == AssocOp::As {
            lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?;
            continue
        } else if op == AssocOp::Colon {
            // Type ascription `expr: Ty`.
            lhs = match self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type) {
                Ok(lhs) => lhs,
                Err(mut err) => {
                    err.span_label(self.span,
                        "expecting a type here because of type ascription");
                    let cm = self.sess.source_map();
                    let cur_pos = cm.lookup_char_pos(self.span.lo());
                    let op_pos = cm.lookup_char_pos(cur_op_span.hi());
                    // A `:` at the end of a line was probably meant to be `;`.
                    if cur_pos.line != op_pos.line {
                        err.span_suggestion(
                            cur_op_span,
                            "try using a semicolon",
                            ";".to_string(),
                            Applicability::MaybeIncorrect // speculative
                        );
                    }
                    return Err(err);
                }
            };
            continue
        } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq {
            // If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to
            // generalise it to the Fixity::None code.
            //
            // We have 2 alternatives here: `x..y`/`x..=y` and `x..`/`x..=` The other
            // two variants are handled with `parse_prefix_range_expr` call above.
            let rhs = if self.is_at_start_of_range_notation_rhs() {
                Some(self.parse_assoc_expr_with(op.precedence() + 1,
                                                LhsExpr::NotYetParsed)?)
            } else {
                None
            };
            let (lhs_span, rhs_span) = (lhs.span, if let Some(ref x) = rhs {
                x.span
            } else {
                cur_op_span
            });
            let limits = if op == AssocOp::DotDot {
                RangeLimits::HalfOpen
            } else {
                RangeLimits::Closed
            };

            let r = self.mk_range(Some(lhs), rhs, limits)?;
            lhs = self.mk_expr(lhs_span.to(rhs_span), r, ThinVec::new());
            break
        }

        // Right-associative operators recurse at the same precedence so a
        // following equal-precedence operator binds to the RHS; left (and
        // none) recurse one level tighter so it does not.
        let rhs = match op.fixity() {
            Fixity::Right => self.with_res(
                restrictions - Restrictions::STMT_EXPR,
                |this| {
                    this.parse_assoc_expr_with(op.precedence(),
                        LhsExpr::NotYetParsed)
                }),
            Fixity::Left => self.with_res(
                restrictions - Restrictions::STMT_EXPR,
                |this| {
                    this.parse_assoc_expr_with(op.precedence() + 1,
                        LhsExpr::NotYetParsed)
                }),
            // We currently have no non-associative operators that are not handled above by
            // the special cases. The code is here only for future convenience.
            Fixity::None => self.with_res(
                restrictions - Restrictions::STMT_EXPR,
                |this| {
                    this.parse_assoc_expr_with(op.precedence() + 1,
                        LhsExpr::NotYetParsed)
                }),
        }?;

        // Make sure that the span of the parent node is larger than the span of lhs and rhs,
        // including the attributes.
        let lhs_span = lhs
            .attrs
            .iter()
            .filter(|a| a.style == AttrStyle::Outer)
            .next()
            .map_or(lhs_span, |a| a.span);
        let span = lhs_span.to(rhs.span);
        lhs = match op {
            AssocOp::Add | AssocOp::Subtract | AssocOp::Multiply | AssocOp::Divide |
            AssocOp::Modulus | AssocOp::LAnd | AssocOp::LOr | AssocOp::BitXor |
            AssocOp::BitAnd | AssocOp::BitOr | AssocOp::ShiftLeft | AssocOp::ShiftRight |
            AssocOp::Equal | AssocOp::Less | AssocOp::LessEqual | AssocOp::NotEqual |
            AssocOp::Greater | AssocOp::GreaterEqual => {
                let ast_op = op.to_ast_binop().unwrap();
                let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs);
                self.mk_expr(span, binary, ThinVec::new())
            }
            AssocOp::Assign =>
                self.mk_expr(span, ExprKind::Assign(lhs, rhs), ThinVec::new()),
            AssocOp::ObsoleteInPlace =>
                self.mk_expr(span, ExprKind::ObsoleteInPlace(lhs, rhs), ThinVec::new()),
            AssocOp::AssignOp(k) => {
                // Map the compound-assignment token to its binary op kind.
                let aop = match k {
                    token::Plus => BinOpKind::Add,
                    token::Minus => BinOpKind::Sub,
                    token::Star => BinOpKind::Mul,
                    token::Slash => BinOpKind::Div,
                    token::Percent => BinOpKind::Rem,
                    token::Caret => BinOpKind::BitXor,
                    token::And => BinOpKind::BitAnd,
                    token::Or => BinOpKind::BitOr,
                    token::Shl => BinOpKind::Shl,
                    token::Shr => BinOpKind::Shr,
                };
                let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs);
                self.mk_expr(span, aopexpr, ThinVec::new())
            }
            AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => {
                self.bug("AssocOp should have been handled by special case")
            }
        };

        if op.fixity() == Fixity::None { break }
    }
    Ok(lhs)
}
| |
/// Parses the type operand of `lhs as Ty` / `lhs: Ty`; `expr_kind` selects
/// between `ExprKind::Cast` and `ExprKind::Type`.
///
/// Recovers from ambiguity like `x as usize < y`: if the type fails to
/// parse, the parser is rewound and the operand re-parsed as a bare path so
/// that a following `<` or `<<` can be diagnosed as a comparison/shift
/// rather than the start of generic arguments.
fn parse_assoc_op_cast(&mut self, lhs: P<Expr>, lhs_span: Span,
                       expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind)
                       -> PResult<'a, P<Expr>> {
    let mk_expr = |this: &mut Self, rhs: P<Ty>| {
        this.mk_expr(lhs_span.to(rhs.span), expr_kind(lhs, rhs), ThinVec::new())
    };

    // Save the state of the parser before parsing type normally, in case there is a
    // LessThan comparison after this cast.
    let parser_snapshot_before_type = self.clone();
    match self.parse_ty_no_plus() {
        Ok(rhs) => {
            Ok(mk_expr(self, rhs))
        }
        Err(mut type_err) => {
            // Rewind to before attempting to parse the type with generics, to recover
            // from situations like `x as usize < y` in which we first tried to parse
            // `usize < y` as a type with generic arguments.
            let parser_snapshot_after_type = self.clone();
            mem::replace(self, parser_snapshot_before_type);

            match self.parse_path(PathStyle::Expr) {
                Ok(path) => {
                    let (op_noun, op_verb) = match self.token {
                        token::Lt => ("comparison", "comparing"),
                        token::BinOp(token::Shl) => ("shift", "shifting"),
                        _ => {
                            // We can end up here even without `<` being the next token, for
                            // example because `parse_ty_no_plus` returns `Err` on keywords,
                            // but `parse_path` returns `Ok` on them due to error recovery.
                            // Return original error and parser state.
                            mem::replace(self, parser_snapshot_after_type);
                            return Err(type_err);
                        }
                    };

                    // Successfully parsed the type path leaving a `<` yet to parse.
                    type_err.cancel();

                    // Report non-fatal diagnostics, keep `x as usize` as an expression
                    // in AST and continue parsing.
                    let msg = format!("`<` is interpreted as a start of generic \
                                       arguments for `{}`, not a {}", path, op_noun);
                    let mut err = self.sess.span_diagnostic.struct_span_err(self.span, &msg);
                    err.span_label(self.look_ahead_span(1).to(parser_snapshot_after_type.span),
                                   "interpreted as generic arguments");
                    err.span_label(self.span, format!("not interpreted as {}", op_noun));

                    let expr = mk_expr(self, P(Ty {
                        span: path.span,
                        node: TyKind::Path(None, path),
                        id: ast::DUMMY_NODE_ID
                    }));

                    // Suggest wrapping the cast in parens: `(x as usize) < y`.
                    let expr_str = self.sess.source_map().span_to_snippet(expr.span)
                        .unwrap_or_else(|_| pprust::expr_to_string(&expr));
                    err.span_suggestion(
                        expr.span,
                        &format!("try {} the cast value", op_verb),
                        format!("({})", expr_str),
                        Applicability::MachineApplicable
                    );
                    err.emit();

                    Ok(expr)
                }
                Err(mut path_err) => {
                    // Couldn't parse as a path, return original error and parser state.
                    path_err.cancel();
                    mem::replace(self, parser_snapshot_after_type);
                    Err(type_err)
                }
            }
        }
    }
}
| |
/// Produce an error if comparison operators are chained (RFC #558).
/// We only need to check lhs, not rhs, because all comparison ops
/// have same precedence and are left-associative
fn check_no_chained_comparison(&mut self, lhs: &Expr, outer_op: &AssocOp) {
    debug_assert!(outer_op.is_comparison(),
                  "check_no_chained_comparison: {:?} is not comparison",
                  outer_op);
    match lhs.node {
        ExprKind::Binary(op, _, _) if op.node.is_comparison() => {
            // respan to include both operators
            let op_span = op.span.to(self.span);
            let mut err = self.diagnostic().struct_span_err(op_span,
                "chained comparison operators require parentheses");
            // NOTE(review): `&&` binds tighter than `||`, so this condition
            // reads as `(inner is `<` && outer is `<`) || outer is `>`` —
            // i.e. the turbofish help also fires for any `>` outer op
            // regardless of the inner operator. Confirm that is intended.
            if op.node == BinOpKind::Lt &&
                *outer_op == AssocOp::Less ||  // Include `<` to provide this recommendation
                *outer_op == AssocOp::Greater  // even in a case like the following:
            {                                  //     Foo<Bar<Baz<Qux, ()>>>
                err.help(
                    "use `::<...>` instead of `<...>` if you meant to specify type arguments");
                err.help("or use `(...)` if you meant to specify fn arguments");
            }
            err.emit();
        }
        _ => {}
    }
}
| |
/// Parse prefix-forms of range notation: `..expr`, `..`, `..=expr`
/// (and the deprecated `...expr`, which is diagnosed and treated as `..=`).
fn parse_prefix_range_expr(&mut self,
                           already_parsed_attrs: Option<ThinVec<Attribute>>)
                           -> PResult<'a, P<Expr>> {
    // Check for deprecated `...` syntax
    if self.token == token::DotDotDot {
        self.err_dotdotdot_syntax(self.span);
    }

    debug_assert!([token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token),
                  "parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq",
                  self.token);
    let tok = self.token.clone();
    let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
    let lo = self.span;
    // `hi` stays at the dots themselves unless an end expression follows.
    let mut hi = self.span;
    self.bump();
    let opt_end = if self.is_at_start_of_range_notation_rhs() {
        // RHS must be parsed with more associativity than the dots.
        let next_prec = AssocOp::from_token(&tok).unwrap().precedence() + 1;
        Some(self.parse_assoc_expr_with(next_prec,
                                        LhsExpr::NotYetParsed)
            .map(|x|{
                hi = x.span;
                x
            })?)
    } else {
        None
    };
    // Only `..` is half-open; `..=` (and deprecated `...`) are closed.
    let limits = if tok == token::DotDot {
        RangeLimits::HalfOpen
    } else {
        RangeLimits::Closed
    };

    let r = self.mk_range(None, opt_end, limits)?;
    Ok(self.mk_expr(lo.to(hi), r, attrs))
}
| |
| fn is_at_start_of_range_notation_rhs(&self) -> bool { |
| if self.token.can_begin_expr() { |
| // parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`. |
| if self.token == token::OpenDelim(token::Brace) { |
| return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); |
| } |
| true |
| } else { |
| false |
| } |
| } |
| |
/// Parses an `if` or `if let` expression (`if` token already eaten).
fn parse_if_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
    // `if let ...` is handled by a dedicated parser.
    if self.check_keyword(keywords::Let) {
        return self.parse_if_let_expr(attrs);
    }
    let lo = self.prev_span;
    let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;

    // Verify that the parsed `if` condition makes sense as a condition. If it is a block, then
    // verify that the last statement is either an implicit return (no `;`) or an explicit
    // return. This won't catch blocks with an explicit `return`, but that would be caught by
    // the dead code lint.
    if self.eat_keyword(keywords::Else) || !cond.returns() {
        // What we parsed as the condition was really the `then` block of a
        // condition-less `if`; point at the spot right after the `if`.
        let sp = self.sess.source_map().next_point(lo);
        let mut err = self.diagnostic()
            .struct_span_err(sp, "missing condition for `if` statement");
        err.span_label(sp, "expected if condition here");
        return Err(err)
    }
    let not_block = self.token != token::OpenDelim(token::Brace);
    let thn = self.parse_block().map_err(|mut err| {
        if not_block {
            err.span_label(lo, "this `if` statement has a condition, but no block");
        }
        err
    })?;
    let mut els: Option<P<Expr>> = None;
    let mut hi = thn.span;
    if self.eat_keyword(keywords::Else) {
        let elexpr = self.parse_else_expr()?;
        hi = elexpr.span;
        els = Some(elexpr);
    }
    Ok(self.mk_expr(lo.to(hi), ExprKind::If(cond, thn, els), attrs))
}
| |
| /// Parses an `if let` expression (`if` token already eaten). |
| fn parse_if_let_expr(&mut self, attrs: ThinVec<Attribute>) |
| -> PResult<'a, P<Expr>> { |
| let lo = self.prev_span; |
| self.expect_keyword(keywords::Let)?; |
| let pats = self.parse_pats()?; |
| self.expect(&token::Eq)?; |
| let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; |
| let thn = self.parse_block()?; |
| let (hi, els) = if self.eat_keyword(keywords::Else) { |
| let expr = self.parse_else_expr()?; |
| (expr.span, Some(expr)) |
| } else { |
| (thn.span, None) |
| }; |
| Ok(self.mk_expr(lo.to(hi), ExprKind::IfLet(pats, expr, thn, els), attrs)) |
| } |
| |
/// Parses `move |args| expr`.
///
/// Handles the full closure grammar: an optional leading `static`
/// (selects `Movability::Static`), an optional `async` (accepted only on
/// the 2018 edition), an optional `move` capture clause, the argument
/// list, and either a bare body expression or — when a return type is
/// given — a mandatory block body.
fn parse_lambda_expr(&mut self,
                     attrs: ThinVec<Attribute>)
                     -> PResult<'a, P<Expr>>
{
    let lo = self.span;
    let movability = if self.eat_keyword(keywords::Static) {
        Movability::Static
    } else {
        Movability::Movable
    };
    // `async` closures only exist in the 2018 edition.
    let asyncness = if self.span.rust_2018() {
        self.parse_asyncness()
    } else {
        IsAsync::NotAsync
    };
    let capture_clause = if self.eat_keyword(keywords::Move) {
        CaptureBy::Value
    } else {
        CaptureBy::Ref
    };
    let decl = self.parse_fn_block_decl()?;
    let decl_hi = self.prev_span;
    let body = match decl.output {
        FunctionRetTy::Default(_) => {
            let restrictions = self.restrictions - Restrictions::STMT_EXPR;
            self.parse_expr_res(restrictions, None)?
        },
        _ => {
            // If an explicit return type is given, require a
            // block to appear (RFC 968).
            let body_lo = self.span;
            self.parse_block_expr(None, body_lo, BlockCheckMode::Default, ThinVec::new())?
        }
    };

    Ok(self.mk_expr(
        lo.to(body.span),
        ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)),
        attrs))
}
| |
| // `else` token already eaten |
| fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> { |
| if self.eat_keyword(keywords::If) { |
| return self.parse_if_expr(ThinVec::new()); |
| } else { |
| let blk = self.parse_block()?; |
| return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None), ThinVec::new())); |
| } |
| } |
| |
/// Parse a 'for' .. 'in' expression ('for' token already eaten).
/// Recovers from a missing `in` and from a duplicated `in in`.
fn parse_for_expr(&mut self, opt_label: Option<Label>,
                  span_lo: Span,
                  mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
    // Parse: `for <src_pat> in <src_expr> <src_loop_block>`

    let pat = self.parse_top_level_pat()?;
    if !self.eat_keyword(keywords::In) {
        // Missing `in`: suggest inserting it and keep parsing.
        let in_span = self.prev_span.between(self.span);
        let mut err = self.sess.span_diagnostic
            .struct_span_err(in_span, "missing `in` in `for` loop");
        err.span_suggestion_short(
            in_span, "try adding `in` here", " in ".into(),
            // has been misleading, at least in the past (closed Issue #48492)
            Applicability::MaybeIncorrect
        );
        err.emit();
    }
    let in_span = self.prev_span;
    if self.eat_keyword(keywords::In) {
        // a common typo: `for _ in in bar {}`
        let mut err = self.sess.span_diagnostic.struct_span_err(
            self.prev_span,
            "expected iterable, found keyword `in`",
        );
        err.span_suggestion_short(
            in_span.until(self.prev_span),
            "remove the duplicated `in`",
            String::new(),
            Applicability::MachineApplicable,
        );
        err.note("if you meant to use emplacement syntax, it is obsolete (for now, anyway)");
        err.note("for more information on the status of emplacement syntax, see <\
                  https://github.com/rust-lang/rust/issues/27779#issuecomment-378416911>");
        err.emit();
    }
    let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
    let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?;
    attrs.extend(iattrs);

    let hi = self.prev_span;
    Ok(self.mk_expr(span_lo.to(hi), ExprKind::ForLoop(pat, expr, loop_block, opt_label), attrs))
}
| |
| /// Parses a `while` or `while let` expression (`while` token already eaten). |
| fn parse_while_expr(&mut self, opt_label: Option<Label>, |
| span_lo: Span, |
| mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { |
| if self.token.is_keyword(keywords::Let) { |
| return self.parse_while_let_expr(opt_label, span_lo, attrs); |
| } |
| let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; |
| let (iattrs, body) = self.parse_inner_attrs_and_block()?; |
| attrs.extend(iattrs); |
| let span = span_lo.to(body.span); |
| return Ok(self.mk_expr(span, ExprKind::While(cond, body, opt_label), attrs)); |
| } |
| |
| /// Parses a `while let` expression (`while` token already eaten). |
| fn parse_while_let_expr(&mut self, opt_label: Option<Label>, |
| span_lo: Span, |
| mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { |
| self.expect_keyword(keywords::Let)?; |
| let pats = self.parse_pats()?; |
| self.expect(&token::Eq)?; |
| let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; |
| let (iattrs, body) = self.parse_inner_attrs_and_block()?; |
| attrs.extend(iattrs); |
| let span = span_lo.to(body.span); |
| return Ok(self.mk_expr(span, ExprKind::WhileLet(pats, expr, body, opt_label), attrs)); |
| } |
| |
| // parse `loop {...}`, `loop` token already eaten |
| fn parse_loop_expr(&mut self, opt_label: Option<Label>, |
| span_lo: Span, |
| mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { |
| let (iattrs, body) = self.parse_inner_attrs_and_block()?; |
| attrs.extend(iattrs); |
| let span = span_lo.to(body.span); |
| Ok(self.mk_expr(span, ExprKind::Loop(body, opt_label), attrs)) |
| } |
| |
| /// Parses an `async move {...}` expression. |
| pub fn parse_async_block(&mut self, mut attrs: ThinVec<Attribute>) |
| -> PResult<'a, P<Expr>> |
| { |
| let span_lo = self.span; |
| self.expect_keyword(keywords::Async)?; |
| let capture_clause = if self.eat_keyword(keywords::Move) { |
| CaptureBy::Value |
| } else { |
| CaptureBy::Ref |
| }; |
| let (iattrs, body) = self.parse_inner_attrs_and_block()?; |
| attrs.extend(iattrs); |
| Ok(self.mk_expr( |
| span_lo.to(body.span), |
| ExprKind::Async(capture_clause, ast::DUMMY_NODE_ID, body), attrs)) |
| } |
| |
| /// Parses a `try {...}` expression (`try` token already eaten). |
| fn parse_try_block(&mut self, span_lo: Span, mut attrs: ThinVec<Attribute>) |
| -> PResult<'a, P<Expr>> |
| { |
| let (iattrs, body) = self.parse_inner_attrs_and_block()?; |
| attrs.extend(iattrs); |
| Ok(self.mk_expr(span_lo.to(body.span), ExprKind::TryBlock(body), attrs)) |
| } |
| |
| // `match` token already eaten |
| fn parse_match_expr(&mut self, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { |
| let match_span = self.prev_span; |
| let lo = self.prev_span; |
| let discriminant = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, |
| None)?; |
| if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) { |
| if self.token == token::Token::Semi { |
| e.span_suggestion_short( |
| match_span, |
| "try removing this `match`", |
| String::new(), |
| Applicability::MaybeIncorrect // speculative |
| ); |
| } |
| return Err(e) |
| } |
| attrs.extend(self.parse_inner_attributes()?); |
| |
| let mut arms: Vec<Arm> = Vec::new(); |
| while self.token != token::CloseDelim(token::Brace) { |
| match self.parse_arm() { |
| Ok(arm) => arms.push(arm), |
| Err(mut e) => { |
| // Recover by skipping to the end of the block. |
| e.emit(); |
| self.recover_stmt(); |
| let span = lo.to(self.span); |
| if self.token == token::CloseDelim(token::Brace) { |
| self.bump(); |
| } |
| return Ok(self.mk_expr(span, ExprKind::Match(discriminant, arms), attrs)); |
| } |
| } |
| } |
| let hi = self.span; |
| self.bump(); |
| return Ok(self.mk_expr(lo.to(hi), ExprKind::Match(discriminant, arms), attrs)); |
| } |
| |
/// Parses a single `match` arm: outer attributes, one or more `|`-separated
/// patterns, an optional `if` guard, `=>`, and the arm body. A trailing
/// comma is required unless the body is a block-like expression or the arm
/// is the last one before `}`.
crate fn parse_arm(&mut self) -> PResult<'a, Arm> {
    // Short-circuit if the arm was already parsed as an interpolated
    // `NtArm` nonterminal (e.g. from a macro expansion).
    maybe_whole!(self, NtArm, |x| x);

    let attrs = self.parse_outer_attributes()?;
    let pats = self.parse_pats()?;
    let guard = if self.eat_keyword(keywords::If) {
        Some(Guard::If(self.parse_expr()?))
    } else {
        None
    };
    // Remember these spans for diagnostics below.
    let arrow_span = self.span;
    self.expect(&token::FatArrow)?;
    let arm_start_span = self.span;

    // STMT_EXPR so that statement-like expressions (blocks, `if`, etc.)
    // are accepted as arm bodies.
    let expr = self.parse_expr_res(Restrictions::STMT_EXPR, None)
        .map_err(|mut err| {
            err.span_label(arrow_span, "while parsing the `match` arm starting here");
            err
        })?;

    // Block-like bodies (and the last arm before `}`) don't need a comma.
    let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
        && self.token != token::CloseDelim(token::Brace);

    if require_comma {
        let cm = self.sess.source_map();
        self.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)])
            .map_err(|mut err| {
                match (cm.span_to_lines(expr.span), cm.span_to_lines(arm_start_span)) {
                    (Ok(ref expr_lines), Ok(ref arm_start_lines))
                    if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col
                        && expr_lines.lines.len() == 2
                        && self.token == token::FatArrow => {
                        // We check whether there's any trailing code in the parse span,
                        // if there isn't, we very likely have the following:
                        //
                        // X |     &Y => "y"
                        //   |        --  - missing comma
                        //   |        |
                        //   |        arrow_span
                        // X |     &X => "x"
                        //   |      - ^^ self.span
                        //   |      |
                        //   |      parsed until here as `"y" & X`
                        err.span_suggestion_short(
                            cm.next_point(arm_start_span),
                            "missing a comma here to end this `match` arm",
                            ",".to_owned(),
                            Applicability::MachineApplicable
                        );
                    }
                    _ => {
                        err.span_label(arrow_span,
                                       "while parsing the `match` arm starting here");
                    }
                }
                err
            })?;
    } else {
        // Comma is optional here; eat one if present.
        self.eat(&token::Comma);
    }

    Ok(ast::Arm {
        attrs,
        pats,
        guard,
        body: expr,
    })
}
| |
| /// Parses an expression. |
| #[inline] |
| pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> { |
| self.parse_expr_res(Restrictions::empty(), None) |
| } |
| |
| /// Evaluates the closure with restrictions in place. |
| /// |
| /// Afters the closure is evaluated, restrictions are reset. |
| fn with_res<F, T>(&mut self, r: Restrictions, f: F) -> T |
| where F: FnOnce(&mut Self) -> T |
| { |
| let old = self.restrictions; |
| self.restrictions = r; |
| let r = f(self); |
| self.restrictions = old; |
| return r; |
| |
|