Remove DiagnosticBuilder::buffer #119841

Merged 12 commits on Jan 11, 2024
Fix lifetimes in StringReader.
Two different lifetimes (the `ParseSess` lifetime and the source-text lifetime) are conflated into a single `'a`. This doesn't matter right now, but it needs to be fixed for the next commit to work. The more descriptive lifetime names `'sess` and `'src` also make the code easier to read.
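A minimal sketch of the distinction, using hypothetical `Session` and reader types rather than the real rustc ones: with a single conflated `'a`, a reference handed out at `'a` (like the return value of `StringReader::dcx`) is tied to the shorter of the two borrows, whereas separate `'sess` and `'src` parameters let the session reference outlive the borrow of the source text.

```rust
// Hypothetical stand-ins for ParseSess and the source text; illustrative only.
struct Session {
    diagnostics: String,
}

// One conflated lifetime: the session borrow and the source borrow must be
// shrunk to a common region, so anything returned at `'a` is tied to both.
#[allow(dead_code)]
struct ReaderOneLifetime<'a> {
    sess: &'a Session,
    src: &'a str,
}

// Two lifetimes: the session borrow and the source borrow are independent.
struct ReaderTwoLifetimes<'sess, 'src> {
    sess: &'sess Session,
    src: &'src str,
}

impl<'sess, 'src> ReaderTwoLifetimes<'sess, 'src> {
    // The returned reference lives as long as the session, regardless of how
    // long the source text is borrowed (mirrors `StringReader::dcx`).
    fn session(&self) -> &'sess Session {
        self.sess
    }
}

fn main() {
    let sess = Session { diagnostics: String::new() };
    let sess_ref;
    {
        let src = String::from("fn main() {}");
        let reader = ReaderTwoLifetimes { sess: &sess, src: &src };
        sess_ref = reader.session();
        let _first_token = &reader.src[..2]; // borrow of the source text
    } // `src` ends here, but the `'sess` borrow in `sess_ref` is unaffected
    // With the single conflated `'a`, `sess_ref` would be limited by the
    // scope of `src` and this use would not compile.
    println!("{}", sess_ref.diagnostics.len());
}
```

This is presumably the property the next commit relies on: `dcx()` can return a `&'sess DiagCtxt` that is not constrained by the `'src` borrow of the file being lexed.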
nnethercote committed Jan 11, 2024

commit d02150fd45b4e63007a446f497ce032f48b1166e
24 changes: 12 additions & 12 deletions compiler/rustc_parse/src/lexer/mod.rs
@@ -42,9 +42,9 @@ pub struct UnmatchedDelim {
     pub candidate_span: Option<Span>,
 }
 
-pub(crate) fn parse_token_trees<'a>(
-    sess: &'a ParseSess,
-    mut src: &'a str,
+pub(crate) fn parse_token_trees<'sess, 'src>(
+    sess: &'sess ParseSess,
+    mut src: &'src str,
     mut start_pos: BytePos,
     override_span: Option<Span>,
 ) -> Result<TokenStream, Vec<Diagnostic>> {
@@ -90,25 +90,25 @@ pub(crate) fn parse_token_trees<'a>(
     }
 }
 
-struct StringReader<'a> {
-    sess: &'a ParseSess,
+struct StringReader<'sess, 'src> {
+    sess: &'sess ParseSess,
     /// Initial position, read-only.
     start_pos: BytePos,
     /// The absolute offset within the source_map of the current character.
     pos: BytePos,
     /// Source text to tokenize.
-    src: &'a str,
+    src: &'src str,
     /// Cursor for getting lexer tokens.
-    cursor: Cursor<'a>,
+    cursor: Cursor<'src>,
     override_span: Option<Span>,
     /// When a "unknown start of token: \u{a0}" has already been emitted earlier
     /// in this file, it's safe to treat further occurrences of the non-breaking
     /// space character as whitespace.
     nbsp_is_whitespace: bool,
 }
 
-impl<'a> StringReader<'a> {
-    pub fn dcx(&self) -> &'a DiagCtxt {
+impl<'sess, 'src> StringReader<'sess, 'src> {
+    pub fn dcx(&self) -> &'sess DiagCtxt {
         &self.sess.dcx
     }

@@ -526,7 +526,7 @@ impl<'a> StringReader<'a> {
 
     /// Slice of the source text from `start` up to but excluding `self.pos`,
     /// meaning the slice does not include the character `self.ch`.
-    fn str_from(&self, start: BytePos) -> &'a str {
+    fn str_from(&self, start: BytePos) -> &'src str {
         self.str_from_to(start, self.pos)
     }

@@ -537,12 +537,12 @@ impl<'a> StringReader<'a> {
     }
 
     /// Slice of the source text spanning from `start` up to but excluding `end`.
-    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'a str {
+    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'src str {
         &self.src[self.src_index(start)..self.src_index(end)]
     }
 
     /// Slice of the source text spanning from `start` until the end
-    fn str_from_to_end(&self, start: BytePos) -> &'a str {
+    fn str_from_to_end(&self, start: BytePos) -> &'src str {
         &self.src[self.src_index(start)..]
     }

24 changes: 14 additions & 10 deletions compiler/rustc_parse/src/lexer/tokentrees.rs
@@ -8,18 +8,18 @@ use rustc_ast_pretty::pprust::token_to_string;
 use rustc_errors::{Applicability, PErr};
 use rustc_span::symbol::kw;
 
-pub(super) struct TokenTreesReader<'a> {
-    string_reader: StringReader<'a>,
+pub(super) struct TokenTreesReader<'sess, 'src> {
+    string_reader: StringReader<'sess, 'src>,
     /// The "next" token, which has been obtained from the `StringReader` but
     /// not yet handled by the `TokenTreesReader`.
     token: Token,
     diag_info: TokenTreeDiagInfo,
 }
 
-impl<'a> TokenTreesReader<'a> {
+impl<'sess, 'src> TokenTreesReader<'sess, 'src> {
     pub(super) fn parse_all_token_trees(
-        string_reader: StringReader<'a>,
-    ) -> (TokenStream, Result<(), Vec<PErr<'a>>>, Vec<UnmatchedDelim>) {
+        string_reader: StringReader<'sess, 'src>,
+    ) -> (TokenStream, Result<(), Vec<PErr<'sess>>>, Vec<UnmatchedDelim>) {
         let mut tt_reader = TokenTreesReader {
             string_reader,
             token: Token::dummy(),
@@ -35,7 +35,7 @@ impl<'a> TokenTreesReader<'a> {
     fn parse_token_trees(
         &mut self,
         is_delimited: bool,
-    ) -> (Spacing, TokenStream, Result<(), Vec<PErr<'a>>>) {
+    ) -> (Spacing, TokenStream, Result<(), Vec<PErr<'sess>>>) {
         // Move past the opening delimiter.
         let (_, open_spacing) = self.bump(false);

@@ -71,7 +71,7 @@ impl<'a> TokenTreesReader<'a> {
         }
     }
 
-    fn eof_err(&mut self) -> PErr<'a> {
+    fn eof_err(&mut self) -> PErr<'sess> {
         let msg = "this file contains an unclosed delimiter";
         let mut err = self.string_reader.sess.dcx.struct_span_err(self.token.span, msg);
         for &(_, sp) in &self.diag_info.open_braces {
@@ -99,7 +99,7 @@ impl<'a> TokenTreesReader<'a> {
     fn parse_token_tree_open_delim(
         &mut self,
         open_delim: Delimiter,
-    ) -> Result<TokenTree, Vec<PErr<'a>>> {
+    ) -> Result<TokenTree, Vec<PErr<'sess>>> {
         // The span for beginning of the delimited section
         let pre_span = self.token.span;

@@ -229,7 +229,11 @@ impl<'a> TokenTreesReader<'a> {
         (this_tok, this_spacing)
     }
 
-    fn unclosed_delim_err(&mut self, tts: TokenStream, mut errs: Vec<PErr<'a>>) -> Vec<PErr<'a>> {
+    fn unclosed_delim_err(
+        &mut self,
+        tts: TokenStream,
+        mut errs: Vec<PErr<'sess>>,
+    ) -> Vec<PErr<'sess>> {
         // If there are unclosed delims, see if there are diff markers and if so, point them
         // out instead of complaining about the unclosed delims.
         let mut parser = crate::stream_to_parser(self.string_reader.sess, tts, None);
@@ -285,7 +289,7 @@ impl<'a> TokenTreesReader<'a> {
         return errs;
     }
 
-    fn close_delim_err(&mut self, delim: Delimiter) -> PErr<'a> {
+    fn close_delim_err(&mut self, delim: Delimiter) -> PErr<'sess> {
         // An unexpected closing delimiter (i.e., there is no
         // matching opening delimiter).
         let token_str = token_to_string(&self.token);
2 changes: 1 addition & 1 deletion compiler/rustc_parse/src/lexer/unicode_chars.rs
@@ -337,7 +337,7 @@ const ASCII_ARRAY: &[(&str, &str, Option<token::TokenKind>)] = &[
 ];
 
 pub(super) fn check_for_substitution(
-    reader: &StringReader<'_>,
+    reader: &StringReader<'_, '_>,
     pos: BytePos,
     ch: char,
     count: usize,