Omit integer suffix when unnecessary

See PR #21378 for context
Alfie John
2015-01-28 01:01:48 +00:00
parent ca4b9674c2
commit 9683745fed
15 changed files with 142 additions and 142 deletions
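The change is mechanical: drop the old `us` literal suffix wherever the literal's type is already determined by context. In the pre-1.0 syntax shown in this diff, `us` is the integer suffix for `usize` (today spelled `usize`), and it is redundant when inference can pin the literal down, for example from a comparison against a `usize` value. A minimal sketch, in current Rust spelling and with made-up variable names, of why the suffix can be omitted:

    fn main() {
        let lines = vec!["a".to_string(), "b".to_string()];

        // Explicit suffix: the modern spelling of what the old code wrote as `0us`.
        let explicit = 0usize;

        // Suffix omitted: the literal is still inferred as `usize`, because `i`
        // is compared against `lines.len()`, which returns `usize`.
        let mut i = 0;
        while i < lines.len() {
            i += 1;
        }

        assert_eq!(explicit + i, lines.len());
    }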

View File

@@ -62,7 +62,7 @@ pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
pub fn strip_doc_comment_decoration(comment: &str) -> String {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: Vec<String> ) -> Vec<String> {
- let mut i = 0us;
+ let mut i = 0;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 &&
@@ -158,7 +158,7 @@ fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
comments: &mut Vec<Comment>) {
while is_whitespace(rdr.curr) && !rdr.is_eof() {
- if rdr.col == CharPos(0us) && rdr.curr_is('\n') {
+ if rdr.col == CharPos(0) && rdr.curr_is('\n') {
push_blank_line_comment(rdr, &mut *comments);
}
rdr.bump();
@@ -305,7 +305,7 @@ fn read_block_comment(rdr: &mut StringReader,
let mut style = if code_to_the_left { Trailing } else { Isolated };
rdr.consume_non_eol_whitespace();
- if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1us {
+ if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1 {
style = Mixed;
}
debug!("<<< block comment");

View File

@@ -279,7 +279,7 @@ impl<'a> StringReader<'a> {
/// Converts CRLF to LF in the given string, raising an error on bare CR.
fn translate_crlf<'b>(&self, start: BytePos,
s: &'b str, errmsg: &'b str) -> CowString<'b> {
- let mut i = 0us;
+ let mut i = 0;
while i < s.len() {
let str::CharRange { ch, next } = s.char_range_at(i);
if ch == '\r' {
@@ -331,10 +331,10 @@ impl<'a> StringReader<'a> {
let byte_offset_diff = next.next - current_byte_offset;
self.pos = self.pos + Pos::from_usize(byte_offset_diff);
self.curr = Some(next.ch);
- self.col = self.col + CharPos(1us);
+ self.col = self.col + CharPos(1);
if last_char == '\n' {
self.filemap.next_line(self.last_pos);
- self.col = CharPos(0us);
+ self.col = CharPos(0);
}
if byte_offset_diff > 1 {
@@ -472,7 +472,7 @@ impl<'a> StringReader<'a> {
cmap.files.borrow_mut().push(self.filemap.clone());
let loc = cmap.lookup_char_pos_adj(self.last_pos);
debug!("Skipping a shebang");
- if loc.line == 1us && loc.col == CharPos(0us) {
+ if loc.line == 1 && loc.col == CharPos(0) {
// FIXME: Add shebang "token", return it
let start = self.last_pos;
while !self.curr_is('\n') && !self.is_eof() { self.bump(); }
@@ -646,7 +646,7 @@ impl<'a> StringReader<'a> {
/// Scan through any digits (base `radix`) or underscores, and return how
/// many digits there were.
fn scan_digits(&mut self, radix: usize) -> usize {
- let mut len = 0us;
+ let mut len = 0;
loop {
let c = self.curr;
if c == Some('_') { debug!("skipping a _"); self.bump(); continue; }
@@ -799,14 +799,14 @@ impl<'a> StringReader<'a> {
if self.curr == Some('{') {
self.scan_unicode_escape(delim)
} else {
- let res = self.scan_hex_digits(4us, delim, false);
+ let res = self.scan_hex_digits(4, delim, false);
let sp = codemap::mk_sp(escaped_pos, self.last_pos);
self.old_escape_warning(sp);
res
}
}
'U' if !ascii_only => {
- let res = self.scan_hex_digits(8us, delim, false);
+ let res = self.scan_hex_digits(8, delim, false);
let sp = codemap::mk_sp(escaped_pos, self.last_pos);
self.old_escape_warning(sp);
res
@@ -877,7 +877,7 @@ impl<'a> StringReader<'a> {
fn scan_unicode_escape(&mut self, delim: char) -> bool {
self.bump(); // past the {
let start_bpos = self.last_pos;
- let mut count = 0us;
+ let mut count = 0;
let mut accum_int = 0;
while !self.curr_is('}') && count <= 6 {
@@ -937,10 +937,10 @@ impl<'a> StringReader<'a> {
/// error if it isn't.
fn check_float_base(&mut self, start_bpos: BytePos, last_bpos: BytePos, base: usize) {
match base {
- 16us => self.err_span_(start_bpos, last_bpos, "hexadecimal float literal is not \
+ 16 => self.err_span_(start_bpos, last_bpos, "hexadecimal float literal is not \
supported"),
- 8us => self.err_span_(start_bpos, last_bpos, "octal float literal is not supported"),
- 2us => self.err_span_(start_bpos, last_bpos, "binary float literal is not supported"),
+ 8 => self.err_span_(start_bpos, last_bpos, "octal float literal is not supported"),
+ 2 => self.err_span_(start_bpos, last_bpos, "binary float literal is not supported"),
_ => ()
}
}
@@ -1189,7 +1189,7 @@ impl<'a> StringReader<'a> {
'r' => {
let start_bpos = self.last_pos;
self.bump();
- let mut hash_count = 0us;
+ let mut hash_count = 0;
while self.curr_is('#') {
self.bump();
hash_count += 1;
@@ -1374,7 +1374,7 @@ impl<'a> StringReader<'a> {
fn scan_raw_byte_string(&mut self) -> token::Lit {
let start_bpos = self.last_pos;
self.bump();
- let mut hash_count = 0us;
+ let mut hash_count = 0;
while self.curr_is('#') {
self.bump();
hash_count += 1;

View File

@@ -181,7 +181,7 @@ pub fn parse_tts_from_source_str(name: String,
name,
source
);
- p.quote_depth += 1us;
+ p.quote_depth += 1;
// right now this is re-creating the token trees from ... token trees.
maybe_aborted(p.parse_all_token_trees(),p)
}
@@ -324,7 +324,7 @@ pub mod with_hygiene {
name,
source
);
- p.quote_depth += 1us;
+ p.quote_depth += 1;
// right now this is re-creating the token trees from ... token trees.
maybe_aborted(p.parse_all_token_trees(),p)
}
@@ -683,9 +683,9 @@ pub fn integer_lit(s: &str, suffix: Option<&str>, sd: &SpanHandler, sp: Span) ->
match suffix {
Some(suf) if looks_like_width_suffix(&['f'], suf) => {
match base {
- 16us => sd.span_err(sp, "hexadecimal float literal is not supported"),
- 8us => sd.span_err(sp, "octal float literal is not supported"),
- 2us => sd.span_err(sp, "binary float literal is not supported"),
+ 16 => sd.span_err(sp, "hexadecimal float literal is not supported"),
+ 8 => sd.span_err(sp, "octal float literal is not supported"),
+ 2 => sd.span_err(sp, "binary float literal is not supported"),
_ => ()
}
let ident = token::intern_and_get_ident(&*s);

View File

@@ -740,7 +740,7 @@ impl<'a> Parser<'a> {
// would encounter a `>` and stop. This lets the parser handle trailing
// commas in generic parameters, because it can stop either after
// parsing a type or after parsing a comma.
- for i in iter::count(0us, 1) {
+ for i in iter::count(0, 1) {
if self.check(&token::Gt)
|| self.token == token::BinOp(token::Shr)
|| self.token == token::Ge
@@ -917,7 +917,7 @@ impl<'a> Parser<'a> {
};
self.span = next.sp;
self.token = next.tok;
- self.tokens_consumed += 1us;
+ self.tokens_consumed += 1;
self.expected_tokens.clear();
// check after each token
self.check_unknown_macro_variable();
@@ -2625,7 +2625,7 @@ impl<'a> Parser<'a> {
}
pub fn check_unknown_macro_variable(&mut self) {
- if self.quote_depth == 0us {
+ if self.quote_depth == 0 {
match self.token {
token::SubstNt(name, _) =>
self.fatal(&format!("unknown macro variable `{}`",
@@ -2694,7 +2694,7 @@ impl<'a> Parser<'a> {
token_str)[])
},
/* we ought to allow different depths of unquotation */
- token::Dollar | token::SubstNt(..) if p.quote_depth > 0us => {
+ token::Dollar | token::SubstNt(..) if p.quote_depth > 0 => {
p.parse_unquoted()
}
_ => {
@@ -5633,7 +5633,7 @@ impl<'a> Parser<'a> {
return Ok(item);
}
if self.token.is_keyword(keywords::Unsafe) &&
- self.look_ahead(1us, |t| t.is_keyword(keywords::Trait))
+ self.look_ahead(1, |t| t.is_keyword(keywords::Trait))
{
// UNSAFE TRAIT ITEM
self.expect_keyword(keywords::Unsafe);
@@ -5650,7 +5650,7 @@ impl<'a> Parser<'a> {
return Ok(item);
}
if self.token.is_keyword(keywords::Unsafe) &&
- self.look_ahead(1us, |t| t.is_keyword(keywords::Impl))
+ self.look_ahead(1, |t| t.is_keyword(keywords::Impl))
{
// IMPL ITEM
self.expect_keyword(keywords::Unsafe);
@@ -5680,7 +5680,7 @@ impl<'a> Parser<'a> {
return Ok(item);
}
if self.token.is_keyword(keywords::Unsafe)
- && self.look_ahead(1us, |t| *t != token::OpenDelim(token::Brace)) {
+ && self.look_ahead(1, |t| *t != token::OpenDelim(token::Brace)) {
// UNSAFE FUNCTION ITEM
self.bump();
let abi = if self.eat_keyword(keywords::Extern) {
@@ -5958,7 +5958,7 @@ impl<'a> Parser<'a> {
}
}
}
- let mut rename_to = path[path.len() - 1us];
+ let mut rename_to = path[path.len() - 1];
let path = ast::Path {
span: mk_sp(lo, self.last_span.hi),
global: false,