From 8a2723010290077bca034cd988067c70d0a638db Mon Sep 17 00:00:00 2001 From: Kevin Butler Date: Mon, 9 Nov 2015 21:04:31 +0000 Subject: [PATCH 1/3] libsyntax: use char::is_whitespace instead of custom implementations Fixes #29590. --- src/libsyntax/parse/lexer/mod.rs | 5 +---- src/libsyntax/util/parser_testing.rs | 3 +-- src/test/run-pass/parser-unicode-whitespace.rs | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 src/test/run-pass/parser-unicode-whitespace.rs diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index 1402b7888dd10..3e61aaff3c927 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -1592,10 +1592,7 @@ impl<'a> StringReader<'a> { } pub fn is_whitespace(c: Option) -> bool { - match c.unwrap_or('\x00') { // None can be null for now... it's not whitespace - ' ' | '\n' | '\t' | '\r' => true, - _ => false, - } + c.map_or(false, char::is_whitespace) } fn in_range(c: Option, lo: char, hi: char) -> bool { diff --git a/src/libsyntax/util/parser_testing.rs b/src/libsyntax/util/parser_testing.rs index 454b925a4945e..74ff42b5db9ba 100644 --- a/src/libsyntax/util/parser_testing.rs +++ b/src/libsyntax/util/parser_testing.rs @@ -140,9 +140,8 @@ fn scan_for_non_ws_or_end(a : &str, idx: usize) -> usize { i } -/// Copied from lexer. pub fn is_whitespace(c: char) -> bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n'; + c.is_whitespace() } #[cfg(test)] diff --git a/src/test/run-pass/parser-unicode-whitespace.rs b/src/test/run-pass/parser-unicode-whitespace.rs new file mode 100644 index 0000000000000..b45216e704d27 --- /dev/null +++ b/src/test/run-pass/parser-unicode-whitespace.rs @@ -0,0 +1,18 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +// Beware editing: it has numerous whitespace characters which are important +pub fn main() { + assert_eq!(4 +  7 * 2 + + +/ 3 * 2 , 4 + 7 * 2 / 3 * 2); +} From 9e3e43f3f6bb0d87da5f5b7fd92db0cc990e62a3 Mon Sep 17 00:00:00 2001 From: Kevin Butler Date: Tue, 10 Nov 2015 00:29:56 +0000 Subject: [PATCH 2/3] libsyntax: make matches_codepattern unicode aware --- src/libsyntax/util/parser_testing.rs | 102 +++++++++++++++------------ 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/src/libsyntax/util/parser_testing.rs b/src/libsyntax/util/parser_testing.rs index 74ff42b5db9ba..c19033f234723 100644 --- a/src/libsyntax/util/parser_testing.rs +++ b/src/libsyntax/util/parser_testing.rs @@ -14,7 +14,7 @@ use parse::new_parser_from_source_str; use parse::parser::Parser; use parse::token; use ptr::P; -use str::char_at; +use std::iter::Peekable; /// Map a string to tts, using a made-up filename: pub fn string_to_tts(source_str: String) -> Vec { @@ -87,57 +87,55 @@ pub fn strs_to_idents(ids: Vec<&str> ) -> Vec { /// Does the given string match the pattern? whitespace in the first string /// may be deleted or replaced with other whitespace to match the pattern. -/// this function is Unicode-ignorant; fortunately, the careful design of -/// UTF-8 mitigates this ignorance. In particular, this function only collapses -/// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate Unicode -/// chars. 
Unsurprisingly, it doesn't do NKF-normalization(?).
+/// This function is relatively Unicode-ignorant; fortunately, the careful design
+/// of UTF-8 mitigates this ignorance. It doesn't do NKF-normalization(?).
 pub fn matches_codepattern(a : &str, b : &str) -> bool {
-    let mut idx_a = 0;
-    let mut idx_b = 0;
+    let mut a_iter = a.chars().peekable();
+    let mut b_iter = b.chars().peekable();
+
     loop {
-        if idx_a == a.len() && idx_b == b.len() {
-            return true;
-        }
-        else if idx_a == a.len() {return false;}
-        else if idx_b == b.len() {
-            // maybe the stuff left in a is all ws?
-            if is_whitespace(char_at(a, idx_a)) {
-                return scan_for_non_ws_or_end(a,idx_a) == a.len();
-            } else {
-                return false;
+        let (a, b) = match (a_iter.peek(), b_iter.peek()) {
+            (None, None) => return true,
+            (None, _) => return false,
+            (Some(a), None) => {
+                if a.is_whitespace() {
+                    break // trailing whitespace check is out of loop for borrowck
+                } else {
+                    return false
+                }
             }
-        }
-        // ws in both given and pattern:
-        else if is_whitespace(char_at(a, idx_a))
-           && is_whitespace(char_at(b, idx_b)) {
-            idx_a = scan_for_non_ws_or_end(a,idx_a);
-            idx_b = scan_for_non_ws_or_end(b,idx_b);
-        }
-        // ws in given only:
-        else if is_whitespace(char_at(a, idx_a)) {
-            idx_a = scan_for_non_ws_or_end(a,idx_a);
-        }
-        // *don't* silently eat ws in expected only.
-        else if char_at(a, idx_a) == char_at(b, idx_b) {
-            idx_a += 1;
-            idx_b += 1;
-        }
-        else {
-            return false;
+            (Some(&a), Some(&b)) => (a, b)
+        };
+
+        if a.is_whitespace() && b.is_whitespace() {
+            // skip whitespace for a and b
+            scan_for_non_ws_or_end(&mut a_iter);
+            scan_for_non_ws_or_end(&mut b_iter);
+        } else if a.is_whitespace() {
+            // skip whitespace for a
+            scan_for_non_ws_or_end(&mut a_iter);
+        } else if a == b {
+            a_iter.next();
+            b_iter.next();
+        } else {
+            return false
         }
     }
+
+    // check if a has *only* trailing whitespace
+    a_iter.all(|c| c.is_whitespace())
 }
 
-/// Given a string and an index, return the first usize >= idx
-/// that is a non-ws-char or is outside of the legal range of
-/// the string.
-fn scan_for_non_ws_or_end(a : &str, idx: usize) -> usize {
-    let mut i = idx;
-    let len = a.len();
-    while (i < len) && (is_whitespace(char_at(a, i))) {
-        i += 1;
+/// Advances the given peekable `Iterator` until it reaches a non-whitespace character
+fn scan_for_non_ws_or_end<I: Iterator<Item=char>>(iter: &mut Peekable<I>) {
+    loop {
+        match iter.peek() {
+            Some(c) if c.is_whitespace() => {} // fall through; borrowck
+            _ => return
+        }
+
+        iter.next();
     }
-    i
 }
 
 pub fn is_whitespace(c: char) -> bool {
@@ -148,7 +146,8 @@ pub fn is_whitespace(c: char) -> bool {
 mod tests {
     use super::*;
 
-    #[test] fn eqmodws() {
+    #[test]
+    fn eqmodws() {
         assert_eq!(matches_codepattern("",""),true);
         assert_eq!(matches_codepattern("","a"),false);
         assert_eq!(matches_codepattern("a",""),false);
@@ -159,5 +158,18 @@ mod tests {
         assert_eq!(matches_codepattern("a b","a b"),true);
         assert_eq!(matches_codepattern("ab","a b"),false);
         assert_eq!(matches_codepattern("a b","ab"),true);
+        assert_eq!(matches_codepattern(" a b","ab"),true);
+    }
+
+    #[test]
+    fn more_whitespace() {
+        assert_eq!(matches_codepattern("","\x0C"), false);
+        assert_eq!(matches_codepattern("a b","a\u{2002}b"),true);
+        assert_eq!(matches_codepattern("a b ","a \u{0085}\n\t\r b"),true);
+        assert_eq!(matches_codepattern("a b","a \u{0085}\n\t\r b "),false);
+        assert_eq!(matches_codepattern("a b","a\u{2002}b"),true);
+        assert_eq!(matches_codepattern("ab","a\u{2003}b"),false);
+        assert_eq!(matches_codepattern("a \u{3000}b","ab"),true);
+        assert_eq!(matches_codepattern("\u{205F}a b","ab"),true);
     }
 }

From 24578e0fe555f267bef40528b8ac79bc7e898007 Mon Sep 17 00:00:00 2001
From: Kevin Butler
Date: Thu, 12 Nov 2015 02:43:43 +0000
Subject: [PATCH 3/3] libsyntax: accept only whitespace with the PATTERN_WHITE_SPACE property

This aligns with Unicode recommendations and should be stable for all
future Unicode releases. See http://unicode.org/reports/tr31/#R3.

This renames `libsyntax::lexer::is_whitespace` to `is_pattern_whitespace`,
so it potentially breaks users of libsyntax.
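The table emitted into librustc_unicode::tables::property is an ordered list of
inclusive (start, end) character ranges, and Pattern_White_Space(c) is a binary
search over it (bsearch_range_table). The following standalone sketch shows that
lookup using the same five ranges; the constant and function names here are
illustrative stand-ins, not the generated code:

    // Sketch only: mirrors tables::property::Pattern_White_Space + bsearch_range_table.
    const PATTERN_WHITE_SPACE_TABLE: &'static [(char, char)] = &[
        ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ('\u{85}', '\u{85}'),
        ('\u{200e}', '\u{200f}'), ('\u{2028}', '\u{2029}'),
    ];

    fn is_pattern_white_space(c: char) -> bool {
        PATTERN_WHITE_SPACE_TABLE
            .binary_search_by(|&(lo, hi)| {
                if hi < c {
                    std::cmp::Ordering::Less    // range lies entirely before c
                } else if lo > c {
                    std::cmp::Ordering::Greater // range lies entirely after c
                } else {
                    std::cmp::Ordering::Equal   // lo <= c <= hi
                }
            })
            .is_ok()
    }

    fn main() {
        assert!(is_pattern_white_space('\u{2028}'));  // LINE SEPARATOR: accepted
        assert!(!is_pattern_white_space('\u{2002}')); // EN SPACE: White_Space only, rejected
    }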
--- mk/crates.mk | 2 +- src/etc/unicode.py | 4 +- src/librustc_unicode/lib.rs | 5 +++ src/librustc_unicode/tables.rs | 9 +++++ src/libsyntax/lib.rs | 1 + src/libsyntax/parse/lexer/comments.rs | 4 +- src/libsyntax/parse/lexer/mod.rs | 17 ++++---- src/libsyntax/util/parser_testing.rs | 39 +++++++++---------- .../run-pass/parser-unicode-whitespace.rs | 12 ++++-- 9 files changed, 57 insertions(+), 36 deletions(-) diff --git a/mk/crates.mk b/mk/crates.mk index be53234cb02e2..1ccc45113b4ff 100644 --- a/mk/crates.mk +++ b/mk/crates.mk @@ -86,7 +86,7 @@ DEPS_serialize := std log DEPS_term := std log DEPS_test := std getopts serialize rbml term native:rust_test_helpers -DEPS_syntax := std term serialize log arena libc rustc_bitflags +DEPS_syntax := std term serialize log arena libc rustc_bitflags rustc_unicode DEPS_syntax_ext := syntax fmt_macros DEPS_rustc := syntax fmt_macros flate arena serialize getopts rbml rustc_front\ diff --git a/src/etc/unicode.py b/src/etc/unicode.py index 10b864a902dc0..5a7632868e467 100755 --- a/src/etc/unicode.py +++ b/src/etc/unicode.py @@ -398,7 +398,7 @@ def emit_norm_module(f, canon, compat, combine, norm_props): derived = load_properties("DerivedCoreProperties.txt", want_derived) scripts = load_properties("Scripts.txt", []) props = load_properties("PropList.txt", - ["White_Space", "Join_Control", "Noncharacter_Code_Point"]) + ["White_Space", "Join_Control", "Noncharacter_Code_Point", "Pattern_White_Space"]) norm_props = load_properties("DerivedNormalizationProps.txt", ["Full_Composition_Exclusion"]) @@ -408,7 +408,7 @@ def emit_norm_module(f, canon, compat, combine, norm_props): # category tables for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \ ("derived_property", derived, want_derived), \ - ("property", props, ["White_Space"]): + ("property", props, ["White_Space", "Pattern_White_Space"]): emit_property_module(rf, name, cat, pfuns) # normalizations and conversions module diff --git a/src/librustc_unicode/lib.rs b/src/librustc_unicode/lib.rs index 161da07911061..d0b0c9ba5a528 100644 --- a/src/librustc_unicode/lib.rs +++ b/src/librustc_unicode/lib.rs @@ -50,3 +50,8 @@ pub mod str { pub mod derived_property { pub use tables::derived_property::{Cased, Case_Ignorable}; } + +// For use in libsyntax +pub mod property { + pub use tables::property::Pattern_White_Space; +} diff --git a/src/librustc_unicode/tables.rs b/src/librustc_unicode/tables.rs index a147bea791c47..ad17016eae8cb 100644 --- a/src/librustc_unicode/tables.rs +++ b/src/librustc_unicode/tables.rs @@ -1180,6 +1180,15 @@ pub mod derived_property { } pub mod property { + pub const Pattern_White_Space_table: &'static [(char, char)] = &[ + ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ('\u{85}', '\u{85}'), ('\u{200e}', '\u{200f}'), + ('\u{2028}', '\u{2029}') + ]; + + pub fn Pattern_White_Space(c: char) -> bool { + super::bsearch_range_table(c, Pattern_White_Space_table) + } + pub const White_Space_table: &'static [(char, char)] = &[ ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ('\u{85}', '\u{85}'), ('\u{a0}', '\u{a0}'), ('\u{1680}', '\u{1680}'), ('\u{2000}', '\u{200a}'), ('\u{2028}', '\u{2029}'), ('\u{202f}', diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index 795f4044f6eb1..6fe741794cfcc 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -37,6 +37,7 @@ extern crate term; extern crate libc; #[macro_use] extern crate log; #[macro_use] #[no_link] extern crate rustc_bitflags; +extern crate rustc_unicode; extern crate serialize as rustc_serialize; // used by deriving diff 
--git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs
index e336c98f03ca0..629edced804f5 100644
--- a/src/libsyntax/parse/lexer/comments.rs
+++ b/src/libsyntax/parse/lexer/comments.rs
@@ -15,7 +15,7 @@ use codemap::{BytePos, CharPos, CodeMap, Pos};
 use errors;
 use parse::lexer::is_block_doc_comment;
 use parse::lexer::{StringReader, TokenAndSpan};
-use parse::lexer::{is_whitespace, Reader};
+use parse::lexer::{is_pattern_whitespace, Reader};
 use parse::lexer;
 use print::pprust;
 use str::char_at;
@@ -153,7 +153,7 @@ fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
 }
 
 fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader, comments: &mut Vec<Comment>) {
-    while is_whitespace(rdr.curr) && !rdr.is_eof() {
+    while is_pattern_whitespace(rdr.curr) && !rdr.is_eof() {
         if rdr.col == CharPos(0) && rdr.curr_is('\n') {
             push_blank_line_comment(rdr, &mut *comments);
         }
diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs
index 3e61aaff3c927..88a876cac73d6 100644
--- a/src/libsyntax/parse/lexer/mod.rs
+++ b/src/libsyntax/parse/lexer/mod.rs
@@ -16,6 +16,7 @@ use ext::tt::transcribe::tt_next_token;
 use parse::token::str_to_ident;
 use parse::token;
 use str::char_at;
+use rustc_unicode::property::Pattern_White_Space;
 
 use std::borrow::Cow;
 use std::char;
@@ -546,10 +547,10 @@ impl<'a> StringReader<'a> {
                 let c = self.scan_comment();
                 debug!("scanning a comment {:?}", c);
                 c
-            }
-            c if is_whitespace(Some(c)) => {
+            },
+            c if is_pattern_whitespace(Some(c)) => {
                 let start_bpos = self.last_pos;
-                while is_whitespace(self.curr) {
+                while is_pattern_whitespace(self.curr) {
                     self.bump();
                 }
                 let c = Some(TokenAndSpan {
@@ -1435,7 +1436,7 @@ impl<'a> StringReader<'a> {
     }
 
     fn consume_whitespace(&mut self) {
-        while is_whitespace(self.curr) && !self.is_eof() {
+        while is_pattern_whitespace(self.curr) && !self.is_eof() {
             self.bump();
         }
     }
@@ -1460,7 +1461,7 @@ impl<'a> StringReader<'a> {
     }
 
     fn consume_non_eol_whitespace(&mut self) {
-        while is_whitespace(self.curr) && !self.curr_is('\n') && !self.is_eof() {
+        while is_pattern_whitespace(self.curr) && !self.curr_is('\n') && !self.is_eof() {
             self.bump();
         }
     }
@@ -1591,8 +1592,10 @@ impl<'a> StringReader<'a> {
     }
 }
 
-pub fn is_whitespace(c: Option<char>) -> bool {
-    c.map_or(false, char::is_whitespace)
+// This tests the character for the unicode property 'PATTERN_WHITE_SPACE' which
+// is guaranteed to be forward compatible. http://unicode.org/reports/tr31/#R3
+pub fn is_pattern_whitespace(c: Option<char>) -> bool {
+    c.map_or(false, Pattern_White_Space)
 }
 
 fn in_range(c: Option<char>, lo: char, hi: char) -> bool {
diff --git a/src/libsyntax/util/parser_testing.rs b/src/libsyntax/util/parser_testing.rs
index c19033f234723..a78950e959f99 100644
--- a/src/libsyntax/util/parser_testing.rs
+++ b/src/libsyntax/util/parser_testing.rs
@@ -10,7 +10,7 @@
 
 use ast;
 use parse::{ParseSess,PResult,filemap_to_tts};
-use parse::new_parser_from_source_str;
+use parse::{lexer, new_parser_from_source_str};
 use parse::parser::Parser;
 use parse::token;
 use ptr::P;
@@ -97,8 +97,8 @@ pub fn matches_codepattern(a : &str, b : &str) -> bool {
         let (a, b) = match (a_iter.peek(), b_iter.peek()) {
             (None, None) => return true,
             (None, _) => return false,
-            (Some(a), None) => {
-                if a.is_whitespace() {
+            (Some(&a), None) => {
+                if is_pattern_whitespace(a) {
                     break // trailing whitespace check is out of loop for borrowck
                 } else {
                     return false
@@ -107,11 +107,11 @@ pub fn matches_codepattern(a : &str, b : &str) -> bool {
             (Some(&a), Some(&b)) => (a, b)
         };
 
-        if a.is_whitespace() && b.is_whitespace() {
+        if is_pattern_whitespace(a) && is_pattern_whitespace(b) {
             // skip whitespace for a and b
             scan_for_non_ws_or_end(&mut a_iter);
             scan_for_non_ws_or_end(&mut b_iter);
-        } else if a.is_whitespace() {
+        } else if is_pattern_whitespace(a) {
             // skip whitespace for a
             scan_for_non_ws_or_end(&mut a_iter);
         } else if a == b {
@@ -123,23 +123,18 @@ pub fn matches_codepattern(a : &str, b : &str) -> bool {
     }
 
     // check if a has *only* trailing whitespace
-    a_iter.all(|c| c.is_whitespace())
+    a_iter.all(is_pattern_whitespace)
 }
 
 /// Advances the given peekable `Iterator` until it reaches a non-whitespace character
 fn scan_for_non_ws_or_end<I: Iterator<Item=char>>(iter: &mut Peekable<I>) {
-    loop {
-        match iter.peek() {
-            Some(c) if c.is_whitespace() => {} // fall through; borrowck
-            _ => return
-        }
-
+    while lexer::is_pattern_whitespace(iter.peek().cloned()) {
         iter.next();
     }
 }
 
-pub fn is_whitespace(c: char) -> bool {
-    c.is_whitespace()
+pub fn is_pattern_whitespace(c: char) -> bool {
+    lexer::is_pattern_whitespace(Some(c))
 }
 
 #[cfg(test)]
@@ -162,14 +157,18 @@ mod tests {
     }
 
     #[test]
-    fn more_whitespace() {
+    fn pattern_whitespace() {
         assert_eq!(matches_codepattern("","\x0C"), false);
-        assert_eq!(matches_codepattern("a b","a\u{2002}b"),true);
         assert_eq!(matches_codepattern("a b ","a \u{0085}\n\t\r b"),true);
         assert_eq!(matches_codepattern("a b","a \u{0085}\n\t\r b "),false);
-        assert_eq!(matches_codepattern("a b","a\u{2002}b"),true);
-        assert_eq!(matches_codepattern("ab","a\u{2003}b"),false);
-        assert_eq!(matches_codepattern("a \u{3000}b","ab"),true);
-        assert_eq!(matches_codepattern("\u{205F}a b","ab"),true);
+    }
+
+    #[test]
+    fn non_pattern_whitespace() {
+        // These have the property 'White_Space' but not 'Pattern_White_Space'
+        assert_eq!(matches_codepattern("a b","a\u{2002}b"), false);
+        assert_eq!(matches_codepattern("a b","a\u{2002}b"), false);
+        assert_eq!(matches_codepattern("\u{205F}a b","ab"), false);
+        assert_eq!(matches_codepattern("a \u{3000}b","ab"), false);
     }
 }
diff --git a/src/test/run-pass/parser-unicode-whitespace.rs b/src/test/run-pass/parser-unicode-whitespace.rs
index b45216e704d27..837bb8339e1d1 100644
--- a/src/test/run-pass/parser-unicode-whitespace.rs
+++ b/src/test/run-pass/parser-unicode-whitespace.rs
@@ -9,10 +9,14 @@
 // except according to those terms.
-// Beware editing: it has numerous whitespace characters which are important +// Beware editing: it has numerous whitespace characters which are important. +// It contains one ranges from the 'PATTERN_WHITE_SPACE' property outlined in +// http://unicode.org/Public/UNIDATA/PropList.txt +// +// The characters in the first expression of the assertion can be generated +// from: "4\u{0C}+\n\t\r7\t*\u{20}2\u{85}/\u{200E}3\u{200F}*\u{2028}2\u{2029}" pub fn main() { - assert_eq!(4 +  7 * 2 - +assert_eq!(4 + -/ 3 * 2 , 4 + 7 * 2 / 3 * 2); +7 * 2…/‎3‏*
2
, 4 + 7 * 2 / 3 * 2); }
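As a reference for the matching strategy used above: whitespace in the candidate
string may be collapsed or dropped to match the pattern, but it is never invented
to satisfy the pattern. The following standalone sketch substitutes std's
char::is_whitespace for the Pattern_White_Space lookup, and the names (skip_ws,
matches_modulo_ws) are illustrative, not the ones used in libsyntax:

    use std::iter::Peekable;

    // Skip a run of whitespace, leaving the iterator at the first
    // non-whitespace character (same role as scan_for_non_ws_or_end).
    fn skip_ws<I: Iterator<Item = char>>(iter: &mut Peekable<I>) {
        while iter.peek().map_or(false, |c| c.is_whitespace()) {
            iter.next();
        }
    }

    fn matches_modulo_ws(a: &str, b: &str) -> bool {
        let mut a = a.chars().peekable();
        let mut b = b.chars().peekable();
        loop {
            // Copy out the peeked chars so the iterators are free to advance below.
            let (ca, cb) = (a.peek().cloned(), b.peek().cloned());
            match (ca, cb) {
                (None, None) => return true,
                (None, Some(_)) => return false,
                // Only trailing whitespace may remain in the candidate.
                (Some(_), None) => return a.all(char::is_whitespace),
                (Some(x), Some(y)) if x.is_whitespace() && y.is_whitespace() => {
                    skip_ws(&mut a);
                    skip_ws(&mut b);
                }
                (Some(x), Some(_)) if x.is_whitespace() => skip_ws(&mut a),
                (Some(x), Some(y)) if x == y => {
                    a.next();
                    b.next();
                }
                // Mismatch, or whitespace required by the pattern but absent.
                _ => return false,
            }
        }
    }

    fn main() {
        assert!(matches_modulo_ws("a   b ", "a b"));
        assert!(!matches_modulo_ws("ab", "a b"));
    }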